1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add the number of
38 * seconds in the decade of the seventies to convert them to Linux
39 * (1970 based) timestamps: 10 years including 2 leap days.
40 */
41 #define DECADE (3600*24*(365*10+2))
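/* An illustrative conversion, matching the later uses of DECADE in
 * print_guid() and getinfo_super_ddf(): a (big-endian) DDF timestamp
 * 'ts' becomes a Unix time as
 *	time_t unix_time = __be32_to_cpu(ts) + DECADE;
 */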
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* Default for safe_mode_delay. Same value as for IMSM.
51 */
52 static const int DDF_SAFE_MODE_DELAY = 4000;
53
54 /* The DDF metadata handling.
55 * DDF metadata lives at the end of the device.
56 * The last 512 byte block provides an 'anchor' which is used to locate
57 * the rest of the metadata, which usually lives immediately before the anchor.
58 *
59 * Note:
60 * - all multibyte numeric fields are bigendian.
61 * - all strings are space padded.
62 *
63 */
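/* As a concrete sketch of the anchor location (this is what
 * load_ddf_headers() below does): the anchor is read from the last
 * 512-byte sector of the device:
 *	get_dev_size(fd, NULL, &dsize);
 *	lseek64(fd, dsize - 512, 0);
 *	read(fd, &super->anchor, 512);
 */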
64
65 typedef struct __be16 {
66 __u16 _v16;
67 } be16;
68 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
69 #define be16_and(x, y) ((x)._v16 & (y)._v16)
70 #define be16_or(x, y) ((x)._v16 | (y)._v16)
71 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
72 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
73
74 typedef struct __be32 {
75 __u32 _v32;
76 } be32;
77 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
78
79 typedef struct __be64 {
80 __u64 _v64;
81 } be64;
82 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
83
84 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
85 static inline be16 cpu_to_be16(__u16 x)
86 {
87 be16 be = { ._v16 = __cpu_to_be16(x) };
88 return be;
89 }
90
91 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
92 static inline be32 cpu_to_be32(__u32 x)
93 {
94 be32 be = { ._v32 = __cpu_to_be32(x) };
95 return be;
96 }
97
98 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
99 static inline be64 cpu_to_be64(__u64 x)
100 {
101 be64 be = { ._v64 = __cpu_to_be64(x) };
102 return be;
103 }
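/* Wrapping the raw __u16/__u32/__u64 values in single-member structs makes
 * accidental use of '==' or arithmetic on byte-swapped values a compile
 * error, so all accesses must go through the helpers above.  Typical use,
 * as seen throughout this file:
 *	if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC))
 *		return 0;
 *	hdr->crc = cpu_to_be32(newcrc);
 */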
104
105 /* Primary Raid Level (PRL) */
106 #define DDF_RAID0 0x00
107 #define DDF_RAID1 0x01
108 #define DDF_RAID3 0x03
109 #define DDF_RAID4 0x04
110 #define DDF_RAID5 0x05
111 #define DDF_RAID1E 0x11
112 #define DDF_JBOD 0x0f
113 #define DDF_CONCAT 0x1f
114 #define DDF_RAID5E 0x15
115 #define DDF_RAID5EE 0x25
116 #define DDF_RAID6 0x06
117
118 /* Raid Level Qualifier (RLQ) */
119 #define DDF_RAID0_SIMPLE 0x00
120 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
121 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
122 #define DDF_RAID3_0 0x00 /* parity in first extent */
123 #define DDF_RAID3_N 0x01 /* parity in last extent */
124 #define DDF_RAID4_0 0x00 /* parity in first extent */
125 #define DDF_RAID4_N 0x01 /* parity in last extent */
126 /* these apply to raid5e and raid5ee as well */
127 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
128 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
129 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
130 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
131
132 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
133 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
134
135 /* Secondary RAID Level (SRL) */
136 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
137 #define DDF_2MIRRORED 0x01
138 #define DDF_2CONCAT 0x02
139 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
140
141 /* Magic numbers */
142 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
143 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
144 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
145 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
146 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
147 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
148 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
149 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
150 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
151 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
152
153 #define DDF_GUID_LEN 24
154 #define DDF_REVISION_0 "01.00.00"
155 #define DDF_REVISION_2 "01.02.00"
156
157 struct ddf_header {
158 be32 magic; /* DDF_HEADER_MAGIC */
159 be32 crc;
160 char guid[DDF_GUID_LEN];
161 char revision[8]; /* 01.02.00 */
162 be32 seq; /* starts at '1' */
163 be32 timestamp;
164 __u8 openflag;
165 __u8 foreignflag;
166 __u8 enforcegroups;
167 __u8 pad0; /* 0xff */
168 __u8 pad1[12]; /* 12 * 0xff */
169 /* 64 bytes so far */
170 __u8 header_ext[32]; /* reserved: fill with 0xff */
171 be64 primary_lba;
172 be64 secondary_lba;
173 __u8 type;
174 __u8 pad2[3]; /* 0xff */
175 be32 workspace_len; /* sectors for vendor space -
176 * at least 32768(sectors) */
177 be64 workspace_lba;
178 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
179 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
180 be16 max_partitions; /* i.e. max num of configuration
181 record entries per disk */
182 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
183 *12/512) */
184 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
185 __u8 pad3[54]; /* 0xff */
186 /* 192 bytes so far */
187 be32 controller_section_offset;
188 be32 controller_section_length;
189 be32 phys_section_offset;
190 be32 phys_section_length;
191 be32 virt_section_offset;
192 be32 virt_section_length;
193 be32 config_section_offset;
194 be32 config_section_length;
195 be32 data_section_offset;
196 be32 data_section_length;
197 be32 bbm_section_offset;
198 be32 bbm_section_length;
199 be32 diag_space_offset;
200 be32 diag_space_length;
201 be32 vendor_offset;
202 be32 vendor_length;
203 /* 256 bytes so far */
204 __u8 pad4[256]; /* 0xff */
205 };
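/* The running "bytes so far" comments above plus pad4[256] add up to 512
 * bytes, so a ddf_header fills exactly one sector - the same 512 bytes that
 * load_ddf_header() and load_ddf_headers() read and CRC-check below.
 */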
206
207 /* type field */
208 #define DDF_HEADER_ANCHOR 0x00
209 #define DDF_HEADER_PRIMARY 0x01
210 #define DDF_HEADER_SECONDARY 0x02
211
212 /* The content of the 'controller section' - global scope */
213 struct ddf_controller_data {
214 be32 magic; /* DDF_CONTROLLER_MAGIC */
215 be32 crc;
216 char guid[DDF_GUID_LEN];
217 struct controller_type {
218 be16 vendor_id;
219 be16 device_id;
220 be16 sub_vendor_id;
221 be16 sub_device_id;
222 } type;
223 char product_id[16];
224 __u8 pad[8]; /* 0xff */
225 __u8 vendor_data[448];
226 };
227
228 /* The content of phys_section - global scope */
229 struct phys_disk {
230 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
231 be32 crc;
232 be16 used_pdes;
233 be16 max_pdes;
234 __u8 pad[52];
235 struct phys_disk_entry {
236 char guid[DDF_GUID_LEN];
237 be32 refnum;
238 be16 type;
239 be16 state;
240 be64 config_size; /* DDF structures must be after here */
241 char path[18]; /* another horrible structure really */
242 __u8 pad[6];
243 } entries[0];
244 };
245
246 /* phys_disk_entry.type is a bitmap - bigendian remember */
247 #define DDF_Forced_PD_GUID 1
248 #define DDF_Active_in_VD 2
249 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
250 #define DDF_Spare 8 /* overrides Global_spare */
251 #define DDF_Foreign 16
252 #define DDF_Legacy 32 /* no DDF on this device */
253
254 #define DDF_Interface_mask 0xf00
255 #define DDF_Interface_SCSI 0x100
256 #define DDF_Interface_SAS 0x200
257 #define DDF_Interface_SATA 0x300
258 #define DDF_Interface_FC 0x400
259
260 /* phys_disk_entry.state is a bigendian bitmap */
261 #define DDF_Online 1
262 #define DDF_Failed 2 /* overrides 1,4,8 */
263 #define DDF_Rebuilding 4
264 #define DDF_Transition 8
265 #define DDF_SMART 16
266 #define DDF_ReadErrors 32
267 #define DDF_Missing 64
268
269 /* The content of the virt_section global scope */
270 struct virtual_disk {
271 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
272 be32 crc;
273 be16 populated_vdes;
274 be16 max_vdes;
275 __u8 pad[52];
276 struct virtual_entry {
277 char guid[DDF_GUID_LEN];
278 be16 unit;
279 __u16 pad0; /* 0xffff */
280 be16 guid_crc;
281 be16 type;
282 __u8 state;
283 __u8 init_state;
284 __u8 pad1[14];
285 char name[16];
286 } entries[0];
287 };
288
289 /* virtual_entry.type is a bitmap - bigendian */
290 #define DDF_Shared 1
291 #define DDF_Enforce_Groups 2
292 #define DDF_Unicode 4
293 #define DDF_Owner_Valid 8
294
295 /* virtual_entry.state is a bigendian bitmap */
296 #define DDF_state_mask 0x7
297 #define DDF_state_optimal 0x0
298 #define DDF_state_degraded 0x1
299 #define DDF_state_deleted 0x2
300 #define DDF_state_missing 0x3
301 #define DDF_state_failed 0x4
302 #define DDF_state_part_optimal 0x5
303
304 #define DDF_state_morphing 0x8
305 #define DDF_state_inconsistent 0x10
306
307 /* virtual_entry.init_state is a bigendian bitmap */
308 #define DDF_initstate_mask 0x03
309 #define DDF_init_not 0x00
310 #define DDF_init_quick 0x01 /* initialisation in progress.
311 * i.e. 'state_inconsistent' */
312 #define DDF_init_full 0x02
313
314 #define DDF_access_mask 0xc0
315 #define DDF_access_rw 0x00
316 #define DDF_access_ro 0x80
317 #define DDF_access_blocked 0xc0
318
319 /* The content of the config_section - local scope
320 * It has multiple records each config_record_len sectors
321 * They can be vd_config or spare_assign
322 */
323
324 struct vd_config {
325 be32 magic; /* DDF_VD_CONF_MAGIC */
326 be32 crc;
327 char guid[DDF_GUID_LEN];
328 be32 timestamp;
329 be32 seqnum;
330 __u8 pad0[24];
331 be16 prim_elmnt_count;
332 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
333 __u8 prl;
334 __u8 rlq;
335 __u8 sec_elmnt_count;
336 __u8 sec_elmnt_seq;
337 __u8 srl;
338 be64 blocks; /* blocks per component could be different
339 * on different component devices...(only
340 * for concat I hope) */
341 be64 array_blocks; /* blocks in array */
342 __u8 pad1[8];
343 be32 spare_refs[8];
344 __u8 cache_pol[8];
345 __u8 bg_rate;
346 __u8 pad2[3];
347 __u8 pad3[52];
348 __u8 pad4[192];
349 __u8 v0[32]; /* reserved- 0xff */
350 __u8 v1[32]; /* reserved- 0xff */
351 __u8 v2[16]; /* reserved- 0xff */
352 __u8 v3[16]; /* reserved- 0xff */
353 __u8 vendor[32];
354 be32 phys_refnum[0]; /* refnum of each disk in sequence */
355 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
356 bvd are always the same size */
357 };
358 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
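/* A vd_config record is followed by ddf->mppe phys_refnum entries and then
 * by the per-disk starting LBAs; LBA_OFFSET() locates the latter.  Typical
 * use, as in getinfo_super_ddf_bvd() below:
 *	info->data_offset = be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
 */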
359
360 /* vd_config.cache_pol[7] is a bitmap */
361 #define DDF_cache_writeback 1 /* else writethrough */
362 #define DDF_cache_wadaptive 2 /* only applies if writeback */
363 #define DDF_cache_readahead 4
364 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
365 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
366 #define DDF_cache_wallowed 32 /* enable write caching */
367 #define DDF_cache_rallowed 64 /* enable read caching */
368
369 struct spare_assign {
370 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
371 be32 crc;
372 be32 timestamp;
373 __u8 reserved[7];
374 __u8 type;
375 be16 populated; /* SAEs used */
376 be16 max; /* max SAEs */
377 __u8 pad[8];
378 struct spare_assign_entry {
379 char guid[DDF_GUID_LEN];
380 be16 secondary_element;
381 __u8 pad[6];
382 } spare_ents[0];
383 };
384 /* spare_assign.type is a bitmap */
385 #define DDF_spare_dedicated 0x1 /* else global */
386 #define DDF_spare_revertible 0x2 /* else committable */
387 #define DDF_spare_active 0x4 /* else not active */
388 #define DDF_spare_affinity 0x8 /* enclosure affinity */
389
390 /* The data_section contents - local scope */
391 struct disk_data {
392 be32 magic; /* DDF_PHYS_DATA_MAGIC */
393 be32 crc;
394 char guid[DDF_GUID_LEN];
395 be32 refnum; /* crc of some magic drive data ... */
396 __u8 forced_ref; /* set when above was not result of magic */
397 __u8 forced_guid; /* set if guid was forced rather than magic */
398 __u8 vendor[32];
399 __u8 pad[442];
400 };
401
402 /* bbm_section content */
403 struct bad_block_log {
404 be32 magic;
405 be32 crc;
406 be16 entry_count;
407 be32 spare_count;
408 __u8 pad[10];
409 be64 first_spare;
410 struct mapped_block {
411 be64 defective_start;
412 be32 replacement_start;
413 be16 remap_count;
414 __u8 pad[2];
415 } entries[0];
416 };
417
418 /* Struct for internally holding ddf structures */
419 /* The DDF structure stored on each device is potentially
420 * quite different, as some data is global and some is local.
421 * The global data is:
422 * - ddf header
423 * - controller_data
424 * - Physical disk records
425 * - Virtual disk records
426 * The local data is:
427 * - Configuration records
428 * - Physical Disk data section
429 * ( and Bad block and vendor which I don't care about yet).
430 *
431 * The local data is parsed into separate lists as it is read
432 * and reconstructed for writing. This means that we only need
433 * to make config changes once and they are automatically
434 * propagated to all devices.
435 * Note that the ddf_super has space for the conf and disk data
436 * for this disk and also for a list of all such data.
437 * The list is only used for the superblock that is being
438 * built in Create or Assemble to describe the whole array.
439 */
440 struct ddf_super {
441 struct ddf_header anchor, primary, secondary;
442 struct ddf_controller_data controller;
443 struct ddf_header *active;
444 struct phys_disk *phys;
445 struct virtual_disk *virt;
446 char *conf;
447 int pdsize, vdsize;
448 unsigned int max_part, mppe, conf_rec_len;
449 int currentdev;
450 int updates_pending;
451 struct vcl {
452 union {
453 char space[512];
454 struct {
455 struct vcl *next;
456 unsigned int vcnum; /* index into ->virt */
457 struct vd_config **other_bvds;
458 __u64 *block_sizes; /* NULL if all the same */
459 };
460 };
461 struct vd_config conf;
462 } *conflist, *currentconf;
463 struct dl {
464 union {
465 char space[512];
466 struct {
467 struct dl *next;
468 int major, minor;
469 char *devname;
470 int fd;
471 unsigned long long size; /* sectors */
472 be64 primary_lba; /* sectors */
473 be64 secondary_lba; /* sectors */
474 be64 workspace_lba; /* sectors */
475 int pdnum; /* index in ->phys */
476 struct spare_assign *spare;
477 void *mdupdate; /* hold metadata update */
478
479 /* These fields used by auto-layout */
480 int raiddisk; /* slot to fill in autolayout */
481 __u64 esize;
482 };
483 };
484 struct disk_data disk;
485 struct vcl *vlist[0]; /* max_part in size */
486 } *dlist, *add_list;
487 };
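/* A sketch of how these per-disk lists are typically walked, e.g. when
 * matching a physical disk entry to its struct dl in examine_pds():
 *	struct dl *dl;
 *	for (dl = ddf->dlist; dl; dl = dl->next)
 *		if (be32_eq(dl->disk.refnum, pd->refnum))
 *			break;
 */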
488
489 #ifndef offsetof
490 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
491 #endif
492
493 #if DEBUG
494 static int all_ff(const char *guid);
495 static void pr_state(struct ddf_super *ddf, const char *msg)
496 {
497 unsigned int i;
498 dprintf("%s/%s: ", __func__, msg);
499 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
500 if (all_ff(ddf->virt->entries[i].guid))
501 continue;
502 dprintf("%u(s=%02x i=%02x) ", i,
503 ddf->virt->entries[i].state,
504 ddf->virt->entries[i].init_state);
505 }
506 dprintf("\n");
507 }
508 #else
509 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
510 #endif
511
512 static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
513 {
514 ddf->updates_pending = 1;
515 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
516 pr_state(ddf, func);
517 }
518
519 #define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)
520
521 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
522 be32 refnum, unsigned int nmax,
523 const struct vd_config **bvd,
524 unsigned int *idx);
525
526 static be32 calc_crc(void *buf, int len)
527 {
528 /* crcs are always at the same place as in the ddf_header */
529 struct ddf_header *ddf = buf;
530 be32 oldcrc = ddf->crc;
531 __u32 newcrc;
532 ddf->crc = cpu_to_be32(0xffffffff);
533
534 newcrc = crc32(0, buf, len);
535 ddf->crc = oldcrc;
536 /* The crc is stored (like everything else) bigendian, so convert
537 * here for simplicity
538 */
539 return cpu_to_be32(newcrc);
540 }
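/* The usual verification pattern built on calc_crc(), as used when loading
 * headers and configuration records below:
 *	if (!be32_eq(calc_crc(hdr, 512), hdr->crc))
 *		return 0;
 */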
541
542 #define DDF_INVALID_LEVEL 0xff
543 #define DDF_NO_SECONDARY 0xff
544 static int err_bad_md_layout(const mdu_array_info_t *array)
545 {
546 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
547 array->level, array->layout, array->raid_disks);
548 return -1;
549 }
550
551 static int layout_md2ddf(const mdu_array_info_t *array,
552 struct vd_config *conf)
553 {
554 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
555 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
556 __u8 sec_elmnt_count = 1;
557 __u8 srl = DDF_NO_SECONDARY;
558
559 switch (array->level) {
560 case LEVEL_LINEAR:
561 prl = DDF_CONCAT;
562 break;
563 case 0:
564 rlq = DDF_RAID0_SIMPLE;
565 prl = DDF_RAID0;
566 break;
567 case 1:
568 switch (array->raid_disks) {
569 case 2:
570 rlq = DDF_RAID1_SIMPLE;
571 break;
572 case 3:
573 rlq = DDF_RAID1_MULTI;
574 break;
575 default:
576 return err_bad_md_layout(array);
577 }
578 prl = DDF_RAID1;
579 break;
580 case 4:
581 if (array->layout != 0)
582 return err_bad_md_layout(array);
583 rlq = DDF_RAID4_N;
584 prl = DDF_RAID4;
585 break;
586 case 5:
587 switch (array->layout) {
588 case ALGORITHM_LEFT_ASYMMETRIC:
589 rlq = DDF_RAID5_N_RESTART;
590 break;
591 case ALGORITHM_RIGHT_ASYMMETRIC:
592 rlq = DDF_RAID5_0_RESTART;
593 break;
594 case ALGORITHM_LEFT_SYMMETRIC:
595 rlq = DDF_RAID5_N_CONTINUE;
596 break;
597 case ALGORITHM_RIGHT_SYMMETRIC:
598 /* not mentioned in standard */
599 default:
600 return err_bad_md_layout(array);
601 }
602 prl = DDF_RAID5;
603 break;
604 case 6:
605 switch (array->layout) {
606 case ALGORITHM_ROTATING_N_RESTART:
607 rlq = DDF_RAID5_N_RESTART;
608 break;
609 case ALGORITHM_ROTATING_ZERO_RESTART:
610 rlq = DDF_RAID6_0_RESTART;
611 break;
612 case ALGORITHM_ROTATING_N_CONTINUE:
613 rlq = DDF_RAID5_N_CONTINUE;
614 break;
615 default:
616 return err_bad_md_layout(array);
617 }
618 prl = DDF_RAID6;
619 break;
620 case 10:
621 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
622 rlq = DDF_RAID1_SIMPLE;
623 prim_elmnt_count = cpu_to_be16(2);
624 sec_elmnt_count = array->raid_disks / 2;
625 } else if (array->raid_disks % 3 == 0
626 && array->layout == 0x103) {
627 rlq = DDF_RAID1_MULTI;
628 prim_elmnt_count = cpu_to_be16(3);
629 sec_elmnt_count = array->raid_disks / 3;
630 } else
631 return err_bad_md_layout(array);
632 srl = DDF_2SPANNED;
633 prl = DDF_RAID1;
634 break;
635 default:
636 return err_bad_md_layout(array);
637 }
638 conf->prl = prl;
639 conf->prim_elmnt_count = prim_elmnt_count;
640 conf->rlq = rlq;
641 conf->srl = srl;
642 conf->sec_elmnt_count = sec_elmnt_count;
643 return 0;
644 }
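/* Examples of the mapping above: an md RAID5 with the default left-symmetric
 * layout becomes prl = DDF_RAID5, rlq = DDF_RAID5_N_CONTINUE; a 4-disk md
 * RAID10 with layout 0x102 (near=2) becomes a DDF RAID1 (prl) with
 * srl = DDF_2SPANNED, i.e. two spanned 2-disk BVDs.
 */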
645
646 static int err_bad_ddf_layout(const struct vd_config *conf)
647 {
648 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
649 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
650 return -1;
651 }
652
653 static int layout_ddf2md(const struct vd_config *conf,
654 mdu_array_info_t *array)
655 {
656 int level = LEVEL_UNSUPPORTED;
657 int layout = 0;
658 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
659
660 if (conf->sec_elmnt_count > 1) {
661 /* see also check_secondary() */
662 if (conf->prl != DDF_RAID1 ||
663 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
664 pr_err("Unsupported secondary RAID level %u/%u\n",
665 conf->prl, conf->srl);
666 return -1;
667 }
668 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
669 layout = 0x102;
670 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
671 layout = 0x103;
672 else
673 return err_bad_ddf_layout(conf);
674 raiddisks *= conf->sec_elmnt_count;
675 level = 10;
676 goto good;
677 }
678
679 switch (conf->prl) {
680 case DDF_CONCAT:
681 level = LEVEL_LINEAR;
682 break;
683 case DDF_RAID0:
684 if (conf->rlq != DDF_RAID0_SIMPLE)
685 return err_bad_ddf_layout(conf);
686 level = 0;
687 break;
688 case DDF_RAID1:
689 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
690 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
691 return err_bad_ddf_layout(conf);
692 level = 1;
693 break;
694 case DDF_RAID4:
695 if (conf->rlq != DDF_RAID4_N)
696 return err_bad_ddf_layout(conf);
697 level = 4;
698 break;
699 case DDF_RAID5:
700 switch (conf->rlq) {
701 case DDF_RAID5_N_RESTART:
702 layout = ALGORITHM_LEFT_ASYMMETRIC;
703 break;
704 case DDF_RAID5_0_RESTART:
705 layout = ALGORITHM_RIGHT_ASYMMETRIC;
706 break;
707 case DDF_RAID5_N_CONTINUE:
708 layout = ALGORITHM_LEFT_SYMMETRIC;
709 break;
710 default:
711 return err_bad_ddf_layout(conf);
712 }
713 level = 5;
714 break;
715 case DDF_RAID6:
716 switch (conf->rlq) {
717 case DDF_RAID5_N_RESTART:
718 layout = ALGORITHM_ROTATING_N_RESTART;
719 break;
720 case DDF_RAID6_0_RESTART:
721 layout = ALGORITHM_ROTATING_ZERO_RESTART;
722 break;
723 case DDF_RAID5_N_CONTINUE:
724 layout = ALGORITHM_ROTATING_N_CONTINUE;
725 break;
726 default:
727 return err_bad_ddf_layout(conf);
728 }
729 level = 6;
730 break;
731 default:
732 return err_bad_ddf_layout(conf);
733 };
734
735 good:
736 array->level = level;
737 array->layout = layout;
738 array->raid_disks = raiddisks;
739 return 0;
740 }
741
742 static int load_ddf_header(int fd, unsigned long long lba,
743 unsigned long long size,
744 int type,
745 struct ddf_header *hdr, struct ddf_header *anchor)
746 {
747 /* read a ddf header (primary or secondary) from fd/lba
748 * and check that it is consistent with anchor
749 * Need to check:
750 * magic, crc, guid, rev and the LBAs must be consistent with the anchor,
751 * header_type must match 'type', and everything after header_type must be the same
752 */
753 if (lba >= size-1)
754 return 0;
755
756 if (lseek64(fd, lba<<9, 0) < 0)
757 return 0;
758
759 if (read(fd, hdr, 512) != 512)
760 return 0;
761
762 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
763 pr_err("%s: bad header magic\n", __func__);
764 return 0;
765 }
766 if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
767 pr_err("%s: bad CRC\n", __func__);
768 return 0;
769 }
770 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
771 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
772 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
773 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
774 hdr->type != type ||
775 memcmp(anchor->pad2, hdr->pad2, 512 -
776 offsetof(struct ddf_header, pad2)) != 0) {
777 pr_err("%s: header mismatch\n", __func__);
778 return 0;
779 }
780
781 /* Looks good enough to me... */
782 return 1;
783 }
784
785 static void *load_section(int fd, struct ddf_super *super, void *buf,
786 be32 offset_be, be32 len_be, int check)
787 {
788 unsigned long long offset = be32_to_cpu(offset_be);
789 unsigned long long len = be32_to_cpu(len_be);
790 int dofree = (buf == NULL);
791
792 if (check)
793 if (len != 2 && len != 8 && len != 32
794 && len != 128 && len != 512)
795 return NULL;
796
797 if (len > 1024)
798 return NULL;
799 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
800 buf = NULL;
801
802 if (!buf)
803 return NULL;
804
805 if (super->active->type == 1)
806 offset += be64_to_cpu(super->active->primary_lba);
807 else
808 offset += be64_to_cpu(super->active->secondary_lba);
809
810 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
811 if (dofree)
812 free(buf);
813 return NULL;
814 }
815 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
816 if (dofree)
817 free(buf);
818 return NULL;
819 }
820 return buf;
821 }
822
823 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
824 {
825 unsigned long long dsize;
826
827 get_dev_size(fd, NULL, &dsize);
828
829 if (lseek64(fd, dsize-512, 0) < 0) {
830 if (devname)
831 pr_err("Cannot seek to anchor block on %s: %s\n",
832 devname, strerror(errno));
833 return 1;
834 }
835 if (read(fd, &super->anchor, 512) != 512) {
836 if (devname)
837 pr_err("Cannot read anchor block on %s: %s\n",
838 devname, strerror(errno));
839 return 1;
840 }
841 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
842 if (devname)
843 pr_err("no DDF anchor found on %s\n",
844 devname);
845 return 2;
846 }
847 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
848 if (devname)
849 pr_err("bad CRC on anchor on %s\n",
850 devname);
851 return 2;
852 }
853 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
854 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
855 if (devname)
856 pr_err("can only support super revision"
857 " %.8s and earlier, not %.8s on %s\n",
858 DDF_REVISION_2, super->anchor.revision,devname);
859 return 2;
860 }
861 super->active = NULL;
862 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
863 dsize >> 9, 1,
864 &super->primary, &super->anchor) == 0) {
865 if (devname)
866 pr_err("Failed to load primary DDF header "
867 "on %s\n", devname);
868 } else
869 super->active = &super->primary;
870
871 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
872 dsize >> 9, 2,
873 &super->secondary, &super->anchor)) {
874 if (super->active == NULL
875 || (be32_to_cpu(super->primary.seq)
876 < be32_to_cpu(super->secondary.seq) &&
877 !super->secondary.openflag)
878 || (be32_to_cpu(super->primary.seq)
879 == be32_to_cpu(super->secondary.seq) &&
880 super->primary.openflag && !super->secondary.openflag)
881 )
882 super->active = &super->secondary;
883 } else if (devname)
884 pr_err("Failed to load secondary DDF header on %s\n",
885 devname);
886 if (super->active == NULL)
887 return 2;
888 return 0;
889 }
890
891 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
892 {
893 void *ok;
894 ok = load_section(fd, super, &super->controller,
895 super->active->controller_section_offset,
896 super->active->controller_section_length,
897 0);
898 super->phys = load_section(fd, super, NULL,
899 super->active->phys_section_offset,
900 super->active->phys_section_length,
901 1);
902 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
903
904 super->virt = load_section(fd, super, NULL,
905 super->active->virt_section_offset,
906 super->active->virt_section_length,
907 1);
908 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
909 if (!ok ||
910 !super->phys ||
911 !super->virt) {
912 free(super->phys);
913 free(super->virt);
914 super->phys = NULL;
915 super->virt = NULL;
916 return 2;
917 }
918 super->conflist = NULL;
919 super->dlist = NULL;
920
921 super->max_part = be16_to_cpu(super->active->max_partitions);
922 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
923 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
924 return 0;
925 }
926
927 #define DDF_UNUSED_BVD 0xff
928 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
929 {
930 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
931 unsigned int i, vdsize;
932 void *p;
933 if (n_vds == 0) {
934 vcl->other_bvds = NULL;
935 return 0;
936 }
937 vdsize = ddf->conf_rec_len * 512;
938 if (posix_memalign(&p, 512, n_vds *
939 (vdsize + sizeof(struct vd_config *))) != 0)
940 return -1;
941 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
942 for (i = 0; i < n_vds; i++) {
943 vcl->other_bvds[i] = p + i * vdsize;
944 memset(vcl->other_bvds[i], 0, vdsize);
945 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
946 }
947 return 0;
948 }
949
950 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
951 unsigned int len)
952 {
953 int i;
954 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
955 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
956 break;
957
958 if (i < vcl->conf.sec_elmnt_count-1) {
959 if (be32_to_cpu(vd->seqnum) <=
960 be32_to_cpu(vcl->other_bvds[i]->seqnum))
961 return;
962 } else {
963 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
964 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
965 break;
966 if (i == vcl->conf.sec_elmnt_count-1) {
967 pr_err("no space for sec level config %u, count is %u\n",
968 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
969 return;
970 }
971 }
972 memcpy(vcl->other_bvds[i], vd, len);
973 }
974
975 static int load_ddf_local(int fd, struct ddf_super *super,
976 char *devname, int keep)
977 {
978 struct dl *dl;
979 struct stat stb;
980 char *conf;
981 unsigned int i;
982 unsigned int confsec;
983 int vnum;
984 unsigned int max_virt_disks = be16_to_cpu
985 (super->active->max_vd_entries);
986 unsigned long long dsize;
987
988 /* First the local disk info */
989 if (posix_memalign((void**)&dl, 512,
990 sizeof(*dl) +
991 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
992 pr_err("%s could not allocate disk info buffer\n",
993 __func__);
994 return 1;
995 }
996
997 load_section(fd, super, &dl->disk,
998 super->active->data_section_offset,
999 super->active->data_section_length,
1000 0);
1001 dl->devname = devname ? xstrdup(devname) : NULL;
1002
1003 fstat(fd, &stb);
1004 dl->major = major(stb.st_rdev);
1005 dl->minor = minor(stb.st_rdev);
1006 dl->next = super->dlist;
1007 dl->fd = keep ? fd : -1;
1008
1009 dl->size = 0;
1010 if (get_dev_size(fd, devname, &dsize))
1011 dl->size = dsize >> 9;
1012 /* If the disks have different sizes, the LBAs will differ
1013 * between phys disks.
1014 * At this point here, the values in super->active must be valid
1015 * for this phys disk. */
1016 dl->primary_lba = super->active->primary_lba;
1017 dl->secondary_lba = super->active->secondary_lba;
1018 dl->workspace_lba = super->active->workspace_lba;
1019 dl->spare = NULL;
1020 for (i = 0 ; i < super->max_part ; i++)
1021 dl->vlist[i] = NULL;
1022 super->dlist = dl;
1023 dl->pdnum = -1;
1024 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1025 if (memcmp(super->phys->entries[i].guid,
1026 dl->disk.guid, DDF_GUID_LEN) == 0)
1027 dl->pdnum = i;
1028
1029 /* Now the config list. */
1030 /* 'conf' is an array of config entries, some of which are
1031 * probably invalid. Those which are good need to be copied into
1032 * the conflist
1033 */
1034
1035 conf = load_section(fd, super, super->conf,
1036 super->active->config_section_offset,
1037 super->active->config_section_length,
1038 0);
1039 super->conf = conf;
1040 vnum = 0;
1041 for (confsec = 0;
1042 confsec < be32_to_cpu(super->active->config_section_length);
1043 confsec += super->conf_rec_len) {
1044 struct vd_config *vd =
1045 (struct vd_config *)((char*)conf + confsec*512);
1046 struct vcl *vcl;
1047
1048 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1049 if (dl->spare)
1050 continue;
1051 if (posix_memalign((void**)&dl->spare, 512,
1052 super->conf_rec_len*512) != 0) {
1053 pr_err("%s could not allocate spare info buf\n",
1054 __func__);
1055 return 1;
1056 }
1057
1058 memcpy(dl->spare, vd, super->conf_rec_len*512);
1059 continue;
1060 }
1061 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1062 continue;
1063 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1064 if (memcmp(vcl->conf.guid,
1065 vd->guid, DDF_GUID_LEN) == 0)
1066 break;
1067 }
1068
1069 if (vcl) {
1070 dl->vlist[vnum++] = vcl;
1071 if (vcl->other_bvds != NULL &&
1072 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1073 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1074 continue;
1075 }
1076 if (be32_to_cpu(vd->seqnum) <=
1077 be32_to_cpu(vcl->conf.seqnum))
1078 continue;
1079 } else {
1080 if (posix_memalign((void**)&vcl, 512,
1081 (super->conf_rec_len*512 +
1082 offsetof(struct vcl, conf))) != 0) {
1083 pr_err("%s could not allocate vcl buf\n",
1084 __func__);
1085 return 1;
1086 }
1087 vcl->next = super->conflist;
1088 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1089 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1090 if (alloc_other_bvds(super, vcl) != 0) {
1091 pr_err("%s could not allocate other bvds\n",
1092 __func__);
1093 free(vcl);
1094 return 1;
1095 };
1096 super->conflist = vcl;
1097 dl->vlist[vnum++] = vcl;
1098 }
1099 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1100 for (i=0; i < max_virt_disks ; i++)
1101 if (memcmp(super->virt->entries[i].guid,
1102 vcl->conf.guid, DDF_GUID_LEN)==0)
1103 break;
1104 if (i < max_virt_disks)
1105 vcl->vcnum = i;
1106 }
1107
1108 return 0;
1109 }
1110
1111 #ifndef MDASSEMBLE
1112 static int load_super_ddf_all(struct supertype *st, int fd,
1113 void **sbp, char *devname);
1114 #endif
1115
1116 static void free_super_ddf(struct supertype *st);
1117
1118 static int load_super_ddf(struct supertype *st, int fd,
1119 char *devname)
1120 {
1121 unsigned long long dsize;
1122 struct ddf_super *super;
1123 int rv;
1124
1125 if (get_dev_size(fd, devname, &dsize) == 0)
1126 return 1;
1127
1128 if (!st->ignore_hw_compat && test_partition(fd))
1129 /* DDF is not allowed on partitions */
1130 return 1;
1131
1132 /* 32M is a lower bound */
1133 if (dsize <= 32*1024*1024) {
1134 if (devname)
1135 pr_err("%s is too small for ddf: "
1136 "size is %llu sectors.\n",
1137 devname, dsize>>9);
1138 return 1;
1139 }
1140 if (dsize & 511) {
1141 if (devname)
1142 pr_err("%s is an odd size for ddf: "
1143 "size is %llu bytes.\n",
1144 devname, dsize);
1145 return 1;
1146 }
1147
1148 free_super_ddf(st);
1149
1150 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1151 pr_err("malloc of %zu failed.\n",
1152 sizeof(*super));
1153 return 1;
1154 }
1155 memset(super, 0, sizeof(*super));
1156
1157 rv = load_ddf_headers(fd, super, devname);
1158 if (rv) {
1159 free(super);
1160 return rv;
1161 }
1162
1163 /* Have valid headers and have chosen the best. Let's read in the rest*/
1164
1165 rv = load_ddf_global(fd, super, devname);
1166
1167 if (rv) {
1168 if (devname)
1169 pr_err("Failed to load all information "
1170 "sections on %s\n", devname);
1171 free(super);
1172 return rv;
1173 }
1174
1175 rv = load_ddf_local(fd, super, devname, 0);
1176
1177 if (rv) {
1178 if (devname)
1179 pr_err("Failed to load all information "
1180 "sections on %s\n", devname);
1181 free(super);
1182 return rv;
1183 }
1184
1185 /* Should possibly check the sections .... */
1186
1187 st->sb = super;
1188 if (st->ss == NULL) {
1189 st->ss = &super_ddf;
1190 st->minor_version = 0;
1191 st->max_devs = 512;
1192 }
1193 return 0;
1194
1195 }
1196
1197 static void free_super_ddf(struct supertype *st)
1198 {
1199 struct ddf_super *ddf = st->sb;
1200 if (ddf == NULL)
1201 return;
1202 free(ddf->phys);
1203 free(ddf->virt);
1204 free(ddf->conf);
1205 while (ddf->conflist) {
1206 struct vcl *v = ddf->conflist;
1207 ddf->conflist = v->next;
1208 if (v->block_sizes)
1209 free(v->block_sizes);
1210 if (v->other_bvds)
1211 /*
1212 v->other_bvds[0] points to beginning of buffer,
1213 see alloc_other_bvds()
1214 */
1215 free(v->other_bvds[0]);
1216 free(v);
1217 }
1218 while (ddf->dlist) {
1219 struct dl *d = ddf->dlist;
1220 ddf->dlist = d->next;
1221 if (d->fd >= 0)
1222 close(d->fd);
1223 if (d->spare)
1224 free(d->spare);
1225 free(d);
1226 }
1227 while (ddf->add_list) {
1228 struct dl *d = ddf->add_list;
1229 ddf->add_list = d->next;
1230 if (d->fd >= 0)
1231 close(d->fd);
1232 if (d->spare)
1233 free(d->spare);
1234 free(d);
1235 }
1236 free(ddf);
1237 st->sb = NULL;
1238 }
1239
1240 static struct supertype *match_metadata_desc_ddf(char *arg)
1241 {
1242 /* 'ddf' only supports containers */
1243 struct supertype *st;
1244 if (strcmp(arg, "ddf") != 0 &&
1245 strcmp(arg, "default") != 0
1246 )
1247 return NULL;
1248
1249 st = xcalloc(1, sizeof(*st));
1250 st->ss = &super_ddf;
1251 st->max_devs = 512;
1252 st->minor_version = 0;
1253 st->sb = NULL;
1254 return st;
1255 }
1256
1257 #ifndef MDASSEMBLE
1258
1259 static mapping_t ddf_state[] = {
1260 { "Optimal", 0},
1261 { "Degraded", 1},
1262 { "Deleted", 2},
1263 { "Missing", 3},
1264 { "Failed", 4},
1265 { "Partially Optimal", 5},
1266 { "-reserved-", 6},
1267 { "-reserved-", 7},
1268 { NULL, 0}
1269 };
1270
1271 static mapping_t ddf_init_state[] = {
1272 { "Not Initialised", 0},
1273 { "QuickInit in Progress", 1},
1274 { "Fully Initialised", 2},
1275 { "*UNKNOWN*", 3},
1276 { NULL, 0}
1277 };
1278 static mapping_t ddf_access[] = {
1279 { "Read/Write", 0},
1280 { "Reserved", 1},
1281 { "Read Only", 2},
1282 { "Blocked (no access)", 3},
1283 { NULL ,0}
1284 };
1285
1286 static mapping_t ddf_level[] = {
1287 { "RAID0", DDF_RAID0},
1288 { "RAID1", DDF_RAID1},
1289 { "RAID3", DDF_RAID3},
1290 { "RAID4", DDF_RAID4},
1291 { "RAID5", DDF_RAID5},
1292 { "RAID1E",DDF_RAID1E},
1293 { "JBOD", DDF_JBOD},
1294 { "CONCAT",DDF_CONCAT},
1295 { "RAID5E",DDF_RAID5E},
1296 { "RAID5EE",DDF_RAID5EE},
1297 { "RAID6", DDF_RAID6},
1298 { NULL, 0}
1299 };
1300 static mapping_t ddf_sec_level[] = {
1301 { "Striped", DDF_2STRIPED},
1302 { "Mirrored", DDF_2MIRRORED},
1303 { "Concat", DDF_2CONCAT},
1304 { "Spanned", DDF_2SPANNED},
1305 { NULL, 0}
1306 };
1307 #endif
1308
1309 static int all_ff(const char *guid)
1310 {
1311 int i;
1312 for (i = 0; i < DDF_GUID_LEN; i++)
1313 if (guid[i] != (char)0xff)
1314 return 0;
1315 return 1;
1316 }
1317
1318 static const char *guid_str(const char *guid)
1319 {
1320 static char buf[DDF_GUID_LEN*2+1];
1321 int i;
1322 char *p = buf;
1323 for (i = 0; i < DDF_GUID_LEN; i++) {
1324 unsigned char c = guid[i];
1325 if (c >= 32 && c < 127)
1326 p += sprintf(p, "%c", c);
1327 else
1328 p += sprintf(p, "%02x", c);
1329 }
1330 *p = '\0';
1331 return (const char *) buf;
1332 }
1333
1334 #ifndef MDASSEMBLE
1335 static void print_guid(char *guid, int tstamp)
1336 {
1337 /* GUIDs are part (or all) ASCII and part binary.
1338 * They tend to be space padded.
1339 * We print the GUID in HEX, then in parentheses add
1340 * any initial ASCII sequence, and a possible
1341 * time stamp from bytes 16-19
1342 */
1343 int l = DDF_GUID_LEN;
1344 int i;
1345
1346 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1347 if ((i&3)==0 && i != 0) printf(":");
1348 printf("%02X", guid[i]&255);
1349 }
1350
1351 printf("\n (");
1352 while (l && guid[l-1] == ' ')
1353 l--;
1354 for (i=0 ; i<l ; i++) {
1355 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1356 fputc(guid[i], stdout);
1357 else
1358 break;
1359 }
1360 if (tstamp) {
1361 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1362 char tbuf[100];
1363 struct tm *tm;
1364 tm = localtime(&then);
1365 strftime(tbuf, 100, " %D %T",tm);
1366 fputs(tbuf, stdout);
1367 }
1368 printf(")");
1369 }
1370
1371 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1372 {
1373 int crl = sb->conf_rec_len;
1374 struct vcl *vcl;
1375
1376 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1377 unsigned int i;
1378 struct vd_config *vc = &vcl->conf;
1379
1380 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1381 continue;
1382 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1383 continue;
1384
1385 /* Ok, we know about this VD, let's give more details */
1386 printf(" Raid Devices[%d] : %d (", n,
1387 be16_to_cpu(vc->prim_elmnt_count));
1388 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1389 int j;
1390 int cnt = be16_to_cpu(sb->phys->used_pdes);
1391 for (j=0; j<cnt; j++)
1392 if (be32_eq(vc->phys_refnum[i],
1393 sb->phys->entries[j].refnum))
1394 break;
1395 if (i) printf(" ");
1396 if (j < cnt)
1397 printf("%d", j);
1398 else
1399 printf("--");
1400 }
1401 printf(")\n");
1402 if (vc->chunk_shift != 255)
1403 printf(" Chunk Size[%d] : %d sectors\n", n,
1404 1 << vc->chunk_shift);
1405 printf(" Raid Level[%d] : %s\n", n,
1406 map_num(ddf_level, vc->prl)?:"-unknown-");
1407 if (vc->sec_elmnt_count != 1) {
1408 printf(" Secondary Position[%d] : %d of %d\n", n,
1409 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1410 printf(" Secondary Level[%d] : %s\n", n,
1411 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1412 }
1413 printf(" Device Size[%d] : %llu\n", n,
1414 be64_to_cpu(vc->blocks)/2);
1415 printf(" Array Size[%d] : %llu\n", n,
1416 be64_to_cpu(vc->array_blocks)/2);
1417 }
1418 }
1419
1420 static void examine_vds(struct ddf_super *sb)
1421 {
1422 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1423 unsigned int i;
1424 printf(" Virtual Disks : %d\n", cnt);
1425
1426 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1427 struct virtual_entry *ve = &sb->virt->entries[i];
1428 if (all_ff(ve->guid))
1429 continue;
1430 printf("\n");
1431 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1432 printf("\n");
1433 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1434 printf(" state[%d] : %s, %s%s\n", i,
1435 map_num(ddf_state, ve->state & 7),
1436 (ve->state & DDF_state_morphing) ? "Morphing, ": "",
1437 (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
1438 printf(" init state[%d] : %s\n", i,
1439 map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
1440 printf(" access[%d] : %s\n", i,
1441 map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
1442 printf(" Name[%d] : %.16s\n", i, ve->name);
1443 examine_vd(i, sb, ve->guid);
1444 }
1445 if (cnt) printf("\n");
1446 }
1447
1448 static void examine_pds(struct ddf_super *sb)
1449 {
1450 int cnt = be16_to_cpu(sb->phys->used_pdes);
1451 int i;
1452 struct dl *dl;
1453 printf(" Physical Disks : %d\n", cnt);
1454 printf(" Number RefNo Size Device Type/State\n");
1455
1456 for (i=0 ; i<cnt ; i++) {
1457 struct phys_disk_entry *pd = &sb->phys->entries[i];
1458 int type = be16_to_cpu(pd->type);
1459 int state = be16_to_cpu(pd->state);
1460
1461 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1462 //printf("\n");
1463 printf(" %3d %08x ", i,
1464 be32_to_cpu(pd->refnum));
1465 printf("%8lluK ",
1466 be64_to_cpu(pd->config_size)>>1);
1467 for (dl = sb->dlist; dl ; dl = dl->next) {
1468 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1469 char *dv = map_dev(dl->major, dl->minor, 0);
1470 if (dv) {
1471 printf("%-15s", dv);
1472 break;
1473 }
1474 }
1475 }
1476 if (!dl)
1477 printf("%15s","");
1478 printf(" %s%s%s%s%s",
1479 (type&2) ? "active":"",
1480 (type&4) ? "Global-Spare":"",
1481 (type&8) ? "spare" : "",
1482 (type&16)? ", foreign" : "",
1483 (type&32)? "pass-through" : "");
1484 if (state & DDF_Failed)
1485 /* This over-rides these three */
1486 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1487 printf("/%s%s%s%s%s%s%s",
1488 (state&1)? "Online": "Offline",
1489 (state&2)? ", Failed": "",
1490 (state&4)? ", Rebuilding": "",
1491 (state&8)? ", in-transition": "",
1492 (state&16)? ", SMART-errors": "",
1493 (state&32)? ", Unrecovered-Read-Errors": "",
1494 (state&64)? ", Missing" : "");
1495 printf("\n");
1496 }
1497 }
1498
1499 static void examine_super_ddf(struct supertype *st, char *homehost)
1500 {
1501 struct ddf_super *sb = st->sb;
1502
1503 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1504 printf(" Version : %.8s\n", sb->anchor.revision);
1505 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1506 printf("\n");
1507 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1508 printf("\n");
1509 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1510 printf(" Redundant hdr : %s\n", be32_eq(sb->secondary.magic,
1511 DDF_HEADER_MAGIC)
1512 ?"yes" : "no");
1513 examine_vds(sb);
1514 examine_pds(sb);
1515 }
1516
1517 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1518
1519 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1520 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1521
1522 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1523 {
1524 /*
1525 * Figure out the VD number for this supertype.
1526 * Returns DDF_CONTAINER for the container itself,
1527 * and DDF_NOTFOUND on error.
1528 */
1529 struct ddf_super *ddf = st->sb;
1530 struct mdinfo *sra;
1531 char *sub, *end;
1532 unsigned int vcnum;
1533
1534 if (*st->container_devnm == '\0')
1535 return DDF_CONTAINER;
1536
1537 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1538 if (!sra || sra->array.major_version != -1 ||
1539 sra->array.minor_version != -2 ||
1540 !is_subarray(sra->text_version))
1541 return DDF_NOTFOUND;
1542
1543 sub = strchr(sra->text_version + 1, '/');
1544 if (sub != NULL)
1545 vcnum = strtoul(sub + 1, &end, 10);
1546 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1547 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1548 return DDF_NOTFOUND;
1549
1550 return vcnum;
1551 }
1552
1553 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1554 {
1555 /* We just write a generic DDF ARRAY entry
1556 */
1557 struct mdinfo info;
1558 char nbuf[64];
1559 getinfo_super_ddf(st, &info, NULL);
1560 fname_from_uuid(st, &info, nbuf, ':');
1561
1562 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1563 }
1564
1565 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1566 {
1567 /* We just write a generic DDF ARRAY entry
1568 */
1569 struct ddf_super *ddf = st->sb;
1570 struct mdinfo info;
1571 unsigned int i;
1572 char nbuf[64];
1573 getinfo_super_ddf(st, &info, NULL);
1574 fname_from_uuid(st, &info, nbuf, ':');
1575
1576 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1577 struct virtual_entry *ve = &ddf->virt->entries[i];
1578 struct vcl vcl;
1579 char nbuf1[64];
1580 if (all_ff(ve->guid))
1581 continue;
1582 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1583 ddf->currentconf =&vcl;
1584 uuid_from_super_ddf(st, info.uuid);
1585 fname_from_uuid(st, &info, nbuf1, ':');
1586 printf("ARRAY container=%s member=%d UUID=%s\n",
1587 nbuf+5, i, nbuf1+5);
1588 }
1589 }
1590
1591 static void export_examine_super_ddf(struct supertype *st)
1592 {
1593 struct mdinfo info;
1594 char nbuf[64];
1595 getinfo_super_ddf(st, &info, NULL);
1596 fname_from_uuid(st, &info, nbuf, ':');
1597 printf("MD_METADATA=ddf\n");
1598 printf("MD_LEVEL=container\n");
1599 printf("MD_UUID=%s\n", nbuf+5);
1600 printf("MD_DEVICES=%u\n",
1601 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1602 }
1603
1604 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1605 {
1606 void *buf;
1607 unsigned long long dsize, offset;
1608 int bytes;
1609 struct ddf_header *ddf;
1610 int written = 0;
1611
1612 /* The metadata consists of an anchor, a primary, and a secondary header.
1613 * This all lives at the end of the device.
1614 * So it is easiest to find the earliest of primary and
1615 * secondary, and copy everything from there.
1616 *
1617 * The anchor is 512 bytes from the end.  It contains primary_lba and
1618 * secondary_lba; we start copying from whichever comes first.
1619 */
1620
1621 if (posix_memalign(&buf, 4096, 4096) != 0)
1622 return 1;
1623
1624 if (!get_dev_size(from, NULL, &dsize))
1625 goto err;
1626
1627 if (lseek64(from, dsize-512, 0) < 0)
1628 goto err;
1629 if (read(from, buf, 512) != 512)
1630 goto err;
1631 ddf = buf;
1632 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1633 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1634 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1635 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1636 goto err;
1637
1638 offset = dsize - 512;
1639 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1640 offset = be64_to_cpu(ddf->primary_lba) << 9;
1641 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1642 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1643
1644 bytes = dsize - offset;
1645
1646 if (lseek64(from, offset, 0) < 0 ||
1647 lseek64(to, offset, 0) < 0)
1648 goto err;
1649 while (written < bytes) {
1650 int n = bytes - written;
1651 if (n > 4096)
1652 n = 4096;
1653 if (read(from, buf, n) != n)
1654 goto err;
1655 if (write(to, buf, n) != n)
1656 goto err;
1657 written += n;
1658 }
1659 free(buf);
1660 return 0;
1661 err:
1662 free(buf);
1663 return 1;
1664 }
1665
1666 static void detail_super_ddf(struct supertype *st, char *homehost)
1667 {
1668 /* FIXME later
1669 * Could print DDF GUID
1670 * Need to find which array
1671 * If whole, briefly list all arrays
1672 * If one, give name
1673 */
1674 }
1675
1676 static void brief_detail_super_ddf(struct supertype *st)
1677 {
1678 struct mdinfo info;
1679 char nbuf[64];
1680 struct ddf_super *ddf = st->sb;
1681 unsigned int vcnum = get_vd_num_of_subarray(st);
1682 if (vcnum == DDF_CONTAINER)
1683 uuid_from_super_ddf(st, info.uuid);
1684 else if (vcnum == DDF_NOTFOUND)
1685 return;
1686 else
1687 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, info.uuid);
1688 fname_from_uuid(st, &info, nbuf,':');
1689 printf(" UUID=%s", nbuf + 5);
1690 }
1691 #endif
1692
1693 static int match_home_ddf(struct supertype *st, char *homehost)
1694 {
1695 /* It matches 'this' host if the controller is a
1696 * Linux-MD controller with vendor_data matching
1697 * the hostname
1698 */
1699 struct ddf_super *ddf = st->sb;
1700 unsigned int len;
1701
1702 if (!homehost)
1703 return 0;
1704 len = strlen(homehost);
1705
1706 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1707 len < sizeof(ddf->controller.vendor_data) &&
1708 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1709 ddf->controller.vendor_data[len] == 0);
1710 }
1711
1712 #ifndef MDASSEMBLE
1713 static int find_index_in_bvd(const struct ddf_super *ddf,
1714 const struct vd_config *conf, unsigned int n,
1715 unsigned int *n_bvd)
1716 {
1717 /*
1718 * Find the index of the n-th valid physical disk in this BVD
1719 */
1720 unsigned int i, j;
1721 for (i = 0, j = 0; i < ddf->mppe &&
1722 j < be16_to_cpu(conf->prim_elmnt_count); i++) {
1723 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1724 if (n == j) {
1725 *n_bvd = i;
1726 return 1;
1727 }
1728 j++;
1729 }
1730 }
1731 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1732 __func__, n, be16_to_cpu(conf->prim_elmnt_count));
1733 return 0;
1734 }
1735
1736 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1737 unsigned int n,
1738 unsigned int *n_bvd, struct vcl **vcl)
1739 {
1740 struct vcl *v;
1741
1742 for (v = ddf->conflist; v; v = v->next) {
1743 unsigned int nsec, ibvd = 0;
1744 struct vd_config *conf;
1745 if (inst != v->vcnum)
1746 continue;
1747 conf = &v->conf;
1748 if (conf->sec_elmnt_count == 1) {
1749 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1750 *vcl = v;
1751 return conf;
1752 } else
1753 goto bad;
1754 }
1755 if (v->other_bvds == NULL) {
1756 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1757 __func__, conf->sec_elmnt_count);
1758 goto bad;
1759 }
1760 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1761 if (conf->sec_elmnt_seq != nsec) {
1762 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1763 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1764 == nsec)
1765 break;
1766 }
1767 if (ibvd == conf->sec_elmnt_count)
1768 goto bad;
1769 conf = v->other_bvds[ibvd-1];
1770 }
1771 if (!find_index_in_bvd(ddf, conf,
1772 n - nsec*conf->sec_elmnt_count, n_bvd))
1773 goto bad;
1774 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1775 , __func__, n, *n_bvd, ibvd, inst);
1776 *vcl = v;
1777 return conf;
1778 }
1779 bad:
1780 pr_err("%s: Couldn't find disk %d in array %u\n", __func__, n, inst);
1781 return NULL;
1782 }
1783 #endif
1784
1785 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1786 {
1787 /* Find the entry in phys_disk which has the given refnum
1788 * and return its index
1789 */
1790 unsigned int i;
1791 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1792 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1793 return i;
1794 return -1;
1795 }
1796
1797 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1798 {
1799 char buf[20];
1800 struct sha1_ctx ctx;
1801 sha1_init_ctx(&ctx);
1802 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1803 sha1_finish_ctx(&ctx, buf);
1804 memcpy(uuid, buf, 4*4);
1805 }
1806
1807 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1808 {
1809 /* The uuid returned here is used for:
1810 * uuid to put into bitmap file (Create, Grow)
1811 * uuid for backup header when saving critical section (Grow)
1812 * comparing uuids when re-adding a device into an array
1813 * In these cases the uuid required is that of the data-array,
1814 * not the device-set.
1815 * uuid to recognise same set when adding a missing device back
1816 * to an array. This is a uuid for the device-set.
1817 *
1818 * For each of these we can make do with a truncated
1819 * or hashed uuid rather than the original, as long as
1820 * everyone agrees.
1821 * In the case of SVD we assume the BVD is of interest,
1822 * though that might not be the case if a bitmap were made for
1823 * a mirrored SVD - worry about that later.
1824 * So we need to find the VD configuration record for the
1825 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1826 * The first 16 bytes of the sha1 of these is used.
1827 */
1828 struct ddf_super *ddf = st->sb;
1829 struct vcl *vcl = ddf->currentconf;
1830 char *guid;
1831
1832 if (vcl)
1833 guid = vcl->conf.guid;
1834 else
1835 guid = ddf->anchor.guid;
1836 uuid_from_ddf_guid(guid, uuid);
1837 }
1838
1839 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1840
1841 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1842 {
1843 struct ddf_super *ddf = st->sb;
1844 int map_disks = info->array.raid_disks;
1845 __u32 *cptr;
1846
1847 if (ddf->currentconf) {
1848 getinfo_super_ddf_bvd(st, info, map);
1849 return;
1850 }
1851 memset(info, 0, sizeof(*info));
1852
1853 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1854 info->array.level = LEVEL_CONTAINER;
1855 info->array.layout = 0;
1856 info->array.md_minor = -1;
1857 cptr = (__u32 *)(ddf->anchor.guid + 16);
1858 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1859
1860 info->array.utime = 0;
1861 info->array.chunk_size = 0;
1862 info->container_enough = 1;
1863
1864 info->disk.major = 0;
1865 info->disk.minor = 0;
1866 if (ddf->dlist) {
1867 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1868 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1869
1870 info->data_offset = be64_to_cpu(ddf->phys->
1871 entries[info->disk.raid_disk].
1872 config_size);
1873 info->component_size = ddf->dlist->size - info->data_offset;
1874 } else {
1875 info->disk.number = -1;
1876 info->disk.raid_disk = -1;
1877 // info->disk.raid_disk = find refnum in the table and use index;
1878 }
1879 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1880
1881 info->recovery_start = MaxSector;
1882 info->reshape_active = 0;
1883 info->recovery_blocked = 0;
1884 info->name[0] = 0;
1885
1886 info->array.major_version = -1;
1887 info->array.minor_version = -2;
1888 strcpy(info->text_version, "ddf");
1889 info->safe_mode_delay = 0;
1890
1891 uuid_from_super_ddf(st, info->uuid);
1892
1893 if (map) {
1894 int i;
1895 for (i = 0 ; i < map_disks; i++) {
1896 if (i < info->array.raid_disks &&
1897 (be16_to_cpu(ddf->phys->entries[i].state)
1898 & DDF_Online) &&
1899 !(be16_to_cpu(ddf->phys->entries[i].state)
1900 & DDF_Failed))
1901 map[i] = 1;
1902 else
1903 map[i] = 0;
1904 }
1905 }
1906 }
1907
1908 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1909 {
1910 struct ddf_super *ddf = st->sb;
1911 struct vcl *vc = ddf->currentconf;
1912 int cd = ddf->currentdev;
1913 int n_prim;
1914 int j;
1915 struct dl *dl;
1916 int map_disks = info->array.raid_disks;
1917 __u32 *cptr;
1918 struct vd_config *conf;
1919
1920 memset(info, 0, sizeof(*info));
1921 if (layout_ddf2md(&vc->conf, &info->array) == -1)
1922 return;
1923 info->array.md_minor = -1;
1924 cptr = (__u32 *)(vc->conf.guid + 16);
1925 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1926 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
1927 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1928 info->custom_array_size = 0;
1929
1930 conf = &vc->conf;
1931 n_prim = be16_to_cpu(conf->prim_elmnt_count);
1932 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
1933 int ibvd = cd / n_prim - 1;
1934 cd %= n_prim;
1935 conf = vc->other_bvds[ibvd];
1936 }
1937
1938 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1939 info->data_offset =
1940 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
1941 if (vc->block_sizes)
1942 info->component_size = vc->block_sizes[cd];
1943 else
1944 info->component_size = be64_to_cpu(conf->blocks);
1945 }
1946
1947 for (dl = ddf->dlist; dl ; dl = dl->next)
1948 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
1949 break;
1950
1951 info->disk.major = 0;
1952 info->disk.minor = 0;
1953 info->disk.state = 0;
1954 if (dl) {
1955 info->disk.major = dl->major;
1956 info->disk.minor = dl->minor;
1957 info->disk.raid_disk = cd + conf->sec_elmnt_seq
1958 * be16_to_cpu(conf->prim_elmnt_count);
1959 info->disk.number = dl->pdnum;
1960 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
1961 }
1962
1963 info->container_member = ddf->currentconf->vcnum;
1964
1965 info->recovery_start = MaxSector;
1966 info->resync_start = 0;
1967 info->reshape_active = 0;
1968 info->recovery_blocked = 0;
1969 if (!(ddf->virt->entries[info->container_member].state
1970 & DDF_state_inconsistent) &&
1971 (ddf->virt->entries[info->container_member].init_state
1972 & DDF_initstate_mask)
1973 == DDF_init_full)
1974 info->resync_start = MaxSector;
1975
1976 uuid_from_super_ddf(st, info->uuid);
1977
1978 info->array.major_version = -1;
1979 info->array.minor_version = -2;
1980 sprintf(info->text_version, "/%s/%d",
1981 st->container_devnm,
1982 info->container_member);
1983 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
1984
1985 memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
1986 info->name[16]=0;
1987 for(j=0; j<16; j++)
1988 if (info->name[j] == ' ')
1989 info->name[j] = 0;
1990
1991 if (map)
1992 for (j = 0; j < map_disks; j++) {
1993 map[j] = 0;
1994 if (j < info->array.raid_disks) {
1995 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
1996 if (i >= 0 &&
1997 (be16_to_cpu(ddf->phys->entries[i].state)
1998 & DDF_Online) &&
1999 !(be16_to_cpu(ddf->phys->entries[i].state)
2000 & DDF_Failed))
2001 map[i] = 1;
2002 }
2003 }
2004 }
2005
2006 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
2007 char *update,
2008 char *devname, int verbose,
2009 int uuid_set, char *homehost)
2010 {
2011 /* For 'assemble' and 'force' we need to return non-zero if any
2012 * change was made. For others, the return value is ignored.
2013 * Update options are:
2014 * force-one : This device looks a bit old but needs to be included,
2015 * update age info appropriately.
2016 * assemble: clear any 'faulty' flag to allow this device to
2017 * be assembled.
2018 * force-array: Array is degraded but being forced, mark it clean
2019 * if that will be needed to assemble it.
2020 *
2021 * newdev: not used ????
2022 * grow: Array has gained a new device - this is currently for
2023 * linear only
2024 * resync: mark as dirty so a resync will happen.
2025 * uuid: Change the uuid of the array to match what is given
2026 * homehost: update the recorded homehost
2027 * name: update the name - preserving the homehost
2028 * _reshape_progress: record new reshape_progress position.
2029 *
2030 * Following are not relevant for this version:
2031 * sparc2.2 : update from old dodgy metadata
2032 * super-minor: change the preferred_minor number
2033 * summaries: update redundant counters.
2034 */
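/* Orientation note (added, hedged): the update strings handled below
 * typically arrive via "mdadm --assemble --update=<name|homehost|resync|...>",
 * while "force-one", "force-array" and "assemble" are generated
 * internally by Assemble when forcing a degraded or stale array.
 */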
2035 int rv = 0;
2036 // struct ddf_super *ddf = st->sb;
2037 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
2038 // struct virtual_entry *ve = find_ve(ddf);
2039
2040 /* we don't need to handle "force-*" or "assemble" as
2041 * there is no need to 'trick' the kernel. When the metadata is
2042 * first updated to activate the array, all the implied modifications
2043 * will just happen.
2044 */
2045
2046 if (strcmp(update, "grow") == 0) {
2047 /* FIXME */
2048 } else if (strcmp(update, "resync") == 0) {
2049 // info->resync_checkpoint = 0;
2050 } else if (strcmp(update, "homehost") == 0) {
2051 /* homehost is stored in controller->vendor_data,
2052 * or at least it is when we are the vendor
2053 */
2054 // if (info->vendor_is_local)
2055 // strcpy(ddf->controller.vendor_data, homehost);
2056 rv = -1;
2057 } else if (strcmp(update, "name") == 0) {
2058 /* name is stored in virtual_entry->name */
2059 // memset(ve->name, ' ', 16);
2060 // strncpy(ve->name, info->name, 16);
2061 rv = -1;
2062 } else if (strcmp(update, "_reshape_progress") == 0) {
2063 /* We don't support reshape yet */
2064 } else if (strcmp(update, "assemble") == 0 ) {
2065 /* Do nothing, just succeed */
2066 rv = 0;
2067 } else
2068 rv = -1;
2069
2070 // update_all_csum(ddf);
2071
2072 return rv;
2073 }
2074
2075 static void make_header_guid(char *guid)
2076 {
2077 be32 stamp;
2078 /* Create a DDF Header of Virtual Disk GUID */
2079
2080 /* 24 bytes of fiction required.
2081 * first 8 are a 'vendor-id' - "Linux-MD"
2082 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2083 * Remaining 8 random number plus timestamp
2084 */
2085 memcpy(guid, T10, sizeof(T10));
2086 stamp = cpu_to_be32(0xdeadbeef);
2087 memcpy(guid+8, &stamp, 4);
2088 stamp = cpu_to_be32(0);
2089 memcpy(guid+12, &stamp, 4);
2090 stamp = cpu_to_be32(time(0) - DECADE);
2091 memcpy(guid+16, &stamp, 4);
2092 stamp._v32 = random32();
2093 memcpy(guid+20, &stamp, 4);
2094 }
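/* Sketch of the 24-byte GUID produced above (descriptive note, added):
 *
 *   bytes  0..7   "Linux-MD"         (the T10 vendor string)
 *   bytes  8..11  0xdeadbeef         (big-endian, fake controller type)
 *   bytes 12..15  0x00000000
 *   bytes 16..19  time(0) - DECADE   (big-endian creation timestamp)
 *   bytes 20..23  random32()         (collision avoidance)
 *
 * getinfo_super_ddf() and container_content_ddf() rely on the
 * timestamp at offset 16 to reconstruct ctime.
 */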
2095
2096 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2097 {
2098 unsigned int i;
2099 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2100 if (all_ff(ddf->virt->entries[i].guid))
2101 return i;
2102 }
2103 return DDF_NOTFOUND;
2104 }
2105
2106 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2107 const char *name)
2108 {
2109 unsigned int i;
2110 if (name == NULL)
2111 return DDF_NOTFOUND;
2112 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2113 if (all_ff(ddf->virt->entries[i].guid))
2114 continue;
2115 if (!strncmp(name, ddf->virt->entries[i].name,
2116 sizeof(ddf->virt->entries[i].name)))
2117 return i;
2118 }
2119 return DDF_NOTFOUND;
2120 }
2121
2122 #ifndef MDASSEMBLE
2123 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2124 const char *guid)
2125 {
2126 unsigned int i;
2127 if (guid == NULL || all_ff(guid))
2128 return DDF_NOTFOUND;
2129 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2130 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2131 return i;
2132 return DDF_NOTFOUND;
2133 }
2134 #endif
2135
2136 static int init_super_ddf_bvd(struct supertype *st,
2137 mdu_array_info_t *info,
2138 unsigned long long size,
2139 char *name, char *homehost,
2140 int *uuid, unsigned long long data_offset);
2141
2142 static int init_super_ddf(struct supertype *st,
2143 mdu_array_info_t *info,
2144 unsigned long long size, char *name, char *homehost,
2145 int *uuid, unsigned long long data_offset)
2146 {
2147 /* This is primarily called by Create when creating a new array.
2148 * We will then get add_to_super called for each component, and then
2149 * write_init_super called to write it out to each device.
2150 * For DDF, Create can create on fresh devices or on a pre-existing
2151 * array.
2152 * To create on a pre-existing array a different method will be called.
2153 * This one is just for fresh drives.
2154 *
2155 * We need to create the entire 'ddf' structure which includes:
2156 * DDF headers - these are easy.
2157 * Controller data - a Sector describing this controller .. not that
2158 * this is a controller exactly.
2159 * Physical Disk Record - one entry per device, so
2160 * leave plenty of space.
2161 * Virtual Disk Records - again, just leave plenty of space.
2162 * This just lists VDs, doesn't give details
2163 * Config records - describes the VDs that use this disk
2164 * DiskData - describes 'this' device.
2165 * BadBlockManagement - empty
2166 * Diag Space - empty
2167 * Vendor Logs - Could we put bitmaps here?
2168 *
2169 */
2170 struct ddf_super *ddf;
2171 char hostname[17];
2172 int hostlen;
2173 int max_phys_disks, max_virt_disks;
2174 unsigned long long sector;
2175 int clen;
2176 int i;
2177 int pdsize, vdsize;
2178 struct phys_disk *pd;
2179 struct virtual_disk *vd;
2180
2181 if (data_offset != INVALID_SECTORS) {
2182 pr_err("data-offset not supported by DDF\n");
2183 return 0;
2184 }
2185
2186 if (st->sb)
2187 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2188 data_offset);
2189
2190 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2191 pr_err("%s could not allocate superblock\n", __func__);
2192 return 0;
2193 }
2194 memset(ddf, 0, sizeof(*ddf));
2195 ddf->dlist = NULL; /* no physical disks yet */
2196 ddf->conflist = NULL; /* No virtual disks yet */
2197 st->sb = ddf;
2198
2199 if (info == NULL) {
2200 /* zeroing superblock */
2201 return 0;
2202 }
2203
2204 /* At least 32MB *must* be reserved for the ddf. So let's just
2205 * start 32MB from the end, and put the primary header there.
2206 * Don't do secondary for now.
2207 * We don't know exactly where that will be yet as it could be
2208 * different on each device. So just set up the lengths.
2209 *
2210 */
2211
2212 ddf->anchor.magic = DDF_HEADER_MAGIC;
2213 make_header_guid(ddf->anchor.guid);
2214
2215 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2216 ddf->anchor.seq = cpu_to_be32(1);
2217 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2218 ddf->anchor.openflag = 0xFF;
2219 ddf->anchor.foreignflag = 0;
2220 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2221 ddf->anchor.pad0 = 0xff;
2222 memset(ddf->anchor.pad1, 0xff, 12);
2223 memset(ddf->anchor.header_ext, 0xff, 32);
2224 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2225 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2226 ddf->anchor.type = DDF_HEADER_ANCHOR;
2227 memset(ddf->anchor.pad2, 0xff, 3);
2228 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2229 /* Put this at bottom of 32M reserved.. */
2230 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2231 max_phys_disks = 1023; /* Should be enough */
2232 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2233 max_virt_disks = 255;
2234 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks); /* ?? */
2235 ddf->anchor.max_partitions = cpu_to_be16(64); /* ?? */
2236 ddf->max_part = 64;
2237 ddf->mppe = 256;
2238 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
2239 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2240 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2241 memset(ddf->anchor.pad3, 0xff, 54);
2242 /* the controller section is one sector long, immediately
2243 * after the ddf header */
2244 sector = 1;
2245 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2246 ddf->anchor.controller_section_length = cpu_to_be32(1);
2247 sector += 1;
2248
2249 /* phys is 8 sectors after that */
2250 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2251 sizeof(struct phys_disk_entry)*max_phys_disks,
2252 512);
2253 switch(pdsize/512) {
2254 case 2: case 8: case 32: case 128: case 512: break;
2255 default: abort();
2256 }
2257 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2258 ddf->anchor.phys_section_length =
2259 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2260 sector += pdsize/512;
2261
2262 /* virt is another 32 sectors */
2263 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2264 sizeof(struct virtual_entry) * max_virt_disks,
2265 512);
2266 switch(vdsize/512) {
2267 case 2: case 8: case 32: case 128: case 512: break;
2268 default: abort();
2269 }
2270 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2271 ddf->anchor.virt_section_length =
2272 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2273 sector += vdsize/512;
2274
2275 clen = ddf->conf_rec_len * (ddf->max_part+1);
2276 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2277 ddf->anchor.config_section_length = cpu_to_be32(clen);
2278 sector += clen;
2279
2280 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2281 ddf->anchor.data_section_length = cpu_to_be32(1);
2282 sector += 1;
2283
2284 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2285 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2286 ddf->anchor.diag_space_length = cpu_to_be32(0);
2287 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2288 ddf->anchor.vendor_length = cpu_to_be32(0);
2289 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2290
2291 memset(ddf->anchor.pad4, 0xff, 256);
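/* Illustrative summary (added commentary): the anchor built above
 * describes the following layout, in sectors relative to the header
 * that owns it:
 *
 *   +0    DDF header          (1 sector)
 *   +1    controller data     (1 sector)
 *   +2    phys disk records   (pdsize/512 sectors)
 *   ...   virt disk records   (vdsize/512 sectors)
 *   ...   config records      (conf_rec_len * (max_part+1) sectors)
 *   ...   disk data           (1 sector)
 *
 * With the defaults chosen here (mppe = 256, max_part = 64) each config
 * record is conf_rec_len = 1 + ROUND_UP(256*(4+8), 512)/512 = 7 sectors,
 * so the config area is 7 * 65 = 455 sectors; the extra 65th slot is
 * used for the per-disk spare record, if any (see __write_ddf_structure()).
 */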
2292
2293 memcpy(&ddf->primary, &ddf->anchor, 512);
2294 memcpy(&ddf->secondary, &ddf->anchor, 512);
2295
2296 ddf->primary.openflag = 1; /* I guess.. */
2297 ddf->primary.type = DDF_HEADER_PRIMARY;
2298
2299 ddf->secondary.openflag = 1; /* I guess.. */
2300 ddf->secondary.type = DDF_HEADER_SECONDARY;
2301
2302 ddf->active = &ddf->primary;
2303
2304 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2305
2306 /* 24 more bytes of fiction required.
2307 * first 8 are a 'vendor-id' - "Linux-MD"
2308 * Remaining 16 are serial number.... maybe a hostname would do?
2309 */
2310 memcpy(ddf->controller.guid, T10, sizeof(T10));
2311 gethostname(hostname, sizeof(hostname));
2312 hostname[sizeof(hostname) - 1] = 0;
2313 hostlen = strlen(hostname);
2314 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2315 for (i = strlen(T10) ; i+hostlen < 24; i++)
2316 ddf->controller.guid[i] = ' ';
2317
2318 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2319 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2320 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2321 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2322 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2323 memset(ddf->controller.pad, 0xff, 8);
2324 memset(ddf->controller.vendor_data, 0xff, 448);
2325 if (homehost && strlen(homehost) < 440)
2326 strcpy((char*)ddf->controller.vendor_data, homehost);
2327
2328 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2329 pr_err("%s could not allocate pd\n", __func__);
2330 return 0;
2331 }
2332 ddf->phys = pd;
2333 ddf->pdsize = pdsize;
2334
2335 memset(pd, 0xff, pdsize);
2336 memset(pd, 0, sizeof(*pd));
2337 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2338 pd->used_pdes = cpu_to_be16(0);
2339 pd->max_pdes = cpu_to_be16(max_phys_disks);
2340 memset(pd->pad, 0xff, 52);
2341 for (i = 0; i < max_phys_disks; i++)
2342 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2343
2344 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2345 pr_err("%s could not allocate vd\n", __func__);
2346 return 0;
2347 }
2348 ddf->virt = vd;
2349 ddf->vdsize = vdsize;
2350 memset(vd, 0, vdsize);
2351 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2352 vd->populated_vdes = cpu_to_be16(0);
2353 vd->max_vdes = cpu_to_be16(max_virt_disks);
2354 memset(vd->pad, 0xff, 52);
2355
2356 for (i=0; i<max_virt_disks; i++)
2357 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2358
2359 st->sb = ddf;
2360 ddf_set_updates_pending(ddf);
2361 return 1;
2362 }
2363
2364 static int chunk_to_shift(int chunksize)
2365 {
2366 return ffs(chunksize/512)-1;
2367 }
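/* Worked example (added for clarity): chunk sizes are stored as a shift
 * relative to a 512-byte sector.  For a 64KiB chunk,
 *     chunk_to_shift(65536) == ffs(65536/512) - 1 == ffs(128) - 1 == 7
 * and the readers above reverse it with "512 << 7 == 65536".
 */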
2368
2369 #ifndef MDASSEMBLE
2370 struct extent {
2371 unsigned long long start, size;
2372 };
2373 static int cmp_extent(const void *av, const void *bv)
2374 {
2375 const struct extent *a = av;
2376 const struct extent *b = bv;
2377 if (a->start < b->start)
2378 return -1;
2379 if (a->start > b->start)
2380 return 1;
2381 return 0;
2382 }
2383
2384 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2385 {
2386 /* find a list of used extents on the given physical device
2387 * (dl) of the given ddf.
2388 * Return a malloced array of 'struct extent'
2389
2390 * FIXME ignore DDF_Legacy devices?
2391
2392 */
2393 struct extent *rv;
2394 int n = 0;
2395 unsigned int i;
2396 __u16 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2397
2398 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2399 return NULL;
2400
2401 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2402
2403 for (i = 0; i < ddf->max_part; i++) {
2404 const struct vd_config *bvd;
2405 unsigned int ibvd;
2406 struct vcl *v = dl->vlist[i];
2407 if (v == NULL ||
2408 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2409 &bvd, &ibvd) == DDF_NOTFOUND)
2410 continue;
2411 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2412 rv[n].size = be64_to_cpu(bvd->blocks);
2413 n++;
2414 }
2415 qsort(rv, n, sizeof(*rv), cmp_extent);
2416
2417 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2418 rv[n].size = 0;
2419 return rv;
2420 }
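/* Usage note (added): the array returned above is terminated by a
 * sentinel entry whose .size is 0 and whose .start is the device's
 * config_size, so callers can walk the gaps between used extents with
 * the idiom used throughout this file:
 *
 *     unsigned long long pos = 0;
 *     int i = 0;
 *     do {
 *             esize = e[i].start - pos;    /+ free space before extent i +/
 *             ...                          /+ esize is a candidate gap  +/
 *             pos = e[i].start + e[i].size;
 *             i++;
 *     } while (e[i-1].size);
 *
 * For a disk with one used extent [1000, 1500) and config_size 10000
 * the walk sees the gaps [0, 1000) and [1500, 10000).
 */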
2421 #endif
2422
2423 static int init_super_ddf_bvd(struct supertype *st,
2424 mdu_array_info_t *info,
2425 unsigned long long size,
2426 char *name, char *homehost,
2427 int *uuid, unsigned long long data_offset)
2428 {
2429 /* We are creating a BVD inside a pre-existing container.
2430 * so st->sb is already set.
2431 * We need to create a new vd_config and a new virtual_entry
2432 */
2433 struct ddf_super *ddf = st->sb;
2434 unsigned int venum, i;
2435 struct virtual_entry *ve;
2436 struct vcl *vcl;
2437 struct vd_config *vc;
2438
2439 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2440 pr_err("This ddf already has an array called %s\n", name);
2441 return 0;
2442 }
2443 venum = find_unused_vde(ddf);
2444 if (venum == DDF_NOTFOUND) {
2445 pr_err("Cannot find spare slot for virtual disk\n");
2446 return 0;
2447 }
2448 ve = &ddf->virt->entries[venum];
2449
2450 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2451 * timestamp, random number
2452 */
2453 make_header_guid(ve->guid);
2454 ve->unit = cpu_to_be16(info->md_minor);
2455 ve->pad0 = 0xFFFF;
2456 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2457 DDF_GUID_LEN);
2458 ve->type = cpu_to_be16(0);
2459 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2460 if (info->state & 1) /* clean */
2461 ve->init_state = DDF_init_full;
2462 else
2463 ve->init_state = DDF_init_not;
2464
2465 memset(ve->pad1, 0xff, 14);
2466 memset(ve->name, ' ', 16);
2467 if (name)
2468 strncpy(ve->name, name, 16);
2469 ddf->virt->populated_vdes =
2470 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2471
2472 /* Now create a new vd_config */
2473 if (posix_memalign((void**)&vcl, 512,
2474 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2475 pr_err("%s could not allocate vd_config\n", __func__);
2476 return 0;
2477 }
2478 vcl->vcnum = venum;
2479 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2480 vc = &vcl->conf;
2481
2482 vc->magic = DDF_VD_CONF_MAGIC;
2483 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2484 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2485 vc->seqnum = cpu_to_be32(1);
2486 memset(vc->pad0, 0xff, 24);
2487 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2488 if (layout_md2ddf(info, vc) == -1 ||
2489 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2490 pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
2491 __func__, info->level, info->layout, info->raid_disks);
2492 free(vcl);
2493 return 0;
2494 }
2495 vc->sec_elmnt_seq = 0;
2496 if (alloc_other_bvds(ddf, vcl) != 0) {
2497 pr_err("%s could not allocate other bvds\n",
2498 __func__);
2499 free(vcl);
2500 return 0;
2501 }
2502 vc->blocks = cpu_to_be64(info->size * 2);
2503 vc->array_blocks = cpu_to_be64(
2504 calc_array_size(info->level, info->raid_disks, info->layout,
2505 info->chunk_size, info->size*2));
2506 memset(vc->pad1, 0xff, 8);
2507 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2508 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2509 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2510 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2511 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2512 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2513 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2514 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2515 memset(vc->cache_pol, 0, 8);
2516 vc->bg_rate = 0x80;
2517 memset(vc->pad2, 0xff, 3);
2518 memset(vc->pad3, 0xff, 52);
2519 memset(vc->pad4, 0xff, 192);
2520 memset(vc->v0, 0xff, 32);
2521 memset(vc->v1, 0xff, 32);
2522 memset(vc->v2, 0xff, 16);
2523 memset(vc->v3, 0xff, 16);
2524 memset(vc->vendor, 0xff, 32);
2525
2526 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2527 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
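/* Layout note (added): the tail of a vd_config record holds two arrays
 * of ddf->mppe entries each: the 32-bit phys_refnum[] slots followed by
 * the 64-bit per-disk starting LBAs accessed via LBA_OFFSET() (defined
 * earlier in this file).  That is why conf_rec_len was sized above as
 * 1 + ROUND_UP(mppe * (4+8), 512)/512 sectors, and why unused slots are
 * marked here with 0xffffffff refnums while the LBA array is zeroed.
 */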
2528
2529 for (i = 1; i < vc->sec_elmnt_count; i++) {
2530 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2531 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2532 }
2533
2534 vcl->next = ddf->conflist;
2535 ddf->conflist = vcl;
2536 ddf->currentconf = vcl;
2537 ddf_set_updates_pending(ddf);
2538 return 1;
2539 }
2540
2541
2542 #ifndef MDASSEMBLE
2543 static int get_svd_state(const struct ddf_super *, const struct vcl *);
2544
2545 static void add_to_super_ddf_bvd(struct supertype *st,
2546 mdu_disk_info_t *dk, int fd, char *devname)
2547 {
2548 * fd and devname identify a device within the ddf container (st).
2549 * dk identifies a location in the new BVD.
2550 * We need to find suitable free space in that device and update
2551 * the phys_refnum and lba_offset for the newly created vd_config.
2552 * We might also want to update the type in the phys_disk
2553 * section.
2554 *
2555 * Alternatively: fd == -1 and we have already chosen which device to
2556 * use; it is recorded in dl->raiddisk.
2557 */
2558 struct dl *dl;
2559 struct ddf_super *ddf = st->sb;
2560 struct vd_config *vc;
2561 unsigned int i;
2562 unsigned long long blocks, pos, esize;
2563 struct extent *ex;
2564 unsigned int raid_disk = dk->raid_disk;
2565
2566 if (fd == -1) {
2567 for (dl = ddf->dlist; dl ; dl = dl->next)
2568 if (dl->raiddisk == dk->raid_disk)
2569 break;
2570 } else {
2571 for (dl = ddf->dlist; dl ; dl = dl->next)
2572 if (dl->major == dk->major &&
2573 dl->minor == dk->minor)
2574 break;
2575 }
2576 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
2577 return;
2578
2579 vc = &ddf->currentconf->conf;
2580 if (vc->sec_elmnt_count > 1) {
2581 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2582 if (raid_disk >= n)
2583 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2584 raid_disk %= n;
2585 }
2586
2587 ex = get_extents(ddf, dl);
2588 if (!ex)
2589 return;
2590
2591 i = 0; pos = 0;
2592 blocks = be64_to_cpu(vc->blocks);
2593 if (ddf->currentconf->block_sizes)
2594 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2595
2596 do {
2597 esize = ex[i].start - pos;
2598 if (esize >= blocks)
2599 break;
2600 pos = ex[i].start + ex[i].size;
2601 i++;
2602 } while (ex[i-1].size);
2603
2604 free(ex);
2605 if (esize < blocks)
2606 return;
2607
2608 ddf->currentdev = dk->raid_disk;
2609 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2610 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2611
2612 for (i = 0; i < ddf->max_part ; i++)
2613 if (dl->vlist[i] == NULL)
2614 break;
2615 if (i == ddf->max_part)
2616 return;
2617 dl->vlist[i] = ddf->currentconf;
2618
2619 if (fd >= 0)
2620 dl->fd = fd;
2621 if (devname)
2622 dl->devname = devname;
2623
2624 /* Check if we can mark array as optimal yet */
2625 i = ddf->currentconf->vcnum;
2626 ddf->virt->entries[i].state =
2627 (ddf->virt->entries[i].state & ~DDF_state_mask)
2628 | get_svd_state(ddf, ddf->currentconf);
2629 be16_clear(ddf->phys->entries[dl->pdnum].type,
2630 cpu_to_be16(DDF_Global_Spare));
2631 be16_set(ddf->phys->entries[dl->pdnum].type,
2632 cpu_to_be16(DDF_Active_in_VD));
2633 dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
2634 __func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
2635 ddf->currentconf->vcnum, guid_str(vc->guid),
2636 dk->raid_disk);
2637 ddf_set_updates_pending(ddf);
2638 }
2639
2640 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2641 {
2642 unsigned int i;
2643 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2644 if (all_ff(ddf->phys->entries[i].guid))
2645 return i;
2646 }
2647 return DDF_NOTFOUND;
2648 }
2649
2650 /* add a device to a container, either while creating it or while
2651 * expanding a pre-existing container
2652 */
2653 static int add_to_super_ddf(struct supertype *st,
2654 mdu_disk_info_t *dk, int fd, char *devname,
2655 unsigned long long data_offset)
2656 {
2657 struct ddf_super *ddf = st->sb;
2658 struct dl *dd;
2659 time_t now;
2660 struct tm *tm;
2661 unsigned long long size;
2662 struct phys_disk_entry *pde;
2663 unsigned int n, i;
2664 struct stat stb;
2665 __u32 *tptr;
2666
2667 if (ddf->currentconf) {
2668 add_to_super_ddf_bvd(st, dk, fd, devname);
2669 return 0;
2670 }
2671
2672 /* This is device numbered dk->number. We need to create
2673 * a phys_disk entry and a more detailed disk_data entry.
2674 */
2675 fstat(fd, &stb);
2676 n = find_unused_pde(ddf);
2677 if (n == DDF_NOTFOUND) {
2678 pr_err("%s: No free slot in array, cannot add disk\n",
2679 __func__);
2680 return 1;
2681 }
2682 pde = &ddf->phys->entries[n];
2683 get_dev_size(fd, NULL, &size);
2684 if (size <= 32*1024*1024) {
2685 pr_err("%s: device size must be at least 32MB\n",
2686 __func__);
2687 return 1;
2688 }
2689 size >>= 9;
2690
2691 if (posix_memalign((void**)&dd, 512,
2692 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2693 pr_err("%s could allocate buffer for new disk, aborting\n",
2694 __func__);
2695 return 1;
2696 }
2697 dd->major = major(stb.st_rdev);
2698 dd->minor = minor(stb.st_rdev);
2699 dd->devname = devname;
2700 dd->fd = fd;
2701 dd->spare = NULL;
2702
2703 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2704 now = time(0);
2705 tm = localtime(&now);
2706 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2707 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2708 tptr = (__u32 *)(dd->disk.guid + 16);
2709 *tptr++ = random32();
2710 *tptr = random32();
2711
2712 do {
2713 /* Cannot be bothered finding a CRC of some irrelevant details */
2714 dd->disk.refnum._v32 = random32();
2715 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2716 i > 0; i--)
2717 if (be32_eq(ddf->phys->entries[i-1].refnum,
2718 dd->disk.refnum))
2719 break;
2720 } while (i > 0);
2721
2722 dd->disk.forced_ref = 1;
2723 dd->disk.forced_guid = 1;
2724 memset(dd->disk.vendor, ' ', 32);
2725 memcpy(dd->disk.vendor, "Linux", 5);
2726 memset(dd->disk.pad, 0xff, 442);
2727 for (i = 0; i < ddf->max_part ; i++)
2728 dd->vlist[i] = NULL;
2729
2730 dd->pdnum = n;
2731
2732 if (st->update_tail) {
2733 int len = (sizeof(struct phys_disk) +
2734 sizeof(struct phys_disk_entry));
2735 struct phys_disk *pd;
2736
2737 pd = xmalloc(len);
2738 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2739 pd->used_pdes = cpu_to_be16(n);
2740 pde = &pd->entries[0];
2741 dd->mdupdate = pd;
2742 } else
2743 ddf->phys->used_pdes = cpu_to_be16(
2744 1 + be16_to_cpu(ddf->phys->used_pdes));
2745
2746 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2747 pde->refnum = dd->disk.refnum;
2748 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2749 pde->state = cpu_to_be16(DDF_Online);
2750 dd->size = size;
2751 /*
2752 * If there is already a device in dlist, try to reserve the same
2753 * amount of workspace. Otherwise, use 32MB.
2754 * We checked disk size above already.
2755 */
2756 #define __calc_lba(new, old, lba, mb) do { \
2757 unsigned long long dif; \
2758 if ((old) != NULL) \
2759 dif = (old)->size - be64_to_cpu((old)->lba); \
2760 else \
2761 dif = (new)->size; \
2762 if ((new)->size > dif) \
2763 (new)->lba = cpu_to_be64((new)->size - dif); \
2764 else \
2765 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2766 } while (0)
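/* Worked example (added): for the very first disk in the container
 * ("old" == NULL) the macro falls through to the fixed distances, so
 * the calls below place workspace_lba and secondary_lba 32MiB
 * (32*1024*2 sectors) from the end of the device and primary_lba 16MiB
 * from the end.  For every later disk, dif is the distance the first
 * disk keeps between that LBA and its own end, so all members reserve
 * the same amount of space behind the metadata.
 */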
2767 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2768 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2769 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2770 pde->config_size = dd->workspace_lba;
2771
2772 sprintf(pde->path, "%17.17s", "Information: nil");
2773 memset(pde->pad, 0xff, 6);
2774
2775 if (st->update_tail) {
2776 dd->next = ddf->add_list;
2777 ddf->add_list = dd;
2778 } else {
2779 dd->next = ddf->dlist;
2780 ddf->dlist = dd;
2781 ddf_set_updates_pending(ddf);
2782 }
2783
2784 return 0;
2785 }
2786
2787 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2788 {
2789 struct ddf_super *ddf = st->sb;
2790 struct dl *dl;
2791
2792 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2793 * disappeared from the container.
2794 * We need to arrange that it disappears from the metadata and
2795 * internal data structures too.
2796 * Most of the work is done by ddf_process_update which edits
2797 * the metadata and closes the file handle and attaches the memory
2798 * where free_updates will free it.
2799 */
2800 for (dl = ddf->dlist; dl ; dl = dl->next)
2801 if (dl->major == dk->major &&
2802 dl->minor == dk->minor)
2803 break;
2804 if (!dl)
2805 return -1;
2806
2807 if (st->update_tail) {
2808 int len = (sizeof(struct phys_disk) +
2809 sizeof(struct phys_disk_entry));
2810 struct phys_disk *pd;
2811
2812 pd = xmalloc(len);
2813 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2814 pd->used_pdes = cpu_to_be16(dl->pdnum);
2815 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2816 append_metadata_update(st, pd, len);
2817 }
2818 return 0;
2819 }
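/* Note (added): when mdmon is running (st->update_tail != NULL), both
 * add_to_super_ddf() and the function above communicate with it by
 * queueing a miniature phys_disk record holding a single entry;
 * used_pdes is borrowed to carry the physical disk index and the
 * entry's state/guid describe the change (DDF_Missing here, a fresh
 * DDF_Online entry on add).  ddf_process_update() later applies it to
 * the live metadata.
 */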
2820 #endif
2821
2822 /*
2823 * This is the write_init_super method for a ddf container. It is
2824 * called when creating a container or adding another device to a
2825 * container.
2826 */
2827 #define NULL_CONF_SZ 4096
2828
2829 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
2830 {
2831 unsigned long long sector;
2832 struct ddf_header *header;
2833 int fd, i, n_config, conf_size, buf_size;
2834 int ret = 0;
2835 char *conf;
2836
2837 fd = d->fd;
2838
2839 switch (type) {
2840 case DDF_HEADER_PRIMARY:
2841 header = &ddf->primary;
2842 sector = be64_to_cpu(header->primary_lba);
2843 break;
2844 case DDF_HEADER_SECONDARY:
2845 header = &ddf->secondary;
2846 sector = be64_to_cpu(header->secondary_lba);
2847 break;
2848 default:
2849 return 0;
2850 }
2851
2852 header->type = type;
2853 header->openflag = 1;
2854 header->crc = calc_crc(header, 512);
2855
2856 lseek64(fd, sector<<9, 0);
2857 if (write(fd, header, 512) < 0)
2858 goto out;
2859
2860 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2861 if (write(fd, &ddf->controller, 512) < 0)
2862 goto out;
2863
2864 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2865 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2866 goto out;
2867 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2868 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2869 goto out;
2870
2871 /* Now write lots of config records. */
2872 n_config = ddf->max_part;
2873 conf_size = ddf->conf_rec_len * 512;
2874 conf = ddf->conf;
2875 buf_size = conf_size * (n_config + 1);
2876 if (!conf) {
2877 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
2878 goto out;
2879 ddf->conf = conf;
2880 }
2881 for (i = 0 ; i <= n_config ; i++) {
2882 struct vcl *c;
2883 struct vd_config *vdc = NULL;
2884 if (i == n_config) {
2885 c = (struct vcl *)d->spare;
2886 if (c)
2887 vdc = &c->conf;
2888 } else {
2889 unsigned int dummy;
2890 c = d->vlist[i];
2891 if (c)
2892 get_pd_index_from_refnum(
2893 c, d->disk.refnum,
2894 ddf->mppe,
2895 (const struct vd_config **)&vdc,
2896 &dummy);
2897 }
2898 if (c) {
2899 dprintf("writing conf record %i on disk %08x for %s/%u\n",
2900 i, be32_to_cpu(d->disk.refnum),
2901 guid_str(vdc->guid),
2902 vdc->sec_elmnt_seq);
2903 vdc->seqnum = header->seq;
2904 vdc->crc = calc_crc(vdc, conf_size);
2905 memcpy(conf + i*conf_size, vdc, conf_size);
2906 } else
2907 memset(conf + i*conf_size, 0xff, conf_size);
2908 }
2909 if (write(fd, conf, buf_size) != buf_size)
2910 goto out;
2911
2912 d->disk.crc = calc_crc(&d->disk, 512);
2913 if (write(fd, &d->disk, 512) < 0)
2914 goto out;
2915
2916 ret = 1;
2917 out:
2918 header->openflag = 0;
2919 header->crc = calc_crc(header, 512);
2920
2921 lseek64(fd, sector<<9, 0);
2922 if (write(fd, header, 512) < 0)
2923 ret = 0;
2924
2925 return ret;
2926 }
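/* Summary of the write sequence above (added commentary): the header is
 * first written with openflag set and its CRC updated, then controller,
 * phys and virt records, (max_part + 1) config records -- the final
 * slot holding the disk's spare record if any -- and the per-disk data
 * block.  Finally the same header is rewritten with openflag cleared,
 * so a header still marked open indicates an interrupted update.
 */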
2927
2928 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
2929 {
2930 unsigned long long size;
2931 int fd = d->fd;
2932 if (fd < 0)
2933 return 0;
2934
2935 /* We need to fill in the primary, (secondary) and workspace
2936 * lba's in the headers, set their checksums,
2937 * and also checksum phys, virt, etc.
2938 *
2939 * Then write everything out, finally the anchor is written.
2940 */
2941 get_dev_size(fd, NULL, &size);
2942 size /= 512;
2943 if (be64_to_cpu(d->workspace_lba) != 0ULL)
2944 ddf->anchor.workspace_lba = d->workspace_lba;
2945 else
2946 ddf->anchor.workspace_lba =
2947 cpu_to_be64(size - 32*1024*2);
2948 if (be64_to_cpu(d->primary_lba) != 0ULL)
2949 ddf->anchor.primary_lba = d->primary_lba;
2950 else
2951 ddf->anchor.primary_lba =
2952 cpu_to_be64(size - 16*1024*2);
2953 if (be64_to_cpu(d->secondary_lba) != 0ULL)
2954 ddf->anchor.secondary_lba = d->secondary_lba;
2955 else
2956 ddf->anchor.secondary_lba =
2957 cpu_to_be64(size - 32*1024*2);
2958 ddf->anchor.seq = ddf->active->seq;
2959 memcpy(&ddf->primary, &ddf->anchor, 512);
2960 memcpy(&ddf->secondary, &ddf->anchor, 512);
2961
2962 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
2963 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
2964 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
2965
2966 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
2967 return 0;
2968
2969 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
2970 return 0;
2971
2972 lseek64(fd, (size-1)*512, SEEK_SET);
2973 if (write(fd, &ddf->anchor, 512) < 0)
2974 return 0;
2975
2976 return 1;
2977 }
2978
2979 #ifndef MDASSEMBLE
2980 static int __write_init_super_ddf(struct supertype *st)
2981 {
2982 struct ddf_super *ddf = st->sb;
2983 struct dl *d;
2984 int attempts = 0;
2985 int successes = 0;
2986
2987 pr_state(ddf, __func__);
2988
2989 /* try to write updated metadata,
2990 * if we catch a failure move on to the next disk
2991 */
2992 for (d = ddf->dlist; d; d=d->next) {
2993 attempts++;
2994 successes += _write_super_to_disk(ddf, d);
2995 }
2996
2997 return attempts != successes;
2998 }
2999
3000 static int write_init_super_ddf(struct supertype *st)
3001 {
3002 struct ddf_super *ddf = st->sb;
3003 struct vcl *currentconf = ddf->currentconf;
3004
3005 /* we are done with currentconf; reset it so st points at the container */
3006 ddf->currentconf = NULL;
3007
3008 if (st->update_tail) {
3009 /* queue the virtual_disk and vd_config as metadata updates */
3010 struct virtual_disk *vd;
3011 struct vd_config *vc;
3012 int len, tlen;
3013 unsigned int i;
3014
3015 if (!currentconf) {
3016 int len = (sizeof(struct phys_disk) +
3017 sizeof(struct phys_disk_entry));
3018
3019 /* adding a disk to the container. */
3020 if (!ddf->add_list)
3021 return 0;
3022
3023 append_metadata_update(st, ddf->add_list->mdupdate, len);
3024 ddf->add_list->mdupdate = NULL;
3025 return 0;
3026 }
3027
3028 /* Newly created VD */
3029
3030 /* First the virtual disk. We have a slightly fake header */
3031 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3032 vd = xmalloc(len);
3033 *vd = *ddf->virt;
3034 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3035 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3036 append_metadata_update(st, vd, len);
3037
3038 /* Then the vd_config */
3039 len = ddf->conf_rec_len * 512;
3040 tlen = len * currentconf->conf.sec_elmnt_count;
3041 vc = xmalloc(tlen);
3042 memcpy(vc, &currentconf->conf, len);
3043 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3044 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3045 len);
3046 append_metadata_update(st, vc, tlen);
3047
3048 /* FIXME I need to close the fds! */
3049 return 0;
3050 } else {
3051 struct dl *d;
3052 if (!currentconf)
3053 for (d = ddf->dlist; d; d=d->next)
3054 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3055 return __write_init_super_ddf(st);
3056 }
3057 }
3058
3059 #endif
3060
3061 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3062 unsigned long long data_offset)
3063 {
3064 /* We must reserve the last 32Meg */
3065 if (devsize <= 32*1024*2)
3066 return 0;
3067 return devsize - 32*1024*2;
3068 }
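/* Example (added): devsize is in 512-byte sectors, so the reservation
 * is 32*1024*2 = 65536 sectors (32MiB); a 1TiB member therefore offers
 * 2147418112 of its 2147483648 sectors to arrays.
 */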
3069
3070 #ifndef MDASSEMBLE
3071
3072 static int reserve_space(struct supertype *st, int raiddisks,
3073 unsigned long long size, int chunk,
3074 unsigned long long *freesize)
3075 {
3076 /* Find 'raiddisks' spare extents at least 'size' big (but
3077 * only caring about multiples of 'chunk') and remember
3078 * them.
3079 * If they cannot be found, fail.
3080 */
3081 struct dl *dl;
3082 struct ddf_super *ddf = st->sb;
3083 int cnt = 0;
3084
3085 for (dl = ddf->dlist; dl ; dl=dl->next) {
3086 dl->raiddisk = -1;
3087 dl->esize = 0;
3088 }
3089 /* Now find largest extent on each device */
3090 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3091 struct extent *e = get_extents(ddf, dl);
3092 unsigned long long pos = 0;
3093 int i = 0;
3094 int found = 0;
3095 unsigned long long minsize = size;
3096
3097 if (size == 0)
3098 minsize = chunk;
3099
3100 if (!e)
3101 continue;
3102 do {
3103 unsigned long long esize;
3104 esize = e[i].start - pos;
3105 if (esize >= minsize) {
3106 found = 1;
3107 minsize = esize;
3108 }
3109 pos = e[i].start + e[i].size;
3110 i++;
3111 } while (e[i-1].size);
3112 if (found) {
3113 cnt++;
3114 dl->esize = minsize;
3115 }
3116 free(e);
3117 }
3118 if (cnt < raiddisks) {
3119 pr_err("not enough devices with space to create array.\n");
3120 return 0; /* Not enough devices with large enough free extents */
3121 }
3122 if (size == 0) {
3123 /* choose the largest size of which there are at least 'raiddisks' extents */
3124 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3125 struct dl *dl2;
3126 if (dl->esize <= size)
3127 continue;
3128 /* This is bigger than 'size', see if there are enough */
3129 cnt = 0;
3130 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3131 if (dl2->esize >= dl->esize)
3132 cnt++;
3133 if (cnt >= raiddisks)
3134 size = dl->esize;
3135 }
3136 if (chunk) {
3137 size = size / chunk;
3138 size *= chunk;
3139 }
3140 *freesize = size;
3141 if (size < 32) {
3142 pr_err("not enough spare devices to create array.\n");
3143 return 0;
3144 }
3145 }
3146 /* We have a 'size' for which enough devices have space.
3147 * We simply do a first-fit */
3148 cnt = 0;
3149 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3150 if (dl->esize < size)
3151 continue;
3152
3153 dl->raiddisk = cnt;
3154 cnt++;
3155 }
3156 return 1;
3157 }
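/* Worked example (added): with three container members whose largest
 * free extents are 100GiB, 100GiB and 40GiB, a request for raiddisks=2
 * and size=0 first records those largest extents per disk, then picks
 * the biggest size available on at least two of them (100GiB, rounded
 * down to a chunk multiple), and finally assigns dl->raiddisk 0 and 1
 * to the first two disks that can hold it.
 */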
3158
3159 static int
3160 validate_geometry_ddf_container(struct supertype *st,
3161 int level, int layout, int raiddisks,
3162 int chunk, unsigned long long size,
3163 unsigned long long data_offset,
3164 char *dev, unsigned long long *freesize,
3165 int verbose);
3166
3167 static int validate_geometry_ddf_bvd(struct supertype *st,
3168 int level, int layout, int raiddisks,
3169 int *chunk, unsigned long long size,
3170 unsigned long long data_offset,
3171 char *dev, unsigned long long *freesize,
3172 int verbose);
3173
3174 static int validate_geometry_ddf(struct supertype *st,
3175 int level, int layout, int raiddisks,
3176 int *chunk, unsigned long long size,
3177 unsigned long long data_offset,
3178 char *dev, unsigned long long *freesize,
3179 int verbose)
3180 {
3181 int fd;
3182 struct mdinfo *sra;
3183 int cfd;
3184
3185 /* ddf potentially supports lots of things, but it depends on
3186 * what devices are offered (and maybe kernel version?)
3187 * If given unused devices, we will make a container.
3188 * If given devices in a container, we will make a BVD.
3189 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3190 */
3191
3192 if (*chunk == UnSet)
3193 *chunk = DEFAULT_CHUNK;
3194
3195 if (level == -1000000) level = LEVEL_CONTAINER;
3196 if (level == LEVEL_CONTAINER) {
3197 /* Must be a fresh device to add to a container */
3198 return validate_geometry_ddf_container(st, level, layout,
3199 raiddisks, *chunk,
3200 size, data_offset, dev,
3201 freesize,
3202 verbose);
3203 }
3204
3205 if (!dev) {
3206 mdu_array_info_t array = {
3207 .level = level, .layout = layout,
3208 .raid_disks = raiddisks
3209 };
3210 struct vd_config conf;
3211 if (layout_md2ddf(&array, &conf) == -1) {
3212 if (verbose)
3213 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3214 level, layout, raiddisks);
3215 return 0;
3216 }
3217 /* Should check layout? etc */
3218
3219 if (st->sb && freesize) {
3220 /* --create was given a container to create in.
3221 * So we need to check that there are enough
3222 * free spaces and return the amount of space.
3223 * We may as well remember which drives were
3224 * chosen so that add_to_super/getinfo_super
3225 * can return them.
3226 */
3227 return reserve_space(st, raiddisks, size, *chunk, freesize);
3228 }
3229 return 1;
3230 }
3231
3232 if (st->sb) {
3233 /* A container has already been opened, so we are
3234 * creating in there. Maybe a BVD, maybe an SVD.
3235 * Should make a distinction one day.
3236 */
3237 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3238 chunk, size, data_offset, dev,
3239 freesize,
3240 verbose);
3241 }
3242 /* This is the first device for the array.
3243 * If it is a container, we read it in and do automagic allocations,
3244 * no other devices should be given.
3245 * Otherwise it must be a member device of a container, and we
3246 * do manual allocation.
3247 * Later we should check for a BVD and make an SVD.
3248 */
3249 fd = open(dev, O_RDONLY|O_EXCL, 0);
3250 if (fd >= 0) {
3251 sra = sysfs_read(fd, NULL, GET_VERSION);
3252 close(fd);
3253 if (sra && sra->array.major_version == -1 &&
3254 strcmp(sra->text_version, "ddf") == 0) {
3255
3256 /* load super */
3257 /* find space for 'n' devices. */
3258 /* remember the devices */
3259 /* Somehow return the fact that we have enough */
3260 }
3261
3262 if (verbose)
3263 pr_err("ddf: Cannot create this array "
3264 "on device %s - a container is required.\n",
3265 dev);
3266 return 0;
3267 }
3268 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3269 if (verbose)
3270 pr_err("ddf: Cannot open %s: %s\n",
3271 dev, strerror(errno));
3272 return 0;
3273 }
3274 /* Well, it is in use by someone, maybe a 'ddf' container. */
3275 cfd = open_container(fd);
3276 if (cfd < 0) {
3277 close(fd);
3278 if (verbose)
3279 pr_err("ddf: Cannot use %s: %s\n",
3280 dev, strerror(EBUSY));
3281 return 0;
3282 }
3283 sra = sysfs_read(cfd, NULL, GET_VERSION);
3284 close(fd);
3285 if (sra && sra->array.major_version == -1 &&
3286 strcmp(sra->text_version, "ddf") == 0) {
3287 /* This is a member of a ddf container. Load the container
3288 * and try to create a bvd
3289 */
3290 struct ddf_super *ddf;
3291 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3292 st->sb = ddf;
3293 strcpy(st->container_devnm, fd2devnm(cfd));
3294 close(cfd);
3295 return validate_geometry_ddf_bvd(st, level, layout,
3296 raiddisks, chunk, size,
3297 data_offset,
3298 dev, freesize,
3299 verbose);
3300 }
3301 close(cfd);
3302 } else /* device may belong to a different container */
3303 return 0;
3304
3305 return 1;
3306 }
3307
3308 static int
3309 validate_geometry_ddf_container(struct supertype *st,
3310 int level, int layout, int raiddisks,
3311 int chunk, unsigned long long size,
3312 unsigned long long data_offset,
3313 char *dev, unsigned long long *freesize,
3314 int verbose)
3315 {
3316 int fd;
3317 unsigned long long ldsize;
3318
3319 if (level != LEVEL_CONTAINER)
3320 return 0;
3321 if (!dev)
3322 return 1;
3323
3324 fd = open(dev, O_RDONLY|O_EXCL, 0);
3325 if (fd < 0) {
3326 if (verbose)
3327 pr_err("ddf: Cannot open %s: %s\n",
3328 dev, strerror(errno));
3329 return 0;
3330 }
3331 if (!get_dev_size(fd, dev, &ldsize)) {
3332 close(fd);
3333 return 0;
3334 }
3335 close(fd);
3336
3337 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3338 if (*freesize == 0)
3339 return 0;
3340
3341 return 1;
3342 }
3343
3344 static int validate_geometry_ddf_bvd(struct supertype *st,
3345 int level, int layout, int raiddisks,
3346 int *chunk, unsigned long long size,
3347 unsigned long long data_offset,
3348 char *dev, unsigned long long *freesize,
3349 int verbose)
3350 {
3351 struct stat stb;
3352 struct ddf_super *ddf = st->sb;
3353 struct dl *dl;
3354 unsigned long long pos = 0;
3355 unsigned long long maxsize;
3356 struct extent *e;
3357 int i;
3358 /* ddf/bvd supports lots of things, but not containers */
3359 if (level == LEVEL_CONTAINER) {
3360 if (verbose)
3361 pr_err("DDF cannot create a container within an container\n");
3362 return 0;
3363 }
3364 /* We must have the container info already read in. */
3365 if (!ddf)
3366 return 0;
3367
3368 if (!dev) {
3369 /* General test: make sure there is space for
3370 * 'raiddisks' device extents of size 'size'.
3371 */
3372 unsigned long long minsize = size;
3373 int dcnt = 0;
3374 if (minsize == 0)
3375 minsize = 8;
3376 for (dl = ddf->dlist; dl ; dl = dl->next)
3377 {
3378 int found = 0;
3379 pos = 0;
3380
3381 i = 0;
3382 e = get_extents(ddf, dl);
3383 if (!e) continue;
3384 do {
3385 unsigned long long esize;
3386 esize = e[i].start - pos;
3387 if (esize >= minsize)
3388 found = 1;
3389 pos = e[i].start + e[i].size;
3390 i++;
3391 } while (e[i-1].size);
3392 if (found)
3393 dcnt++;
3394 free(e);
3395 }
3396 if (dcnt < raiddisks) {
3397 if (verbose)
3398 pr_err("ddf: Not enough devices with "
3399 "space for this array (%d < %d)\n",
3400 dcnt, raiddisks);
3401 return 0;
3402 }
3403 return 1;
3404 }
3405 /* This device must be a member of the set */
3406 if (stat(dev, &stb) < 0)
3407 return 0;
3408 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3409 return 0;
3410 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3411 if (dl->major == (int)major(stb.st_rdev) &&
3412 dl->minor == (int)minor(stb.st_rdev))
3413 break;
3414 }
3415 if (!dl) {
3416 if (verbose)
3417 pr_err("ddf: %s is not in the "
3418 "same DDF set\n",
3419 dev);
3420 return 0;
3421 }
3422 e = get_extents(ddf, dl);
3423 maxsize = 0;
3424 i = 0;
3425 if (e) do {
3426 unsigned long long esize;
3427 esize = e[i].start - pos;
3428 if (esize >= maxsize)
3429 maxsize = esize;
3430 pos = e[i].start + e[i].size;
3431 i++;
3432 } while (e[i-1].size);
3433 *freesize = maxsize;
3434 // FIXME here I am
3435
3436 return 1;
3437 }
3438
3439 static int load_super_ddf_all(struct supertype *st, int fd,
3440 void **sbp, char *devname)
3441 {
3442 struct mdinfo *sra;
3443 struct ddf_super *super;
3444 struct mdinfo *sd, *best = NULL;
3445 int bestseq = 0;
3446 int seq;
3447 char nm[20];
3448 int dfd;
3449
3450 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3451 if (!sra)
3452 return 1;
3453 if (sra->array.major_version != -1 ||
3454 sra->array.minor_version != -2 ||
3455 strcmp(sra->text_version, "ddf") != 0)
3456 return 1;
3457
3458 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3459 return 1;
3460 memset(super, 0, sizeof(*super));
3461
3462 /* first, try each device, and choose the best ddf */
3463 for (sd = sra->devs ; sd ; sd = sd->next) {
3464 int rv;
3465 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3466 dfd = dev_open(nm, O_RDONLY);
3467 if (dfd < 0)
3468 return 2;
3469 rv = load_ddf_headers(dfd, super, NULL);
3470 close(dfd);
3471 if (rv == 0) {
3472 seq = be32_to_cpu(super->active->seq);
3473 if (super->active->openflag)
3474 seq--;
3475 if (!best || seq > bestseq) {
3476 bestseq = seq;
3477 best = sd;
3478 }
3479 }
3480 }
3481 if (!best)
3482 return 1;
3483 /* OK, load this ddf */
3484 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3485 dfd = dev_open(nm, O_RDONLY);
3486 if (dfd < 0)
3487 return 1;
3488 load_ddf_headers(dfd, super, NULL);
3489 load_ddf_global(dfd, super, NULL);
3490 close(dfd);
3491 /* Now we need the device-local bits */
3492 for (sd = sra->devs ; sd ; sd = sd->next) {
3493 int rv;
3494
3495 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3496 dfd = dev_open(nm, O_RDWR);
3497 if (dfd < 0)
3498 return 2;
3499 rv = load_ddf_headers(dfd, super, NULL);
3500 if (rv == 0)
3501 rv = load_ddf_local(dfd, super, NULL, 1);
3502 if (rv)
3503 return 1;
3504 }
3505
3506 *sbp = super;
3507 if (st->ss == NULL) {
3508 st->ss = &super_ddf;
3509 st->minor_version = 0;
3510 st->max_devs = 512;
3511 }
3512 strcpy(st->container_devnm, fd2devnm(fd));
3513 return 0;
3514 }
3515
3516 static int load_container_ddf(struct supertype *st, int fd,
3517 char *devname)
3518 {
3519 return load_super_ddf_all(st, fd, &st->sb, devname);
3520 }
3521
3522 #endif /* MDASSEMBLE */
3523
3524 static int check_secondary(const struct vcl *vc)
3525 {
3526 const struct vd_config *conf = &vc->conf;
3527 int i;
3528
3529 /* The only DDF secondary RAID level md can support is
3530 * RAID 10, if the stripe sizes and Basic volume sizes
3531 * are all equal.
3532 * Other configurations could in theory be supported by exposing
3533 * the BVDs to user space and using device mapper for the secondary
3534 * mapping. So far we don't support that.
3535 */
3536
3537 __u64 sec_elements[4] = {0, 0, 0, 0};
3538 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1ULL<<((n)&63)))
3539 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1ULL<<((n)&63))) != 0)
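/* Note (added): sec_elements is a 256-bit bitmap (4 * 64 bits) indexed
 * by sec_elmnt_seq.  Each BVD that is present sets its bit via
 * __set_sec_seen(), and the final loop below checks that every
 * sequence number 0 .. sec_elmnt_count-1 was seen, i.e. that no BVD of
 * the secondary (RAID10-style) configuration is missing.
 */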
3540
3541 if (vc->other_bvds == NULL) {
3542 pr_err("No BVDs for secondary RAID found\n");
3543 return -1;
3544 }
3545 if (conf->prl != DDF_RAID1) {
3546 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3547 return -1;
3548 }
3549 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3550 pr_err("Secondary RAID level %d is unsupported\n",
3551 conf->srl);
3552 return -1;
3553 }
3554 __set_sec_seen(conf->sec_elmnt_seq);
3555 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3556 const struct vd_config *bvd = vc->other_bvds[i];
3557 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3558 continue;
3559 if (bvd->srl != conf->srl) {
3560 pr_err("Inconsistent secondary RAID level across BVDs\n");
3561 return -1;
3562 }
3563 if (bvd->prl != conf->prl) {
3564 pr_err("Different RAID levels for BVDs are unsupported\n");
3565 return -1;
3566 }
3567 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3568 pr_err("All BVDs must have the same number of primary elements\n");
3569 return -1;
3570 }
3571 if (bvd->chunk_shift != conf->chunk_shift) {
3572 pr_err("Different strip sizes for BVDs are unsupported\n");
3573 return -1;
3574 }
3575 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3576 pr_err("Different BVD sizes are unsupported\n");
3577 return -1;
3578 }
3579 __set_sec_seen(bvd->sec_elmnt_seq);
3580 }
3581 for (i = 0; i < conf->sec_elmnt_count; i++) {
3582 if (!__was_sec_seen(i)) {
3583 pr_err("BVD %d is missing\n", i);
3584 return -1;
3585 }
3586 }
3587 return 0;
3588 }
3589
3590 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3591 be32 refnum, unsigned int nmax,
3592 const struct vd_config **bvd,
3593 unsigned int *idx)
3594 {
3595 unsigned int i, j, n, sec, cnt;
3596
3597 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3598 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3599
3600 for (i = 0, j = 0 ; i < nmax ; i++) {
3601 /* j counts valid entries for this BVD */
3602 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3603 j++;
3604 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3605 *bvd = &vc->conf;
3606 *idx = i;
3607 return sec * cnt + j - 1;
3608 }
3609 }
3610 if (vc->other_bvds == NULL)
3611 goto bad;
3612
3613 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3614 struct vd_config *vd = vc->other_bvds[n-1];
3615 sec = vd->sec_elmnt_seq;
3616 if (sec == DDF_UNUSED_BVD)
3617 continue;
3618 for (i = 0, j = 0 ; i < nmax ; i++) {
3619 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3620 j++;
3621 if (be32_eq(vd->phys_refnum[i], refnum)) {
3622 *bvd = vd;
3623 *idx = i;
3624 return sec * cnt + j - 1;
3625 }
3626 }
3627 }
3628 bad:
3629 *bvd = NULL;
3630 return DDF_NOTFOUND;
3631 }
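/* Mapping note (added): the value returned above is the md raid-disk
 * index of the physical device,
 *
 *     sec_elmnt_seq * prim_elmnt_count + (rank of refnum within its BVD),
 *
 * while *bvd and *idx report which vd_config holds the refnum and at
 * which slot.  For a 2x3 RAID10-style layout (two BVDs of three disks),
 * a refnum found as the second valid entry of the BVD with
 * sec_elmnt_seq == 1 maps to raid disk 1*3 + 1 == 4.
 */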
3632
3633 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3634 {
3635 /* Given a container loaded by load_super_ddf_all,
3636 * extract information about all the arrays into
3637 * an mdinfo tree.
3638 *
3639 * For each vcl in conflist: create an mdinfo, fill it in,
3640 * then look for matching devices (phys_refnum) in dlist
3641 * and create appropriate device mdinfo.
3642 */
3643 struct ddf_super *ddf = st->sb;
3644 struct mdinfo *rest = NULL;
3645 struct vcl *vc;
3646
3647 for (vc = ddf->conflist ; vc ; vc=vc->next)
3648 {
3649 unsigned int i;
3650 unsigned int j;
3651 struct mdinfo *this;
3652 char *ep;
3653 __u32 *cptr;
3654 unsigned int pd;
3655
3656 if (subarray &&
3657 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3658 *ep != '\0'))
3659 continue;
3660
3661 if (vc->conf.sec_elmnt_count > 1) {
3662 if (check_secondary(vc) != 0)
3663 continue;
3664 }
3665
3666 this = xcalloc(1, sizeof(*this));
3667 this->next = rest;
3668 rest = this;
3669
3670 if (layout_ddf2md(&vc->conf, &this->array))
3671 continue;
3672 this->array.md_minor = -1;
3673 this->array.major_version = -1;
3674 this->array.minor_version = -2;
3675 this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
3676 cptr = (__u32 *)(vc->conf.guid + 16);
3677 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3678 this->array.utime = DECADE +
3679 be32_to_cpu(vc->conf.timestamp);
3680 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3681
3682 i = vc->vcnum;
3683 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3684 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3685 DDF_init_full) {
3686 this->array.state = 0;
3687 this->resync_start = 0;
3688 } else {
3689 this->array.state = 1;
3690 this->resync_start = MaxSector;
3691 }
3692 memcpy(this->name, ddf->virt->entries[i].name, 16);
3693 this->name[16]=0;
3694 for(j=0; j<16; j++)
3695 if (this->name[j] == ' ')
3696 this->name[j] = 0;
3697
3698 memset(this->uuid, 0, sizeof(this->uuid));
3699 this->component_size = be64_to_cpu(vc->conf.blocks);
3700 this->array.size = this->component_size / 2;
3701 this->container_member = i;
3702
3703 ddf->currentconf = vc;
3704 uuid_from_super_ddf(st, this->uuid);
3705 if (!subarray)
3706 ddf->currentconf = NULL;
3707
3708 sprintf(this->text_version, "/%s/%d",
3709 st->container_devnm, this->container_member);
3710
3711 for (pd = 0; pd < be16_to_cpu(ddf->phys->used_pdes); pd++) {
3712 struct mdinfo *dev;
3713 struct dl *d;
3714 const struct vd_config *bvd;
3715 unsigned int iphys;
3716 int stt;
3717
3718 if (be32_to_cpu(ddf->phys->entries[pd].refnum)
3719 == 0xFFFFFFFF)
3720 continue;
3721
3722 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3723 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3724 != DDF_Online)
3725 continue;
3726
3727 i = get_pd_index_from_refnum(
3728 vc, ddf->phys->entries[pd].refnum,
3729 ddf->mppe, &bvd, &iphys);
3730 if (i == DDF_NOTFOUND)
3731 continue;
3732
3733 this->array.working_disks++;
3734
3735 for (d = ddf->dlist; d ; d=d->next)
3736 if (be32_eq(d->disk.refnum,
3737 ddf->phys->entries[pd].refnum))
3738 break;
3739 if (d == NULL)
3740 /* Haven't found that one yet, maybe there are others */
3741 continue;
3742
3743 dev = xcalloc(1, sizeof(*dev));
3744 dev->next = this->devs;
3745 this->devs = dev;
3746
3747 dev->disk.number = be32_to_cpu(d->disk.refnum);
3748 dev->disk.major = d->major;
3749 dev->disk.minor = d->minor;
3750 dev->disk.raid_disk = i;
3751 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3752 dev->recovery_start = MaxSector;
3753
3754 dev->events = be32_to_cpu(ddf->primary.seq);
3755 dev->data_offset =
3756 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3757 dev->component_size = be64_to_cpu(bvd->blocks);
3758 if (d->devname)
3759 strcpy(dev->name, d->devname);
3760 }
3761 }
3762 return rest;
3763 }
3764
3765 static int store_super_ddf(struct supertype *st, int fd)
3766 {
3767 struct ddf_super *ddf = st->sb;
3768 unsigned long long dsize;
3769 void *buf;
3770 int rc;
3771
3772 if (!ddf)
3773 return 1;
3774
3775 if (!get_dev_size(fd, NULL, &dsize))
3776 return 1;
3777
3778 if (ddf->dlist || ddf->conflist) {
3779 struct stat sta;
3780 struct dl *dl;
3781 int ofd, ret;
3782
3783 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3784 pr_err("%s: file descriptor for invalid device\n",
3785 __func__);
3786 return 1;
3787 }
3788 for (dl = ddf->dlist; dl; dl = dl->next)
3789 if (dl->major == (int)major(sta.st_rdev) &&
3790 dl->minor == (int)minor(sta.st_rdev))
3791 break;
3792 if (!dl) {
3793 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3794 (int)major(sta.st_rdev),
3795 (int)minor(sta.st_rdev));
3796 return 1;
3797 }
3798 ofd = dl->fd;
3799 dl->fd = fd;
3800 ret = (_write_super_to_disk(ddf, dl) != 1);
3801 dl->fd = ofd;
3802 return ret;
3803 }
3804
3805 if (posix_memalign(&buf, 512, 512) != 0)
3806 return 1;
3807 memset(buf, 0, 512);
3808
3809 lseek64(fd, dsize-512, 0);
3810 rc = write(fd, buf, 512);
3811 free(buf);
3812 if (rc < 0)
3813 return 1;
3814 return 0;
3815 }
3816
3817 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3818 {
3819 /*
3820 * return:
3821 * 0 same, or first was empty, and second was copied
3822 * 1 second had wrong number
3823 * 2 wrong uuid
3824 * 3 wrong other info
3825 */
3826 struct ddf_super *first = st->sb;
3827 struct ddf_super *second = tst->sb;
3828 struct dl *dl1, *dl2;
3829 struct vcl *vl1, *vl2;
3830 unsigned int max_vds, max_pds, pd, vd;
3831
3832 if (!first) {
3833 st->sb = tst->sb;
3834 tst->sb = NULL;
3835 return 0;
3836 }
3837
3838 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3839 return 2;
3840
3841 if (!be32_eq(first->anchor.seq, second->anchor.seq)) {
3842 dprintf("%s: sequence number mismatch %u/%u\n", __func__,
3843 be32_to_cpu(first->anchor.seq),
3844 be32_to_cpu(second->anchor.seq));
3845 return 3;
3846 }
3847 if (first->max_part != second->max_part ||
3848 !be16_eq(first->phys->used_pdes, second->phys->used_pdes) ||
3849 !be16_eq(first->virt->populated_vdes,
3850 second->virt->populated_vdes)) {
3851 dprintf("%s: PD/VD number mismatch\n", __func__);
3852 return 3;
3853 }
3854
3855 max_pds = be16_to_cpu(first->phys->used_pdes);
3856 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3857 for (pd = 0; pd < max_pds; pd++)
3858 if (be32_eq(first->phys->entries[pd].refnum,
3859 dl2->disk.refnum))
3860 break;
3861 if (pd == max_pds) {
3862 dprintf("%s: no match for disk %08x\n", __func__,
3863 be32_to_cpu(dl2->disk.refnum));
3864 return 3;
3865 }
3866 }
3867
3868 max_vds = be16_to_cpu(first->active->max_vd_entries);
3869 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3870 if (!be32_eq(vl2->conf.magic, DDF_VD_CONF_MAGIC))
3871 continue;
3872 for (vd = 0; vd < max_vds; vd++)
3873 if (!memcmp(first->virt->entries[vd].guid,
3874 vl2->conf.guid, DDF_GUID_LEN))
3875 break;
3876 if (vd == max_vds) {
3877 dprintf("%s: no match for VD config\n", __func__);
3878 return 3;
3879 }
3880 }
3881 /* FIXME should I look at anything else? */
3882
3883 /*
3884 At this point we are fairly sure that the metadata matches.
3885 But the new disk may contain additional local data.
3886 Add it to the super block.
3887 */
3888 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3889 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3890 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3891 DDF_GUID_LEN))
3892 break;
3893 if (vl1) {
3894 if (vl1->other_bvds != NULL &&
3895 vl1->conf.sec_elmnt_seq !=
3896 vl2->conf.sec_elmnt_seq) {
3897 dprintf("%s: adding BVD %u\n", __func__,
3898 vl2->conf.sec_elmnt_seq);
3899 add_other_bvd(vl1, &vl2->conf,
3900 first->conf_rec_len*512);
3901 }
3902 continue;
3903 }
3904
3905 if (posix_memalign((void **)&vl1, 512,
3906 (first->conf_rec_len*512 +
3907 offsetof(struct vcl, conf))) != 0) {
3908 pr_err("%s could not allocate vcl buf\n",
3909 __func__);
3910 return 3;
3911 }
3912
3913 vl1->next = first->conflist;
3914 vl1->block_sizes = NULL;
3915 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3916 if (alloc_other_bvds(first, vl1) != 0) {
3917 pr_err("%s could not allocate other bvds\n",
3918 __func__);
3919 free(vl1);
3920 return 3;
3921 }
3922 for (vd = 0; vd < max_vds; vd++)
3923 if (!memcmp(first->virt->entries[vd].guid,
3924 vl1->conf.guid, DDF_GUID_LEN))
3925 break;
3926 vl1->vcnum = vd;
3927 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
3928 first->conflist = vl1;
3929 }
3930
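/* Likewise, add any disks that the new ('second') superblock knows
 * about but that 'first' does not track yet. */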
3931 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3932 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3933 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
3934 break;
3935 if (dl1)
3936 continue;
3937
3938 if (posix_memalign((void **)&dl1, 512,
3939 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
3940 != 0) {
3941 pr_err("%s could not allocate disk info buffer\n",
3942 __func__);
3943 return 3;
3944 }
3945 memcpy(dl1, dl2, sizeof(*dl1));
3946 dl1->mdupdate = NULL;
3947 dl1->next = first->dlist;
3948 dl1->fd = -1;
3949 for (pd = 0; pd < max_pds; pd++)
3950 if (be32_eq(first->phys->entries[pd].refnum,
3951 dl1->disk.refnum))
3952 break;
3953 dl1->pdnum = pd;
3954 if (dl2->spare) {
3955 if (posix_memalign((void **)&dl1->spare, 512,
3956 first->conf_rec_len*512) != 0) {
3957 pr_err("%s could not allocate spare info buf\n",
3958 __func__);
3959 return 3;
3960 }
3961 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
3962 }
3963 for (vd = 0 ; vd < first->max_part ; vd++) {
3964 if (!dl2->vlist[vd]) {
3965 dl1->vlist[vd] = NULL;
3966 continue;
3967 }
3968 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
3969 if (!memcmp(vl1->conf.guid,
3970 dl2->vlist[vd]->conf.guid,
3971 DDF_GUID_LEN))
3972 break;
3973 }
3974 dl1->vlist[vd] = vl1;
3975 }
3976 first->dlist = dl1;
3977 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
3978 be32_to_cpu(dl1->disk.refnum));
3979 }
3980
3981 return 0;
3982 }
3983
3984 #ifndef MDASSEMBLE
3985 /*
3986 * A new array 'a' has been started which claims to be instance 'inst'
3987 * within container 'c'.
3988 * We need to confirm that the array matches the metadata in 'c' so
3989 * that we don't corrupt any metadata.
3990 */
3991 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
3992 {
3993 struct ddf_super *ddf = c->sb;
3994 int n = atoi(inst);
3995 struct mdinfo *dev;
3996 struct dl *dl;
3997 static const char faulty[] = "faulty";
3998
3999 if (all_ff(ddf->virt->entries[n].guid)) {
4000 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
4001 return -ENODEV;
4002 }
4003 dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
4004 guid_str(ddf->virt->entries[n].guid));
4005 for (dev = a->info.devs; dev; dev = dev->next) {
4006 for (dl = ddf->dlist; dl; dl = dl->next)
4007 if (dl->major == dev->disk.major &&
4008 dl->minor == dev->disk.minor)
4009 break;
4010 if (!dl) {
4011 pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
4012 __func__, dev->disk.major, dev->disk.minor, n);
4013 return -1;
4014 }
4015 if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
4016 (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
4017 pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
4018 __func__, n, dl->major, dl->minor,
4019 be16_to_cpu(
4020 ddf->phys->entries[dl->pdnum].state));
4021 if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
4022 sizeof(faulty) - 1)
4023 pr_err("Write to state_fd failed\n");
4024 dev->curr_state = DS_FAULTY;
4025 }
4026 }
4027 a->info.container_member = n;
4028 return 0;
4029 }
4030
4031 /*
4032 * The array 'a' is to be marked clean in the metadata.
4033 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4034 * clean up to that point (in sectors). If that cannot be recorded in the
4035 * metadata, then leave it as dirty.
4036 *
4037 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4038 * !global! virtual_disk.virtual_entry structure.
4039 */
4040 static int ddf_set_array_state(struct active_array *a, int consistent)
4041 {
4042 struct ddf_super *ddf = a->container->sb;
4043 int inst = a->info.container_member;
4044 int old = ddf->virt->entries[inst].state;
4045 if (consistent == 2) {
4046 /* Should check if a recovery should be started FIXME */
4047 consistent = 1;
4048 if (!is_resync_complete(&a->info))
4049 consistent = 0;
4050 }
4051 if (consistent)
4052 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4053 else
4054 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4055 if (old != ddf->virt->entries[inst].state)
4056 ddf_set_updates_pending(ddf);
4057
4058 old = ddf->virt->entries[inst].init_state;
4059 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4060 if (is_resync_complete(&a->info))
4061 ddf->virt->entries[inst].init_state |= DDF_init_full;
4062 else if (a->info.resync_start == 0)
4063 ddf->virt->entries[inst].init_state |= DDF_init_not;
4064 else
4065 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4066 if (old != ddf->virt->entries[inst].init_state)
4067 ddf_set_updates_pending(ddf);
4068
4069 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4070 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4071 consistent?"clean":"dirty",
4072 a->info.resync_start);
4073 return consistent;
4074 }
4075
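/*
 * Compute the DDF state of a single BVD from the global phys_disk
 * states: count the members that are Online and neither Failed nor
 * Rebuilding, then map that count to optimal/part_optimal/degraded/
 * failed according to the redundancy of the RAID level.
 */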
4076 static int get_bvd_state(const struct ddf_super *ddf,
4077 const struct vd_config *vc)
4078 {
4079 unsigned int i, n_bvd, working = 0;
4080 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4081 int pd, st, state;
4082 for (i = 0; i < n_prim; i++) {
4083 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4084 continue;
4085 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4086 if (pd < 0)
4087 continue;
4088 st = be16_to_cpu(ddf->phys->entries[pd].state);
4089 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4090 == DDF_Online)
4091 working++;
4092 }
4093
4094 state = DDF_state_degraded;
4095 if (working == n_prim)
4096 state = DDF_state_optimal;
4097 else
4098 switch (vc->prl) {
4099 case DDF_RAID0:
4100 case DDF_CONCAT:
4101 case DDF_JBOD:
4102 state = DDF_state_failed;
4103 break;
4104 case DDF_RAID1:
4105 if (working == 0)
4106 state = DDF_state_failed;
4107 else if (working >= 2)
4108 state = DDF_state_part_optimal;
4109 break;
4110 case DDF_RAID4:
4111 case DDF_RAID5:
4112 if (working < n_prim - 1)
4113 state = DDF_state_failed;
4114 break;
4115 case DDF_RAID6:
4116 if (working < n_prim - 2)
4117 state = DDF_state_failed;
4118 else if (working == n_prim - 1)
4119 state = DDF_state_part_optimal;
4120 break;
4121 }
4122 return state;
4123 }
4124
4125 static int secondary_state(int state, int other, int seclevel)
4126 {
4127 if (state == DDF_state_optimal && other == DDF_state_optimal)
4128 return DDF_state_optimal;
4129 if (seclevel == DDF_2MIRRORED) {
4130 if (state == DDF_state_optimal || other == DDF_state_optimal)
4131 return DDF_state_part_optimal;
4132 if (state == DDF_state_failed && other == DDF_state_failed)
4133 return DDF_state_failed;
4134 return DDF_state_degraded;
4135 } else {
4136 if (state == DDF_state_failed || other == DDF_state_failed)
4137 return DDF_state_failed;
4138 if (state == DDF_state_degraded || other == DDF_state_degraded)
4139 return DDF_state_degraded;
4140 return DDF_state_part_optimal;
4141 }
4142 }
4143
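/*
 * Combine the per-BVD states of a secondary VD via secondary_state().
 * For example, per the rules above: with SRL DDF_2MIRRORED, an optimal
 * BVD plus a degraded BVD yields part_optimal, while with any other
 * (striped/spanned) SRL the same pair yields degraded.
 */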
4144 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4145 {
4146 int state = get_bvd_state(ddf, &vcl->conf);
4147 unsigned int i;
4148 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4149 state = secondary_state(
4150 state,
4151 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4152 vcl->conf.srl);
4153 }
4154 return state;
4155 }
4156
4157 /*
4158 * The state of each disk is stored in the global phys_disk structure
4159 * in phys_disk.entries[n].state.
4160 * This makes various combinations awkward.
4161 * - When a device fails in any array, it must be failed in all arrays
4162 * that include a part of this device.
4163 * - When a component is rebuilding, we cannot include it officially in the
4164 * array unless this is the only array that uses the device.
4165 *
4166 * So: when transitioning:
4167 * Online -> failed, just set failed flag. monitor will propagate
4168 * spare -> online, the device might need to be added to the array.
4169 * spare -> failed, just set failed. Don't worry if in array or not.
4170 */
4171 static void ddf_set_disk(struct active_array *a, int n, int state)
4172 {
4173 struct ddf_super *ddf = a->container->sb;
4174 unsigned int inst = a->info.container_member, n_bvd;
4175 struct vcl *vcl;
4176 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4177 &n_bvd, &vcl);
4178 int pd;
4179 struct mdinfo *mdi;
4180 struct dl *dl;
4181
4182 dprintf("%s: %d to %x\n", __func__, n, state);
4183 if (vc == NULL) {
4184 dprintf("ddf: cannot find instance %d!!\n", inst);
4185 return;
4186 }
4187 /* Find the matching slot in 'info'. */
4188 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4189 if (mdi->disk.raid_disk == n)
4190 break;
4191 if (!mdi) {
4192 pr_err("%s: cannot find raid disk %d\n",
4193 __func__, n);
4194 return;
4195 }
4196
4197 /* and find the 'dl' entry corresponding to that. */
4198 for (dl = ddf->dlist; dl; dl = dl->next)
4199 if (mdi->state_fd >= 0 &&
4200 mdi->disk.major == dl->major &&
4201 mdi->disk.minor == dl->minor)
4202 break;
4203 if (!dl) {
4204 pr_err("%s: cannot find raid disk %d (%d/%d)\n",
4205 __func__, n,
4206 mdi->disk.major, mdi->disk.minor);
4207 return;
4208 }
4209
4210 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4211 if (pd < 0 || pd != dl->pdnum) {
4212 /* disk doesn't currently exist or has changed.
4213 * If it is now in_sync, insert it. */
4214 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4215 __func__, dl->pdnum, dl->major, dl->minor,
4216 be32_to_cpu(dl->disk.refnum));
4217 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4218 __func__, inst, n_bvd,
4219 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4220 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4221 pd = dl->pdnum; /* FIXME: is this really correct ? */
4222 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4223 LBA_OFFSET(ddf, vc)[n_bvd] =
4224 cpu_to_be64(mdi->data_offset);
4225 be16_clear(ddf->phys->entries[pd].type,
4226 cpu_to_be16(DDF_Global_Spare));
4227 be16_set(ddf->phys->entries[pd].type,
4228 cpu_to_be16(DDF_Active_in_VD));
4229 ddf_set_updates_pending(ddf);
4230 }
4231 } else {
4232 be16 old = ddf->phys->entries[pd].state;
4233 if (state & DS_FAULTY)
4234 be16_set(ddf->phys->entries[pd].state,
4235 cpu_to_be16(DDF_Failed));
4236 if (state & DS_INSYNC) {
4237 be16_set(ddf->phys->entries[pd].state,
4238 cpu_to_be16(DDF_Online));
4239 be16_clear(ddf->phys->entries[pd].state,
4240 cpu_to_be16(DDF_Rebuilding));
4241 }
4242 if (!be16_eq(old, ddf->phys->entries[pd].state))
4243 ddf_set_updates_pending(ddf);
4244 }
4245
4246 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4247 be32_to_cpu(dl->disk.refnum), state,
4248 be16_to_cpu(ddf->phys->entries[pd].state));
4249
4250 /* Now we need to check the state of the array and update
4251 * virtual_disk.entries[n].state.
4252 * It needs to be one of "optimal", "degraded", "failed".
4253 * I don't understand 'deleted' or 'missing'.
4254 */
4255 state = get_svd_state(ddf, vcl);
4256
4257 if (ddf->virt->entries[inst].state !=
4258 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4259 | state)) {
4260
4261 ddf->virt->entries[inst].state =
4262 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4263 | state;
4264 ddf_set_updates_pending(ddf);
4265 }
4266
4267 }
4268
4269 static void ddf_sync_metadata(struct supertype *st)
4270 {
4271
4272 /*
4273 * Write all data to all devices.
4274 * Later, we might be able to track whether only local changes
4275 * have been made, or whether any global data has been changed,
4276 * but ddf is sufficiently weird that it probably always
4277 * changes global data ....
4278 */
4279 struct ddf_super *ddf = st->sb;
4280 if (!ddf->updates_pending)
4281 return;
4282 ddf->updates_pending = 0;
4283 __write_init_super_ddf(st);
4284 dprintf("ddf: sync_metadata\n");
4285 }
4286
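/*
 * Unlink (but do not free) the conf record matching 'guid' from the
 * list; returns 1 if an entry was removed, 0 otherwise.
 */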
4287 static int del_from_conflist(struct vcl **list, const char *guid)
4288 {
4289 struct vcl **p;
4290 int found = 0;
4291 for (p = list; p && *p; p = &((*p)->next))
4292 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4293 found = 1;
4294 *p = (*p)->next;
4295 }
4296 return found;
4297 }
4298
4299 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4300 {
4301 struct dl *dl;
4302 unsigned int vdnum, i;
4303 vdnum = find_vde_by_guid(ddf, guid);
4304 if (vdnum == DDF_NOTFOUND) {
4305 pr_err("%s: could not find VD %s\n", __func__,
4306 guid_str(guid));
4307 return -1;
4308 }
4309 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4310 pr_err("%s: could not find conf %s\n", __func__,
4311 guid_str(guid));
4312 return -1;
4313 }
4314 for (dl = ddf->dlist; dl; dl = dl->next)
4315 for (i = 0; i < ddf->max_part; i++)
4316 if (dl->vlist[i] != NULL &&
4317 !memcmp(dl->vlist[i]->conf.guid, guid,
4318 DDF_GUID_LEN))
4319 dl->vlist[i] = NULL;
4320 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4321 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4322 return 0;
4323 }
4324
4325 static int kill_subarray_ddf(struct supertype *st)
4326 {
4327 struct ddf_super *ddf = st->sb;
4328 /*
4329 * currentconf is set in container_content_ddf,
4330 * called with subarray arg
4331 */
4332 struct vcl *victim = ddf->currentconf;
4333 struct vd_config *conf;
4334 unsigned int vdnum;
4335 ddf->currentconf = NULL;
4336 if (!victim) {
4337 pr_err("%s: nothing to kill\n", __func__);
4338 return -1;
4339 }
4340 conf = &victim->conf;
4341 vdnum = find_vde_by_guid(ddf, conf->guid);
4342 if (vdnum == DDF_NOTFOUND) {
4343 pr_err("%s: could not find VD %s\n", __func__,
4344 guid_str(conf->guid));
4345 return -1;
4346 }
4347 if (st->update_tail) {
4348 struct virtual_disk *vd;
4349 int len = sizeof(struct virtual_disk)
4350 + sizeof(struct virtual_entry);
4351 vd = xmalloc(len);
4352 if (vd == NULL) {
4353 pr_err("%s: failed to allocate %d bytes\n", __func__,
4354 len);
4355 return -1;
4356 }
4357 memset(vd, 0, len);
4358 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4359 vd->populated_vdes = cpu_to_be16(0);
4360 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4361 /* we use DDF_state_deleted as marker */
4362 vd->entries[0].state = DDF_state_deleted;
4363 append_metadata_update(st, vd, len);
4364 } else {
4365 _kill_subarray_ddf(ddf, conf->guid);
4366 ddf_set_updates_pending(ddf);
4367 ddf_sync_metadata(st);
4368 }
4369 return 0;
4370 }
4371
4372 static void copy_matching_bvd(struct ddf_super *ddf,
4373 struct vd_config *conf,
4374 const struct metadata_update *update)
4375 {
4376 unsigned int mppe =
4377 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4378 unsigned int len = ddf->conf_rec_len * 512;
4379 char *p;
4380 struct vd_config *vc;
4381 for (p = update->buf; p < update->buf + update->len; p += len) {
4382 vc = (struct vd_config *) p;
4383 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
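/* The LBA offsets (be64) follow the phys_refnum array (be32)
 * directly in the VD config record (see LBA_OFFSET), so a single
 * memcpy of mppe * (4 + 8) bytes copies both. */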
4384 memcpy(conf->phys_refnum, vc->phys_refnum,
4385 mppe * (sizeof(__u32) + sizeof(__u64)));
4386 return;
4387 }
4388 }
4389 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4390 conf->sec_elmnt_seq, guid_str(conf->guid));
4391 }
4392
4393 static void ddf_process_update(struct supertype *st,
4394 struct metadata_update *update)
4395 {
4396 /* Apply this update to the metadata.
4397 * The first 4 bytes are a DDF_*_MAGIC which guides
4398 * our actions.
4399 * Possible updates are:
4400 * DDF_PHYS_RECORDS_MAGIC
4401 * Add a new physical device or remove an old one.
4402 * Changes to this record only happen implicitly.
4403 * used_pdes is the device number.
4404 * DDF_VIRT_RECORDS_MAGIC
4405 * Add a new VD. Possibly also change the 'access' bits.
4406 * populated_vdes is the entry number.
4407 * DDF_VD_CONF_MAGIC
4408 * New or updated VD. The VIRT_RECORD must already
4409 * exist. For an update, phys_refnum and lba_offset
4410 * (at least) are updated, and the VD_CONF must
4411 * be written to precisely those devices listed with
4412 * a phys_refnum.
4413 * DDF_SPARE_ASSIGN_MAGIC
4414 * replacement Spare Assignment Record... but for which device?
4415 *
4416 * So, e.g.:
4417 * - to create a new array, we send a VIRT_RECORD and
4418 * a VD_CONF. Then assemble and start the array.
4419 * - to activate a spare we send a VD_CONF to add the phys_refnum
4420 * and offset. This will also mark the spare as active with
4421 * a spare-assignment record.
4422 */
4423 struct ddf_super *ddf = st->sb;
4424 be32 *magic = (be32 *)update->buf;
4425 struct phys_disk *pd;
4426 struct virtual_disk *vd;
4427 struct vd_config *vc;
4428 struct vcl *vcl;
4429 struct dl *dl;
4430 unsigned int ent;
4431 unsigned int pdnum, pd2, len;
4432
4433 dprintf("Process update %x\n", be32_to_cpu(*magic));
4434
4435 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4436
4437 if (update->len != (sizeof(struct phys_disk) +
4438 sizeof(struct phys_disk_entry)))
4439 return;
4440 pd = (struct phys_disk*)update->buf;
4441
4442 ent = be16_to_cpu(pd->used_pdes);
4443 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4444 return;
4445 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4446 struct dl **dlp;
4447 /* removing this disk. */
4448 be16_set(ddf->phys->entries[ent].state,
4449 cpu_to_be16(DDF_Missing));
4450 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4451 struct dl *dl = *dlp;
4452 if (dl->pdnum == (signed)ent) {
4453 close(dl->fd);
4454 dl->fd = -1;
4455 /* FIXME this doesn't free
4456 * dl->devname */
4457 update->space = dl;
4458 *dlp = dl->next;
4459 break;
4460 }
4461 }
4462 ddf_set_updates_pending(ddf);
4463 return;
4464 }
4465 if (!all_ff(ddf->phys->entries[ent].guid))
4466 return;
4467 ddf->phys->entries[ent] = pd->entries[0];
4468 ddf->phys->used_pdes = cpu_to_be16
4469 (1 + be16_to_cpu(ddf->phys->used_pdes));
4470 ddf_set_updates_pending(ddf);
4471 if (ddf->add_list) {
4472 struct active_array *a;
4473 struct dl *al = ddf->add_list;
4474 ddf->add_list = al->next;
4475
4476 al->next = ddf->dlist;
4477 ddf->dlist = al;
4478
4479 /* As a device has been added, we should check
4480 * for any degraded devices that might make
4481 * use of this spare */
4482 for (a = st->arrays ; a; a=a->next)
4483 a->check_degraded = 1;
4484 }
4485 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4486
4487 if (update->len != (sizeof(struct virtual_disk) +
4488 sizeof(struct virtual_entry)))
4489 return;
4490 vd = (struct virtual_disk*)update->buf;
4491
4492 if (vd->entries[0].state == DDF_state_deleted) {
4493 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4494 return;
4495 } else {
4496
4497 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4498 if (ent != DDF_NOTFOUND) {
4499 dprintf("%s: VD %s exists already in slot %d\n",
4500 __func__, guid_str(vd->entries[0].guid),
4501 ent);
4502 return;
4503 }
4504 ent = find_unused_vde(ddf);
4505 if (ent == DDF_NOTFOUND)
4506 return;
4507 ddf->virt->entries[ent] = vd->entries[0];
4508 ddf->virt->populated_vdes =
4509 cpu_to_be16(
4510 1 + be16_to_cpu(
4511 ddf->virt->populated_vdes));
4512 dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
4513 __func__, guid_str(vd->entries[0].guid), ent,
4514 ddf->virt->entries[ent].state,
4515 ddf->virt->entries[ent].init_state);
4516 }
4517 ddf_set_updates_pending(ddf);
4518 }
4519
4520 else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4521 vc = (struct vd_config*)update->buf;
4522 len = ddf->conf_rec_len * 512;
4523 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4524 pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
4525 __func__, guid_str(vc->guid), update->len,
4526 vc->sec_elmnt_count);
4527 return;
4528 }
4529 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4530 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4531 break;
4532 dprintf("%s: conf update for %s (%s)\n", __func__,
4533 guid_str(vc->guid), (vcl ? "old" : "new"));
4534 if (vcl) {
4535 /* An update, just copy the phys_refnum and lba_offset
4536 * fields
4537 */
4538 unsigned int i;
4539 unsigned int k;
4540 copy_matching_bvd(ddf, &vcl->conf, update);
4541 for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
4542 dprintf("BVD %u has %08x at %llu\n", 0,
4543 be32_to_cpu(vcl->conf.phys_refnum[k]),
4544 be64_to_cpu(LBA_OFFSET(ddf,
4545 &vcl->conf)[k]));
4546 for (i = 1; i < vc->sec_elmnt_count; i++) {
4547 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4548 update);
4549 for (k = 0; k < be16_to_cpu(
4550 vc->prim_elmnt_count); k++)
4551 dprintf("BVD %u has %08x at %llu\n", i,
4552 be32_to_cpu
4553 (vcl->other_bvds[i-1]->
4554 phys_refnum[k]),
4555 be64_to_cpu
4556 (LBA_OFFSET
4557 (ddf,
4558 vcl->other_bvds[i-1])[k]));
4559 }
4560 } else {
4561 /* A new VD_CONF */
4562 unsigned int i;
4563 if (!update->space)
4564 return;
4565 vcl = update->space;
4566 update->space = NULL;
4567 vcl->next = ddf->conflist;
4568 memcpy(&vcl->conf, vc, len);
4569 ent = find_vde_by_guid(ddf, vc->guid);
4570 if (ent == DDF_NOTFOUND)
4571 return;
4572 vcl->vcnum = ent;
4573 ddf->conflist = vcl;
4574 for (i = 1; i < vc->sec_elmnt_count; i++)
4575 memcpy(vcl->other_bvds[i-1],
4576 update->buf + len * i, len);
4577 }
4578 /* Set DDF_Transition on all Failed devices - to help
4579 * us detect those that are no longer in use
4580 */
4581 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4582 pdnum++)
4583 if (be16_and(ddf->phys->entries[pdnum].state,
4584 cpu_to_be16(DDF_Failed)))
4585 be16_set(ddf->phys->entries[pdnum].state,
4586 cpu_to_be16(DDF_Transition));
4587 /* Now make sure vlist is correct for each dl. */
4588 for (dl = ddf->dlist; dl; dl = dl->next) {
4589 unsigned int vn = 0;
4590 int in_degraded = 0;
4591 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4592 unsigned int dn, ibvd;
4593 const struct vd_config *conf;
4594 int vstate;
4595 dn = get_pd_index_from_refnum(vcl,
4596 dl->disk.refnum,
4597 ddf->mppe,
4598 &conf, &ibvd);
4599 if (dn == DDF_NOTFOUND)
4600 continue;
4601 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4602 dl->pdnum,
4603 be32_to_cpu(dl->disk.refnum),
4604 guid_str(conf->guid),
4605 conf->sec_elmnt_seq, vn);
4606 /* Clear the Transition flag */
4607 if (be16_and
4608 (ddf->phys->entries[dl->pdnum].state,
4609 cpu_to_be16(DDF_Failed)))
4610 be16_clear(ddf->phys
4611 ->entries[dl->pdnum].state,
4612 cpu_to_be16(DDF_Transition));
4613 dl->vlist[vn++] = vcl;
4614 vstate = ddf->virt->entries[vcl->vcnum].state
4615 & DDF_state_mask;
4616 if (vstate == DDF_state_degraded ||
4617 vstate == DDF_state_part_optimal)
4618 in_degraded = 1;
4619 }
4620 while (vn < ddf->max_part)
4621 dl->vlist[vn++] = NULL;
4622 if (dl->vlist[0]) {
4623 be16_clear(ddf->phys->entries[dl->pdnum].type,
4624 cpu_to_be16(DDF_Global_Spare));
4625 if (!be16_and(ddf->phys
4626 ->entries[dl->pdnum].type,
4627 cpu_to_be16(DDF_Active_in_VD))) {
4628 be16_set(ddf->phys
4629 ->entries[dl->pdnum].type,
4630 cpu_to_be16(DDF_Active_in_VD));
4631 if (in_degraded)
4632 be16_set(ddf->phys
4633 ->entries[dl->pdnum]
4634 .state,
4635 cpu_to_be16
4636 (DDF_Rebuilding));
4637 }
4638 }
4639 if (dl->spare) {
4640 be16_clear(ddf->phys->entries[dl->pdnum].type,
4641 cpu_to_be16(DDF_Global_Spare));
4642 be16_set(ddf->phys->entries[dl->pdnum].type,
4643 cpu_to_be16(DDF_Spare));
4644 }
4645 if (!dl->vlist[0] && !dl->spare) {
4646 be16_set(ddf->phys->entries[dl->pdnum].type,
4647 cpu_to_be16(DDF_Global_Spare));
4648 be16_clear(ddf->phys->entries[dl->pdnum].type,
4649 cpu_to_be16(DDF_Spare));
4650 be16_clear(ddf->phys->entries[dl->pdnum].type,
4651 cpu_to_be16(DDF_Active_in_VD));
4652 }
4653 }
4654
4655 /* Now remove any 'Failed' devices that are not part
4656 * of any VD. They will have the Transition flag set.
4657 * Once done, we need to update all dl->pdnum numbers.
4658 */
4659 pd2 = 0;
4660 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4661 pdnum++) {
4662 if (be16_and(ddf->phys->entries[pdnum].state,
4663 cpu_to_be16(DDF_Failed))
4664 && be16_and(ddf->phys->entries[pdnum].state,
4665 cpu_to_be16(DDF_Transition))) {
4666 /* skip this one unless in dlist*/
4667 for (dl = ddf->dlist; dl; dl = dl->next)
4668 if (dl->pdnum == (int)pdnum)
4669 break;
4670 if (!dl)
4671 continue;
4672 }
4673 if (pdnum == pd2)
4674 pd2++;
4675 else {
4676 ddf->phys->entries[pd2] =
4677 ddf->phys->entries[pdnum];
4678 for (dl = ddf->dlist; dl; dl = dl->next)
4679 if (dl->pdnum == (int)pdnum)
4680 dl->pdnum = pd2;
4681 pd2++;
4682 }
4683 }
4684 ddf->phys->used_pdes = cpu_to_be16(pd2);
4685 while (pd2 < pdnum) {
4686 memset(ddf->phys->entries[pd2].guid, 0xff,
4687 DDF_GUID_LEN);
4688 pd2++;
4689 }
4690
4691 ddf_set_updates_pending(ddf);
4692 }
4693 /* case DDF_SPARE_ASSIGN_MAGIC */
4694 }
4695
4696 static void ddf_prepare_update(struct supertype *st,
4697 struct metadata_update *update)
4698 {
4699 /* This update arrived at managemon.
4700 * We are about to pass it to monitor.
4701 * If a malloc is needed, do it here.
4702 */
4703 struct ddf_super *ddf = st->sb;
4704 be32 *magic = (be32 *)update->buf;
4705 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4706 struct vcl *vcl;
4707 struct vd_config *conf = (struct vd_config *) update->buf;
4708 if (posix_memalign(&update->space, 512,
4709 offsetof(struct vcl, conf)
4710 + ddf->conf_rec_len * 512) != 0) {
4711 update->space = NULL;
4712 return;
4713 }
4714 vcl = update->space;
4715 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4716 if (alloc_other_bvds(ddf, vcl) != 0) {
4717 free(update->space);
4718 update->space = NULL;
4719 }
4720 }
4721 }
4722
4723 /*
4724 * Check degraded state of a RAID10.
4725 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4726 */
4727 static int raid10_degraded(struct mdinfo *info)
4728 {
4729 int n_prim, n_bvds;
4730 int i;
4731 struct mdinfo *d;
4732 char *found;
4733 int ret = -1;
4734
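/* md encodes this RAID10 layout as 0x1NN (one far copy, NN near
 * copies; cf. ddf_level_to_layout()).  Masking off the 0x100 bit
 * leaves the number of near copies, i.e. the disks per BVD. */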
4735 n_prim = info->array.layout & ~0x100;
4736 n_bvds = info->array.raid_disks / n_prim;
4737 found = xmalloc(n_bvds);
4738 if (found == NULL)
4739 return ret;
4740 memset(found, 0, n_bvds);
4741 for (d = info->devs; d; d = d->next) {
4742 i = d->disk.raid_disk / n_prim;
4743 if (i >= n_bvds) {
4744 pr_err("%s: BUG: invalid raid disk\n", __func__);
4745 goto out;
4746 }
4747 if (d->state_fd > 0)
4748 found[i]++;
4749 }
4750 ret = 2;
4751 for (i = 0; i < n_bvds; i++)
4752 if (!found[i]) {
4753 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4754 ret = 0;
4755 goto out;
4756 } else if (found[i] < n_prim) {
4757 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4758 n_bvds);
4759 ret = 1;
4760 }
4761 out:
4762 free(found);
4763 return ret;
4764 }
4765
4766 /*
4767 * Check if the array 'a' is degraded but not failed.
4768 * If it is, find as many spares as are available and needed and
4769 * arrange for their inclusion.
4770 * We only choose devices which are not already in the array,
4771 * and prefer those with a spare-assignment to this array.
4772 * Otherwise we choose global spares, assuming always that
4773 * there is enough room.
4774 * For each spare that we assign, we return an 'mdinfo' which
4775 * describes the position for the device in the array.
4776 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4777 * the new phys_refnum and lba_offset values.
4778 *
4779 * Only worry about BVDs at the moment.
4780 */
4781 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4782 struct metadata_update **updates)
4783 {
4784 int working = 0;
4785 struct mdinfo *d;
4786 struct ddf_super *ddf = a->container->sb;
4787 int global_ok = 0;
4788 struct mdinfo *rv = NULL;
4789 struct mdinfo *di;
4790 struct metadata_update *mu;
4791 struct dl *dl;
4792 int i;
4793 unsigned int j;
4794 struct vcl *vcl;
4795 struct vd_config *vc;
4796 unsigned int n_bvd;
4797
4798 for (d = a->info.devs ; d ; d = d->next) {
4799 if ((d->curr_state & DS_FAULTY) &&
4800 d->state_fd >= 0)
4801 /* wait for Removal to happen */
4802 return NULL;
4803 if (d->state_fd >= 0)
4804 working ++;
4805 }
4806
4807 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4808 a->info.array.raid_disks,
4809 a->info.array.level);
4810 if (working == a->info.array.raid_disks)
4811 return NULL; /* array not degraded */
4812 switch (a->info.array.level) {
4813 case 1:
4814 if (working == 0)
4815 return NULL; /* failed */
4816 break;
4817 case 4:
4818 case 5:
4819 if (working < a->info.array.raid_disks - 1)
4820 return NULL; /* failed */
4821 break;
4822 case 6:
4823 if (working < a->info.array.raid_disks - 2)
4824 return NULL; /* failed */
4825 break;
4826 case 10:
4827 if (raid10_degraded(&a->info) < 1)
4828 return NULL;
4829 break;
4830 default: /* concat or stripe */
4831 return NULL; /* failed */
4832 }
4833
4834 /* For each slot, if it is not working, find a spare */
4835 dl = ddf->dlist;
4836 for (i = 0; i < a->info.array.raid_disks; i++) {
4837 for (d = a->info.devs ; d ; d = d->next)
4838 if (d->disk.raid_disk == i)
4839 break;
4840 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4841 if (d && (d->state_fd >= 0))
4842 continue;
4843
4844 /* OK, this device needs recovery. Find a spare */
4845 again:
4846 for ( ; dl ; dl = dl->next) {
4847 unsigned long long esize;
4848 unsigned long long pos;
4849 struct mdinfo *d2;
4850 int is_global = 0;
4851 int is_dedicated = 0;
4852 struct extent *ex;
4853 unsigned int j;
4854 be16 state = ddf->phys->entries[dl->pdnum].state;
4855 if (be16_and(state,
4856 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4857 !be16_and(state,
4858 cpu_to_be16(DDF_Online)))
4859 continue;
4860
4861 /* If in this array, skip */
4862 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4863 if (d2->state_fd >= 0 &&
4864 d2->disk.major == dl->major &&
4865 d2->disk.minor == dl->minor) {
4866 dprintf("%x:%x (%08x) already in array\n",
4867 dl->major, dl->minor,
4868 be32_to_cpu(dl->disk.refnum));
4869 break;
4870 }
4871 if (d2)
4872 continue;
4873 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4874 cpu_to_be16(DDF_Spare))) {
4875 /* Check spare assign record */
4876 if (dl->spare) {
4877 if (dl->spare->type & DDF_spare_dedicated) {
4878 /* check spare_ents for guid */
4879 for (j = 0 ;
4880 j < be16_to_cpu
4881 (dl->spare
4882 ->populated);
4883 j++) {
4884 if (memcmp(dl->spare->spare_ents[j].guid,
4885 ddf->virt->entries[a->info.container_member].guid,
4886 DDF_GUID_LEN) == 0)
4887 is_dedicated = 1;
4888 }
4889 } else
4890 is_global = 1;
4891 }
4892 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
4893 cpu_to_be16(DDF_Global_Spare))) {
4894 is_global = 1;
4895 } else if (!be16_and(ddf->phys
4896 ->entries[dl->pdnum].state,
4897 cpu_to_be16(DDF_Failed))) {
4898 /* we can possibly use some of this */
4899 is_global = 1;
4900 }
4901 if ( ! (is_dedicated ||
4902 (is_global && global_ok))) {
4903 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4904 is_dedicated, is_global);
4905 continue;
4906 }
4907
4908 /* We are allowed to use this device - is there space?
4909 * We need a->info.component_size sectors */
4910 ex = get_extents(ddf, dl);
4911 if (!ex) {
4912 dprintf("cannot get extents\n");
4913 continue;
4914 }
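/* Walk the extent list (as returned by get_extents(): sorted,
 * ending with a zero-size entry) looking for a gap of at least
 * component_size sectors; 'pos' tracks the end of the previous
 * extent. */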
4915 j = 0; pos = 0;
4916 esize = 0;
4917
4918 do {
4919 esize = ex[j].start - pos;
4920 if (esize >= a->info.component_size)
4921 break;
4922 pos = ex[j].start + ex[j].size;
4923 j++;
4924 } while (ex[j-1].size);
4925
4926 free(ex);
4927 if (esize < a->info.component_size) {
4928 dprintf("%x:%x has no room: %llu %llu\n",
4929 dl->major, dl->minor,
4930 esize, a->info.component_size);
4931 /* No room */
4932 continue;
4933 }
4934
4935 /* Cool, we have a device with some space at pos */
4936 di = xcalloc(1, sizeof(*di));
4937 di->disk.number = i;
4938 di->disk.raid_disk = i;
4939 di->disk.major = dl->major;
4940 di->disk.minor = dl->minor;
4941 di->disk.state = 0;
4942 di->recovery_start = 0;
4943 di->data_offset = pos;
4944 di->component_size = a->info.component_size;
4945 di->container_member = dl->pdnum;
4946 di->next = rv;
4947 rv = di;
4948 dprintf("%x:%x (%08x) to be %d at %llu\n",
4949 dl->major, dl->minor,
4950 be32_to_cpu(dl->disk.refnum), i, pos);
4951
4952 break;
4953 }
4954 if (!dl && ! global_ok) {
4955 /* not enough dedicated spares, try global */
4956 global_ok = 1;
4957 dl = ddf->dlist;
4958 goto again;
4959 }
4960 }
4961
4962 if (!rv)
4963 /* No spares found */
4964 return rv;
4965 /* Now 'rv' has a list of devices to return.
4966 * Create a metadata_update record to update the
4967 * phys_refnum and lba_offset values
4968 */
4969 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
4970 &n_bvd, &vcl);
4971 if (vc == NULL)
4972 return NULL;
4973
4974 mu = xmalloc(sizeof(*mu));
4975 /* mu->space is set to NULL below; for a DDF_VD_CONF_MAGIC update
4976 * the vcl buffer is allocated in ddf_prepare_update(), so nothing
4977 * needs to be allocated here. */
4979
4980 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
4981 mu->buf = xmalloc(mu->len);
4982 mu->space = NULL;
4983 mu->space_list = NULL;
4984 mu->next = *updates;
4985 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
4986 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
4987 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
4988 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
4989
4990 vc = (struct vd_config*)mu->buf;
4991 for (di = rv ; di ; di = di->next) {
4992 unsigned int i_sec, i_prim;
4993 i_sec = di->disk.raid_disk
4994 / be16_to_cpu(vcl->conf.prim_elmnt_count);
4995 i_prim = di->disk.raid_disk
4996 % be16_to_cpu(vcl->conf.prim_elmnt_count);
4997 vc = (struct vd_config *)(mu->buf
4998 + i_sec * ddf->conf_rec_len * 512);
4999 for (dl = ddf->dlist; dl; dl = dl->next)
5000 if (dl->major == di->disk.major
5001 && dl->minor == di->disk.minor)
5002 break;
5003 if (!dl) {
5004 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
5005 __func__, di->disk.raid_disk,
5006 di->disk.major, di->disk.minor);
5007 return NULL;
5008 }
5009 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5010 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5011 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5012 be32_to_cpu(vc->phys_refnum[i_prim]),
5013 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5014 }
5015 *updates = mu;
5016 return rv;
5017 }
5018 #endif /* MDASSEMBLE */
5019
5020 static int ddf_level_to_layout(int level)
5021 {
5022 switch(level) {
5023 case 0:
5024 case 1:
5025 return 0;
5026 case 5:
5027 return ALGORITHM_LEFT_SYMMETRIC;
5028 case 6:
5029 return ALGORITHM_ROTATING_N_CONTINUE;
5030 case 10:
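/* 'near=2' md RAID10 layout: low byte = 2 near copies, one far copy */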
5031 return 0x102;
5032 default:
5033 return UnSet;
5034 }
5035 }
5036
5037 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5038 {
5039 if (level && *level == UnSet)
5040 *level = LEVEL_CONTAINER;
5041
5042 if (level && layout && *layout == UnSet)
5043 *layout = ddf_level_to_layout(*level);
5044 }
5045
5046 struct superswitch super_ddf = {
5047 #ifndef MDASSEMBLE
5048 .examine_super = examine_super_ddf,
5049 .brief_examine_super = brief_examine_super_ddf,
5050 .brief_examine_subarrays = brief_examine_subarrays_ddf,
5051 .export_examine_super = export_examine_super_ddf,
5052 .detail_super = detail_super_ddf,
5053 .brief_detail_super = brief_detail_super_ddf,
5054 .validate_geometry = validate_geometry_ddf,
5055 .write_init_super = write_init_super_ddf,
5056 .add_to_super = add_to_super_ddf,
5057 .remove_from_super = remove_from_super_ddf,
5058 .load_container = load_container_ddf,
5059 .copy_metadata = copy_metadata_ddf,
5060 .kill_subarray = kill_subarray_ddf,
5061 #endif
5062 .match_home = match_home_ddf,
5063 .uuid_from_super= uuid_from_super_ddf,
5064 .getinfo_super = getinfo_super_ddf,
5065 .update_super = update_super_ddf,
5066
5067 .avail_size = avail_size_ddf,
5068
5069 .compare_super = compare_super_ddf,
5070
5071 .load_super = load_super_ddf,
5072 .init_super = init_super_ddf,
5073 .store_super = store_super_ddf,
5074 .free_super = free_super_ddf,
5075 .match_metadata_desc = match_metadata_desc_ddf,
5076 .container_content = container_content_ddf,
5077 .default_geometry = default_geometry_ddf,
5078
5079 .external = 1,
5080
5081 #ifndef MDASSEMBLE
5082 /* for mdmon */
5083 .open_new = ddf_open_new,
5084 .set_array_state= ddf_set_array_state,
5085 .set_disk = ddf_set_disk,
5086 .sync_metadata = ddf_sync_metadata,
5087 .process_update = ddf_process_update,
5088 .prepare_update = ddf_prepare_update,
5089 .activate_spare = ddf_activate_spare,
5090 #endif
5091 .name = "ddf",
5092 };