1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add
38 * the seconds in the 1970s to convert to linux timestamps.
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
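
/* Illustrative sketch, not part of the original file: converting a DDF
 * timestamp (seconds since 1/1/1980, already converted to CPU byte order)
 * to Unix time, which is what adding DECADE below achieves.  The helper
 * name is hypothetical.
 */
static inline time_t ddf_time_to_unix(__u32 ddf_seconds)
{
	return (time_t)ddf_seconds + DECADE;
}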
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* The DDF metadata handling.
51 * DDF metadata lives at the end of the device.
52 * The last 512 byte block provides an 'anchor' which is used to locate
53 * the rest of the metadata which usually lives immediately behind the anchor.
54 *
55 * Note:
56 * - all multibyte numeric fields are bigendian.
57 * - all strings are space padded.
58 *
59 */
60
61 typedef struct __be16 {
62 __u16 _v16;
63 } be16;
64 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
65 #define be16_and(x, y) ((x)._v16 & (y)._v16)
66 #define be16_or(x, y) ((x)._v16 | (y)._v16)
67 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
68 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
69
70 typedef struct __be32 {
71 __u32 _v32;
72 } be32;
73 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
74
75 typedef struct __be64 {
76 __u64 _v64;
77 } be64;
78 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
79
80 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
81 static inline be16 cpu_to_be16(__u16 x)
82 {
83 be16 be = { ._v16 = __cpu_to_be16(x) };
84 return be;
85 }
86
87 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
88 static inline be32 cpu_to_be32(__u32 x)
89 {
90 be32 be = { ._v32 = __cpu_to_be32(x) };
91 return be;
92 }
93
94 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
95 static inline be64 cpu_to_be64(__u64 x)
96 {
97 be64 be = { ._v64 = __cpu_to_be64(x) };
98 return be;
99 }
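
/* Illustrative sketch, not part of the original file: because be16/be32/be64
 * are single-member structs rather than plain integers, mixing byte orders is
 * a compile error - e.g. "hdr->seq + 1" will not compile.  Updating a
 * big-endian field therefore always takes the explicit round trip shown
 * below (compare _ddf_set_updates_pending()).  The helper name is
 * hypothetical.
 */
static inline be32 be32_increment(be32 v)
{
	return cpu_to_be32(be32_to_cpu(v) + 1);
}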
100
101 /* Primary Raid Level (PRL) */
102 #define DDF_RAID0 0x00
103 #define DDF_RAID1 0x01
104 #define DDF_RAID3 0x03
105 #define DDF_RAID4 0x04
106 #define DDF_RAID5 0x05
107 #define DDF_RAID1E 0x11
108 #define DDF_JBOD 0x0f
109 #define DDF_CONCAT 0x1f
110 #define DDF_RAID5E 0x15
111 #define DDF_RAID5EE 0x25
112 #define DDF_RAID6 0x06
113
114 /* Raid Level Qualifier (RLQ) */
115 #define DDF_RAID0_SIMPLE 0x00
116 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
117 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
118 #define DDF_RAID3_0 0x00 /* parity in first extent */
119 #define DDF_RAID3_N 0x01 /* parity in last extent */
120 #define DDF_RAID4_0 0x00 /* parity in first extent */
121 #define DDF_RAID4_N 0x01 /* parity in last extent */
122 /* these apply to raid5e and raid5ee as well */
123 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
124 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
125 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
126 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
127
128 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
129 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
130
131 /* Secondary RAID Level (SRL) */
132 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
133 #define DDF_2MIRRORED 0x01
134 #define DDF_2CONCAT 0x02
135 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
136
137 /* Magic numbers */
138 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
139 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
140 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
141 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
142 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
143 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
144 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
145 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
146 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
147 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
148
149 #define DDF_GUID_LEN 24
150 #define DDF_REVISION_0 "01.00.00"
151 #define DDF_REVISION_2 "01.02.00"
152
153 struct ddf_header {
154 be32 magic; /* DDF_HEADER_MAGIC */
155 be32 crc;
156 char guid[DDF_GUID_LEN];
157 char revision[8]; /* 01.02.00 */
158 be32 seq; /* starts at '1' */
159 be32 timestamp;
160 __u8 openflag;
161 __u8 foreignflag;
162 __u8 enforcegroups;
163 __u8 pad0; /* 0xff */
164 __u8 pad1[12]; /* 12 * 0xff */
165 /* 64 bytes so far */
166 __u8 header_ext[32]; /* reserved: fill with 0xff */
167 be64 primary_lba;
168 be64 secondary_lba;
169 __u8 type;
170 __u8 pad2[3]; /* 0xff */
171 be32 workspace_len; /* sectors for vendor space -
172 * at least 32768(sectors) */
173 be64 workspace_lba;
174 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
175 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
176 be16 max_partitions; /* i.e. max num of configuration
177 record entries per disk */
178 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
179 *12/512) */
180 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
181 __u8 pad3[54]; /* 0xff */
182 /* 192 bytes so far */
183 be32 controller_section_offset;
184 be32 controller_section_length;
185 be32 phys_section_offset;
186 be32 phys_section_length;
187 be32 virt_section_offset;
188 be32 virt_section_length;
189 be32 config_section_offset;
190 be32 config_section_length;
191 be32 data_section_offset;
192 be32 data_section_length;
193 be32 bbm_section_offset;
194 be32 bbm_section_length;
195 be32 diag_space_offset;
196 be32 diag_space_length;
197 be32 vendor_offset;
198 be32 vendor_length;
199 /* 256 bytes so far */
200 __u8 pad4[256]; /* 0xff */
201 };
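
/* Illustrative check, not part of the original file: the running byte counts
 * in the comments above should add up to one 512-byte sector, which is
 * exactly how much load_ddf_header()/load_ddf_headers() read into this
 * struct.  A compile-time sanity check of that assumption (hypothetical
 * name):
 */
extern char ddf_header_is_one_sector[sizeof(struct ddf_header) == 512 ? 1 : -1];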
202
203 /* type field */
204 #define DDF_HEADER_ANCHOR 0x00
205 #define DDF_HEADER_PRIMARY 0x01
206 #define DDF_HEADER_SECONDARY 0x02
207
208 /* The content of the 'controller section' - global scope */
209 struct ddf_controller_data {
210 be32 magic; /* DDF_CONTROLLER_MAGIC */
211 be32 crc;
212 char guid[DDF_GUID_LEN];
213 struct controller_type {
214 be16 vendor_id;
215 be16 device_id;
216 be16 sub_vendor_id;
217 be16 sub_device_id;
218 } type;
219 char product_id[16];
220 __u8 pad[8]; /* 0xff */
221 __u8 vendor_data[448];
222 };
223
224 /* The content of phys_section - global scope */
225 struct phys_disk {
226 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
227 be32 crc;
228 be16 used_pdes;
229 be16 max_pdes;
230 __u8 pad[52];
231 struct phys_disk_entry {
232 char guid[DDF_GUID_LEN];
233 be32 refnum;
234 be16 type;
235 be16 state;
236 be64 config_size; /* DDF structures must be after here */
237 char path[18]; /* another horrible structure really */
238 __u8 pad[6];
239 } entries[0];
240 };
241
242 /* phys_disk_entry.type is a bitmap - bigendian remember */
243 #define DDF_Forced_PD_GUID 1
244 #define DDF_Active_in_VD 2
245 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
246 #define DDF_Spare 8 /* overrides Global_spare */
247 #define DDF_Foreign 16
248 #define DDF_Legacy 32 /* no DDF on this device */
249
250 #define DDF_Interface_mask 0xf00
251 #define DDF_Interface_SCSI 0x100
252 #define DDF_Interface_SAS 0x200
253 #define DDF_Interface_SATA 0x300
254 #define DDF_Interface_FC 0x400
255
256 /* phys_disk_entry.state is a bigendian bitmap */
257 #define DDF_Online 1
258 #define DDF_Failed 2 /* overrides 1,4,8 */
259 #define DDF_Rebuilding 4
260 #define DDF_Transition 8
261 #define DDF_SMART 16
262 #define DDF_ReadErrors 32
263 #define DDF_Missing 64
264
265 /* The content of the virt_section global scope */
266 struct virtual_disk {
267 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
268 be32 crc;
269 be16 populated_vdes;
270 be16 max_vdes;
271 __u8 pad[52];
272 struct virtual_entry {
273 char guid[DDF_GUID_LEN];
274 be16 unit;
275 __u16 pad0; /* 0xffff */
276 be16 guid_crc;
277 be16 type;
278 __u8 state;
279 __u8 init_state;
280 __u8 pad1[14];
281 char name[16];
282 } entries[0];
283 };
284
285 /* virtual_entry.type is a bitmap - bigendian */
286 #define DDF_Shared 1
287 #define DDF_Enforce_Groups 2
288 #define DDF_Unicode 4
289 #define DDF_Owner_Valid 8
290
291 /* virtual_entry.state is a bigendian bitmap */
292 #define DDF_state_mask 0x7
293 #define DDF_state_optimal 0x0
294 #define DDF_state_degraded 0x1
295 #define DDF_state_deleted 0x2
296 #define DDF_state_missing 0x3
297 #define DDF_state_failed 0x4
298 #define DDF_state_part_optimal 0x5
299
300 #define DDF_state_morphing 0x8
301 #define DDF_state_inconsistent 0x10
302
303 /* virtual_entry.init_state is a bigendian bitmap */
304 #define DDF_initstate_mask 0x03
305 #define DDF_init_not 0x00
306 #define DDF_init_quick 0x01 /* initialisation in progress.
307 * i.e. 'state_inconsistent' */
308 #define DDF_init_full 0x02
309
310 #define DDF_access_mask 0xc0
311 #define DDF_access_rw 0x00
312 #define DDF_access_ro 0x80
313 #define DDF_access_blocked 0xc0
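
/* Illustrative sketch, not part of the original file: a virtual_entry state
 * byte splits into a base state (DDF_state_mask) plus modifier flags
 * (DDF_state_morphing, DDF_state_inconsistent); examine_vds() below prints
 * the same decoding.  The helper name is hypothetical.
 */
static inline int vd_is_usable(const struct virtual_entry *ve)
{
	int base = ve->state & DDF_state_mask;

	return base == DDF_state_optimal ||
		base == DDF_state_degraded ||
		base == DDF_state_part_optimal;
}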
314
315 /* The content of the config_section - local scope
316 * It holds multiple records, each config_record_len sectors long.
317 * They can be vd_config or spare_assign
318 */
319
320 struct vd_config {
321 be32 magic; /* DDF_VD_CONF_MAGIC */
322 be32 crc;
323 char guid[DDF_GUID_LEN];
324 be32 timestamp;
325 be32 seqnum;
326 __u8 pad0[24];
327 be16 prim_elmnt_count;
328 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
329 __u8 prl;
330 __u8 rlq;
331 __u8 sec_elmnt_count;
332 __u8 sec_elmnt_seq;
333 __u8 srl;
334 be64 blocks; /* blocks per component could be different
335 * on different component devices...(only
336 * for concat I hope) */
337 be64 array_blocks; /* blocks in array */
338 __u8 pad1[8];
339 be32 spare_refs[8];
340 __u8 cache_pol[8];
341 __u8 bg_rate;
342 __u8 pad2[3];
343 __u8 pad3[52];
344 __u8 pad4[192];
345 __u8 v0[32]; /* reserved- 0xff */
346 __u8 v1[32]; /* reserved- 0xff */
347 __u8 v2[16]; /* reserved- 0xff */
348 __u8 v3[16]; /* reserved- 0xff */
349 __u8 vendor[32];
350 be32 phys_refnum[0]; /* refnum of each disk in sequence */
351 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
352 bvd are always the same size */
353 };
354 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
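
/* Illustrative sketch, not part of the original file: the starting LBA of
 * component 'i' of a BVD lives in the be64 array that LBA_OFFSET() locates
 * directly after the mppe phys_refnum slots ('mppe' being
 * max_primary_element_entries as cached in struct ddf_super).  The helper
 * name is hypothetical.
 */
static inline unsigned long long bvd_component_lba(const struct vd_config *vd,
						   unsigned int mppe,
						   unsigned int i)
{
	const be64 *lba = (const be64 *)&vd->phys_refnum[mppe];

	return be64_to_cpu(lba[i]);
}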
355
356 /* vd_config.cache_pol[7] is a bitmap */
357 #define DDF_cache_writeback 1 /* else writethrough */
358 #define DDF_cache_wadaptive 2 /* only applies if writeback */
359 #define DDF_cache_readahead 4
360 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
361 #define DDF_cache_ifnobatt 16 /* write-cache even if the battery is poor */
362 #define DDF_cache_wallowed 32 /* enable write caching */
363 #define DDF_cache_rallowed 64 /* enable read caching */
364
365 struct spare_assign {
366 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
367 be32 crc;
368 be32 timestamp;
369 __u8 reserved[7];
370 __u8 type;
371 be16 populated; /* SAEs used */
372 be16 max; /* max SAEs */
373 __u8 pad[8];
374 struct spare_assign_entry {
375 char guid[DDF_GUID_LEN];
376 be16 secondary_element;
377 __u8 pad[6];
378 } spare_ents[0];
379 };
380 /* spare_assign.type is a bitmap */
381 #define DDF_spare_dedicated 0x1 /* else global */
382 #define DDF_spare_revertible 0x2 /* else committable */
383 #define DDF_spare_active 0x4 /* else not active */
384 #define DDF_spare_affinity 0x8 /* enclosure affinity */
385
386 /* The data_section contents - local scope */
387 struct disk_data {
388 be32 magic; /* DDF_PHYS_DATA_MAGIC */
389 be32 crc;
390 char guid[DDF_GUID_LEN];
391 be32 refnum; /* crc of some magic drive data ... */
392 __u8 forced_ref; /* set when above was not result of magic */
393 __u8 forced_guid; /* set if guid was forced rather than magic */
394 __u8 vendor[32];
395 __u8 pad[442];
396 };
397
398 /* bbm_section content */
399 struct bad_block_log {
400 be32 magic;
401 be32 crc;
402 be16 entry_count;
403 be32 spare_count;
404 __u8 pad[10];
405 be64 first_spare;
406 struct mapped_block {
407 be64 defective_start;
408 be32 replacement_start;
409 be16 remap_count;
410 __u8 pad[2];
411 } entries[0];
412 };
413
414 /* Struct for internally holding ddf structures */
415 /* The DDF structure stored on each device is potentially
416 * quite different, as some data is global and some is local.
417 * The global data is:
418 * - ddf header
419 * - controller_data
420 * - Physical disk records
421 * - Virtual disk records
422 * The local data is:
423 * - Configuration records
424 * - Physical Disk data section
425 * ( and Bad block and vendor which I don't care about yet).
426 *
427 * The local data is parsed into separate lists as it is read
428 * and reconstructed for writing. This means that we only need
429 * to make config changes once and they are automatically
430 * propagated to all devices.
431 * Note that the ddf_super has space for the conf and disk data
432 * for this disk and also for a list of all such data.
433 * The list is only used for the superblock that is being
434 * built in Create or Assemble to describe the whole array.
435 */
436 struct ddf_super {
437 struct ddf_header anchor, primary, secondary;
438 struct ddf_controller_data controller;
439 struct ddf_header *active;
440 struct phys_disk *phys;
441 struct virtual_disk *virt;
442 int pdsize, vdsize;
443 unsigned int max_part, mppe, conf_rec_len;
444 int currentdev;
445 int updates_pending;
446 struct vcl {
447 union {
448 char space[512];
449 struct {
450 struct vcl *next;
451 unsigned int vcnum; /* index into ->virt */
452 struct vd_config **other_bvds;
453 __u64 *block_sizes; /* NULL if all the same */
454 };
455 };
456 struct vd_config conf;
457 } *conflist, *currentconf;
458 struct dl {
459 union {
460 char space[512];
461 struct {
462 struct dl *next;
463 int major, minor;
464 char *devname;
465 int fd;
466 unsigned long long size; /* sectors */
467 be64 primary_lba; /* sectors */
468 be64 secondary_lba; /* sectors */
469 be64 workspace_lba; /* sectors */
470 int pdnum; /* index in ->phys */
471 struct spare_assign *spare;
472 void *mdupdate; /* hold metadata update */
473
474 /* These fields used by auto-layout */
475 int raiddisk; /* slot to fill in autolayout */
476 __u64 esize;
477 };
478 };
479 struct disk_data disk;
480 struct vcl *vlist[0]; /* max_part in size */
481 } *dlist, *add_list;
482 };
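
/* Illustrative sketch, not part of the original file: the usual way the
 * in-memory lists above are walked - every loaded disk ('struct dl' on
 * ->dlist) carries a vlist[] of the virtual disks ('struct vcl') it
 * participates in.  The helper name is hypothetical.
 */
static inline unsigned int count_vcl_references(const struct ddf_super *ddf)
{
	const struct dl *dl;
	unsigned int i, n = 0;

	for (dl = ddf->dlist; dl; dl = dl->next)
		for (i = 0; i < ddf->max_part; i++)
			if (dl->vlist[i])
				n++;
	return n;
}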
483
484 #ifndef offsetof
485 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
486 #endif
487
488 #if DEBUG
489 static int all_ff(const char *guid);
490 static void pr_state(struct ddf_super *ddf, const char *msg)
491 {
492 unsigned int i;
493 dprintf("%s/%s: ", __func__, msg);
494 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
495 if (all_ff(ddf->virt->entries[i].guid))
496 continue;
497 dprintf("%u(s=%02x i=%02x) ", i,
498 ddf->virt->entries[i].state,
499 ddf->virt->entries[i].init_state);
500 }
501 dprintf("\n");
502 }
503 #else
504 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
505 #endif
506
507 static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
508 {
509 ddf->updates_pending = 1;
510 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
511 pr_state(ddf, func);
512 }
513
514 #define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)
515
516 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
517 be32 refnum, unsigned int nmax,
518 const struct vd_config **bvd,
519 unsigned int *idx);
520
521 static be32 calc_crc(void *buf, int len)
522 {
523 /* crcs are always at the same place as in the ddf_header */
524 struct ddf_header *ddf = buf;
525 be32 oldcrc = ddf->crc;
526 __u32 newcrc;
527 ddf->crc = cpu_to_be32(0xffffffff);
528
529 newcrc = crc32(0, buf, len);
530 ddf->crc = oldcrc;
531 /* The crc is stored (like everything) bigendian, so convert
532 * here for simplicity
533 */
534 return cpu_to_be32(newcrc);
535 }
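
/* Illustrative sketch, not part of the original file: every DDF section is
 * validated the same way - recompute the CRC with the stored crc field
 * masked out (calc_crc() handles the masking) and compare with the value on
 * disk, as the header and config checks later in this file do.  The helper
 * name is hypothetical.
 */
static inline int ddf_crc_matches(void *section, int len)
{
	/* the crc field sits at the same offset in every section type */
	struct ddf_header *h = section;

	return be32_eq(calc_crc(section, len), h->crc);
}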
536
537 #define DDF_INVALID_LEVEL 0xff
538 #define DDF_NO_SECONDARY 0xff
539 static int err_bad_md_layout(const mdu_array_info_t *array)
540 {
541 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
542 array->level, array->layout, array->raid_disks);
543 return -1;
544 }
545
546 static int layout_md2ddf(const mdu_array_info_t *array,
547 struct vd_config *conf)
548 {
549 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
550 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
551 __u8 sec_elmnt_count = 1;
552 __u8 srl = DDF_NO_SECONDARY;
553
554 switch (array->level) {
555 case LEVEL_LINEAR:
556 prl = DDF_CONCAT;
557 break;
558 case 0:
559 rlq = DDF_RAID0_SIMPLE;
560 prl = DDF_RAID0;
561 break;
562 case 1:
563 switch (array->raid_disks) {
564 case 2:
565 rlq = DDF_RAID1_SIMPLE;
566 break;
567 case 3:
568 rlq = DDF_RAID1_MULTI;
569 break;
570 default:
571 return err_bad_md_layout(array);
572 }
573 prl = DDF_RAID1;
574 break;
575 case 4:
576 if (array->layout != 0)
577 return err_bad_md_layout(array);
578 rlq = DDF_RAID4_N;
579 prl = DDF_RAID4;
580 break;
581 case 5:
582 switch (array->layout) {
583 case ALGORITHM_LEFT_ASYMMETRIC:
584 rlq = DDF_RAID5_N_RESTART;
585 break;
586 case ALGORITHM_RIGHT_ASYMMETRIC:
587 rlq = DDF_RAID5_0_RESTART;
588 break;
589 case ALGORITHM_LEFT_SYMMETRIC:
590 rlq = DDF_RAID5_N_CONTINUE;
591 break;
592 case ALGORITHM_RIGHT_SYMMETRIC:
593 /* not mentioned in standard */
594 default:
595 return err_bad_md_layout(array);
596 }
597 prl = DDF_RAID5;
598 break;
599 case 6:
600 switch (array->layout) {
601 case ALGORITHM_ROTATING_N_RESTART:
602 rlq = DDF_RAID5_N_RESTART;
603 break;
604 case ALGORITHM_ROTATING_ZERO_RESTART:
605 rlq = DDF_RAID6_0_RESTART;
606 break;
607 case ALGORITHM_ROTATING_N_CONTINUE:
608 rlq = DDF_RAID5_N_CONTINUE;
609 break;
610 default:
611 return err_bad_md_layout(array);
612 }
613 prl = DDF_RAID6;
614 break;
615 case 10:
616 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
617 rlq = DDF_RAID1_SIMPLE;
618 prim_elmnt_count = cpu_to_be16(2);
619 sec_elmnt_count = array->raid_disks / 2;
620 } else if (array->raid_disks % 3 == 0
621 && array->layout == 0x103) {
622 rlq = DDF_RAID1_MULTI;
623 prim_elmnt_count = cpu_to_be16(3);
624 sec_elmnt_count = array->raid_disks / 3;
625 } else
626 return err_bad_md_layout(array);
627 srl = DDF_2SPANNED;
628 prl = DDF_RAID1;
629 break;
630 default:
631 return err_bad_md_layout(array);
632 }
633 conf->prl = prl;
634 conf->prim_elmnt_count = prim_elmnt_count;
635 conf->rlq = rlq;
636 conf->srl = srl;
637 conf->sec_elmnt_count = sec_elmnt_count;
638 return 0;
639 }
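
/* Illustrative example, not part of the original file: an md RAID5 array with
 * the default left-symmetric layout maps to PRL=DDF_RAID5,
 * RLQ=DDF_RAID5_N_CONTINUE and no secondary level:
 *
 *	mdu_array_info_t a = { .level = 5, .raid_disks = 4,
 *			       .layout = ALGORITHM_LEFT_SYMMETRIC };
 *	struct vd_config c;
 *	layout_md2ddf(&a, &c);	// c.prl == DDF_RAID5,
 *				// c.rlq == DDF_RAID5_N_CONTINUE,
 *				// c.srl == DDF_NO_SECONDARY
 */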
640
641 static int err_bad_ddf_layout(const struct vd_config *conf)
642 {
643 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
644 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
645 return -1;
646 }
647
648 static int layout_ddf2md(const struct vd_config *conf,
649 mdu_array_info_t *array)
650 {
651 int level = LEVEL_UNSUPPORTED;
652 int layout = 0;
653 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
654
655 if (conf->sec_elmnt_count > 1) {
656 /* see also check_secondary() */
657 if (conf->prl != DDF_RAID1 ||
658 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
659 pr_err("Unsupported secondary RAID level %u/%u\n",
660 conf->prl, conf->srl);
661 return -1;
662 }
663 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
664 layout = 0x102;
665 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
666 layout = 0x103;
667 else
668 return err_bad_ddf_layout(conf);
669 raiddisks *= conf->sec_elmnt_count;
670 level = 10;
671 goto good;
672 }
673
674 switch (conf->prl) {
675 case DDF_CONCAT:
676 level = LEVEL_LINEAR;
677 break;
678 case DDF_RAID0:
679 if (conf->rlq != DDF_RAID0_SIMPLE)
680 return err_bad_ddf_layout(conf);
681 level = 0;
682 break;
683 case DDF_RAID1:
684 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
685 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
686 return err_bad_ddf_layout(conf);
687 level = 1;
688 break;
689 case DDF_RAID4:
690 if (conf->rlq != DDF_RAID4_N)
691 return err_bad_ddf_layout(conf);
692 level = 4;
693 break;
694 case DDF_RAID5:
695 switch (conf->rlq) {
696 case DDF_RAID5_N_RESTART:
697 layout = ALGORITHM_LEFT_ASYMMETRIC;
698 break;
699 case DDF_RAID5_0_RESTART:
700 layout = ALGORITHM_RIGHT_ASYMMETRIC;
701 break;
702 case DDF_RAID5_N_CONTINUE:
703 layout = ALGORITHM_LEFT_SYMMETRIC;
704 break;
705 default:
706 return err_bad_ddf_layout(conf);
707 }
708 level = 5;
709 break;
710 case DDF_RAID6:
711 switch (conf->rlq) {
712 case DDF_RAID5_N_RESTART:
713 layout = ALGORITHM_ROTATING_N_RESTART;
714 break;
715 case DDF_RAID6_0_RESTART:
716 layout = ALGORITHM_ROTATING_ZERO_RESTART;
717 break;
718 case DDF_RAID5_N_CONTINUE:
719 layout = ALGORITHM_ROTATING_N_CONTINUE;
720 break;
721 default:
722 return err_bad_ddf_layout(conf);
723 }
724 level = 6;
725 break;
726 default:
727 return err_bad_ddf_layout(conf);
728 };
729
730 good:
731 array->level = level;
732 array->layout = layout;
733 array->raid_disks = raiddisks;
734 return 0;
735 }
736
737 static int load_ddf_header(int fd, unsigned long long lba,
738 unsigned long long size,
739 int type,
740 struct ddf_header *hdr, struct ddf_header *anchor)
741 {
742 /* read a ddf header (primary or secondary) from fd/lba
743 * and check that it is consistent with anchor
744 * Need to check:
745 * magic, crc, guid, rev, and LBA's header_type, and
746 * everything after header_type must be the same
747 */
748 if (lba >= size-1)
749 return 0;
750
751 if (lseek64(fd, lba<<9, 0) < 0)
752 return 0;
753
754 if (read(fd, hdr, 512) != 512)
755 return 0;
756
757 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
758 pr_err("%s: bad header magic\n", __func__);
759 return 0;
760 }
761 if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
762 pr_err("%s: bad CRC\n", __func__);
763 return 0;
764 }
765 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
766 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
767 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
768 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
769 hdr->type != type ||
770 memcmp(anchor->pad2, hdr->pad2, 512 -
771 offsetof(struct ddf_header, pad2)) != 0) {
772 pr_err("%s: header mismatch\n", __func__);
773 return 0;
774 }
775
776 /* Looks good enough to me... */
777 return 1;
778 }
779
780 static void *load_section(int fd, struct ddf_super *super, void *buf,
781 be32 offset_be, be32 len_be, int check)
782 {
783 unsigned long long offset = be32_to_cpu(offset_be);
784 unsigned long long len = be32_to_cpu(len_be);
785 int dofree = (buf == NULL);
786
787 if (check)
788 if (len != 2 && len != 8 && len != 32
789 && len != 128 && len != 512)
790 return NULL;
791
792 if (len > 1024)
793 return NULL;
794 if (buf) {
795 /* All pre-allocated sections are a single block */
796 if (len != 1)
797 return NULL;
798 } else if (posix_memalign(&buf, 512, len<<9) != 0)
799 buf = NULL;
800
801 if (!buf)
802 return NULL;
803
804 if (super->active->type == 1)
805 offset += be64_to_cpu(super->active->primary_lba);
806 else
807 offset += be64_to_cpu(super->active->secondary_lba);
808
809 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
810 if (dofree)
811 free(buf);
812 return NULL;
813 }
814 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
815 if (dofree)
816 free(buf);
817 return NULL;
818 }
819 return buf;
820 }
821
822 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
823 {
824 unsigned long long dsize;
825
826 get_dev_size(fd, NULL, &dsize);
827
828 if (lseek64(fd, dsize-512, 0) < 0) {
829 if (devname)
830 pr_err("Cannot seek to anchor block on %s: %s\n",
831 devname, strerror(errno));
832 return 1;
833 }
834 if (read(fd, &super->anchor, 512) != 512) {
835 if (devname)
836 pr_err("Cannot read anchor block on %s: %s\n",
837 devname, strerror(errno));
838 return 1;
839 }
840 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
841 if (devname)
842 pr_err("no DDF anchor found on %s\n",
843 devname);
844 return 2;
845 }
846 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
847 if (devname)
848 pr_err("bad CRC on anchor on %s\n",
849 devname);
850 return 2;
851 }
852 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
853 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
854 if (devname)
855 pr_err("can only support super revision"
856 " %.8s and earlier, not %.8s on %s\n",
857 DDF_REVISION_2, super->anchor.revision,devname);
858 return 2;
859 }
860 super->active = NULL;
861 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
862 dsize >> 9, 1,
863 &super->primary, &super->anchor) == 0) {
864 if (devname)
865 pr_err("Failed to load primary DDF header "
866 "on %s\n", devname);
867 } else
868 super->active = &super->primary;
869
870 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
871 dsize >> 9, 2,
872 &super->secondary, &super->anchor)) {
873 if (super->active == NULL
874 || (be32_to_cpu(super->primary.seq)
875 < be32_to_cpu(super->secondary.seq) &&
876 !super->secondary.openflag)
877 || (be32_to_cpu(super->primary.seq)
878 == be32_to_cpu(super->secondary.seq) &&
879 super->primary.openflag && !super->secondary.openflag)
880 )
881 super->active = &super->secondary;
882 } else if (devname)
883 pr_err("Failed to load secondary DDF header on %s\n",
884 devname);
885 if (super->active == NULL)
886 return 2;
887 return 0;
888 }
889
890 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
891 {
892 void *ok;
893 ok = load_section(fd, super, &super->controller,
894 super->active->controller_section_offset,
895 super->active->controller_section_length,
896 0);
897 super->phys = load_section(fd, super, NULL,
898 super->active->phys_section_offset,
899 super->active->phys_section_length,
900 1);
901 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
902
903 super->virt = load_section(fd, super, NULL,
904 super->active->virt_section_offset,
905 super->active->virt_section_length,
906 1);
907 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
908 if (!ok ||
909 !super->phys ||
910 !super->virt) {
911 free(super->phys);
912 free(super->virt);
913 super->phys = NULL;
914 super->virt = NULL;
915 return 2;
916 }
917 super->conflist = NULL;
918 super->dlist = NULL;
919
920 super->max_part = be16_to_cpu(super->active->max_partitions);
921 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
922 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
923 return 0;
924 }
925
926 #define DDF_UNUSED_BVD 0xff
927 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
928 {
929 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
930 unsigned int i, vdsize;
931 void *p;
932 if (n_vds == 0) {
933 vcl->other_bvds = NULL;
934 return 0;
935 }
936 vdsize = ddf->conf_rec_len * 512;
937 if (posix_memalign(&p, 512, n_vds *
938 (vdsize + sizeof(struct vd_config *))) != 0)
939 return -1;
940 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
941 for (i = 0; i < n_vds; i++) {
942 vcl->other_bvds[i] = p + i * vdsize;
943 memset(vcl->other_bvds[i], 0, vdsize);
944 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
945 }
946 return 0;
947 }
948
949 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
950 unsigned int len)
951 {
952 int i;
953 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
954 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
955 break;
956
957 if (i < vcl->conf.sec_elmnt_count-1) {
958 if (be32_to_cpu(vd->seqnum) <=
959 be32_to_cpu(vcl->other_bvds[i]->seqnum))
960 return;
961 } else {
962 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
963 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
964 break;
965 if (i == vcl->conf.sec_elmnt_count-1) {
966 pr_err("no space for sec level config %u, count is %u\n",
967 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
968 return;
969 }
970 }
971 memcpy(vcl->other_bvds[i], vd, len);
972 }
973
974 static int load_ddf_local(int fd, struct ddf_super *super,
975 char *devname, int keep)
976 {
977 struct dl *dl;
978 struct stat stb;
979 char *conf;
980 unsigned int i;
981 unsigned int confsec;
982 int vnum;
983 unsigned int max_virt_disks = be16_to_cpu
984 (super->active->max_vd_entries);
985 unsigned long long dsize;
986
987 /* First the local disk info */
988 if (posix_memalign((void**)&dl, 512,
989 sizeof(*dl) +
990 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
991 pr_err("%s could not allocate disk info buffer\n",
992 __func__);
993 return 1;
994 }
995
996 load_section(fd, super, &dl->disk,
997 super->active->data_section_offset,
998 super->active->data_section_length,
999 0);
1000 dl->devname = devname ? xstrdup(devname) : NULL;
1001
1002 fstat(fd, &stb);
1003 dl->major = major(stb.st_rdev);
1004 dl->minor = minor(stb.st_rdev);
1005 dl->next = super->dlist;
1006 dl->fd = keep ? fd : -1;
1007
1008 dl->size = 0;
1009 if (get_dev_size(fd, devname, &dsize))
1010 dl->size = dsize >> 9;
1011 /* If the disks have different sizes, the LBAs will differ
1012 * between phys disks.
1013 * At this point here, the values in super->active must be valid
1014 * for this phys disk. */
1015 dl->primary_lba = super->active->primary_lba;
1016 dl->secondary_lba = super->active->secondary_lba;
1017 dl->workspace_lba = super->active->workspace_lba;
1018 dl->spare = NULL;
1019 for (i = 0 ; i < super->max_part ; i++)
1020 dl->vlist[i] = NULL;
1021 super->dlist = dl;
1022 dl->pdnum = -1;
1023 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1024 if (memcmp(super->phys->entries[i].guid,
1025 dl->disk.guid, DDF_GUID_LEN) == 0)
1026 dl->pdnum = i;
1027
1028 /* Now the config list. */
1029 /* 'conf' is an array of config entries, some of which are
1030 * probably invalid. Those which are good need to be copied into
1031 * the conflist
1032 */
1033
1034 conf = load_section(fd, super, NULL,
1035 super->active->config_section_offset,
1036 super->active->config_section_length,
1037 0);
1038
1039 vnum = 0;
1040 for (confsec = 0;
1041 confsec < be32_to_cpu(super->active->config_section_length);
1042 confsec += super->conf_rec_len) {
1043 struct vd_config *vd =
1044 (struct vd_config *)((char*)conf + confsec*512);
1045 struct vcl *vcl;
1046
1047 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1048 if (dl->spare)
1049 continue;
1050 if (posix_memalign((void**)&dl->spare, 512,
1051 super->conf_rec_len*512) != 0) {
1052 pr_err("%s could not allocate spare info buf\n",
1053 __func__);
1054 return 1;
1055 }
1056
1057 memcpy(dl->spare, vd, super->conf_rec_len*512);
1058 continue;
1059 }
1060 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1061 continue;
1062 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1063 if (memcmp(vcl->conf.guid,
1064 vd->guid, DDF_GUID_LEN) == 0)
1065 break;
1066 }
1067
1068 if (vcl) {
1069 dl->vlist[vnum++] = vcl;
1070 if (vcl->other_bvds != NULL &&
1071 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1072 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1073 continue;
1074 }
1075 if (be32_to_cpu(vd->seqnum) <=
1076 be32_to_cpu(vcl->conf.seqnum))
1077 continue;
1078 } else {
1079 if (posix_memalign((void**)&vcl, 512,
1080 (super->conf_rec_len*512 +
1081 offsetof(struct vcl, conf))) != 0) {
1082 pr_err("%s could not allocate vcl buf\n",
1083 __func__);
1084 return 1;
1085 }
1086 vcl->next = super->conflist;
1087 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1088 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1089 if (alloc_other_bvds(super, vcl) != 0) {
1090 pr_err("%s could not allocate other bvds\n",
1091 __func__);
1092 free(vcl);
1093 return 1;
1094 };
1095 super->conflist = vcl;
1096 dl->vlist[vnum++] = vcl;
1097 }
1098 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1099 for (i=0; i < max_virt_disks ; i++)
1100 if (memcmp(super->virt->entries[i].guid,
1101 vcl->conf.guid, DDF_GUID_LEN)==0)
1102 break;
1103 if (i < max_virt_disks)
1104 vcl->vcnum = i;
1105 }
1106 free(conf);
1107
1108 return 0;
1109 }
1110
1111 #ifndef MDASSEMBLE
1112 static int load_super_ddf_all(struct supertype *st, int fd,
1113 void **sbp, char *devname);
1114 #endif
1115
1116 static void free_super_ddf(struct supertype *st);
1117
1118 static int load_super_ddf(struct supertype *st, int fd,
1119 char *devname)
1120 {
1121 unsigned long long dsize;
1122 struct ddf_super *super;
1123 int rv;
1124
1125 if (get_dev_size(fd, devname, &dsize) == 0)
1126 return 1;
1127
1128 if (!st->ignore_hw_compat && test_partition(fd))
1129 /* DDF is not allowed on partitions */
1130 return 1;
1131
1132 /* 32M is a lower bound */
1133 if (dsize <= 32*1024*1024) {
1134 if (devname)
1135 pr_err("%s is too small for ddf: "
1136 "size is %llu sectors.\n",
1137 devname, dsize>>9);
1138 return 1;
1139 }
1140 if (dsize & 511) {
1141 if (devname)
1142 pr_err("%s is an odd size for ddf: "
1143 "size is %llu bytes.\n",
1144 devname, dsize);
1145 return 1;
1146 }
1147
1148 free_super_ddf(st);
1149
1150 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1151 pr_err("malloc of %zu failed.\n",
1152 sizeof(*super));
1153 return 1;
1154 }
1155 memset(super, 0, sizeof(*super));
1156
1157 rv = load_ddf_headers(fd, super, devname);
1158 if (rv) {
1159 free(super);
1160 return rv;
1161 }
1162
1163 /* Have valid headers and have chosen the best. Let's read in the rest */
1164
1165 rv = load_ddf_global(fd, super, devname);
1166
1167 if (rv) {
1168 if (devname)
1169 pr_err("Failed to load all information "
1170 "sections on %s\n", devname);
1171 free(super);
1172 return rv;
1173 }
1174
1175 rv = load_ddf_local(fd, super, devname, 0);
1176
1177 if (rv) {
1178 if (devname)
1179 pr_err("Failed to load all information "
1180 "sections on %s\n", devname);
1181 free(super);
1182 return rv;
1183 }
1184
1185 /* Should possibly check the sections .... */
1186
1187 st->sb = super;
1188 if (st->ss == NULL) {
1189 st->ss = &super_ddf;
1190 st->minor_version = 0;
1191 st->max_devs = 512;
1192 }
1193 return 0;
1194
1195 }
1196
1197 static void free_super_ddf(struct supertype *st)
1198 {
1199 struct ddf_super *ddf = st->sb;
1200 if (ddf == NULL)
1201 return;
1202 free(ddf->phys);
1203 free(ddf->virt);
1204 while (ddf->conflist) {
1205 struct vcl *v = ddf->conflist;
1206 ddf->conflist = v->next;
1207 if (v->block_sizes)
1208 free(v->block_sizes);
1209 if (v->other_bvds)
1210 /*
1211 v->other_bvds[0] points to beginning of buffer,
1212 see alloc_other_bvds()
1213 */
1214 free(v->other_bvds[0]);
1215 free(v);
1216 }
1217 while (ddf->dlist) {
1218 struct dl *d = ddf->dlist;
1219 ddf->dlist = d->next;
1220 if (d->fd >= 0)
1221 close(d->fd);
1222 if (d->spare)
1223 free(d->spare);
1224 free(d);
1225 }
1226 while (ddf->add_list) {
1227 struct dl *d = ddf->add_list;
1228 ddf->add_list = d->next;
1229 if (d->fd >= 0)
1230 close(d->fd);
1231 if (d->spare)
1232 free(d->spare);
1233 free(d);
1234 }
1235 free(ddf);
1236 st->sb = NULL;
1237 }
1238
1239 static struct supertype *match_metadata_desc_ddf(char *arg)
1240 {
1241 /* 'ddf' only supports containers */
1242 struct supertype *st;
1243 if (strcmp(arg, "ddf") != 0 &&
1244 strcmp(arg, "default") != 0
1245 )
1246 return NULL;
1247
1248 st = xcalloc(1, sizeof(*st));
1249 st->ss = &super_ddf;
1250 st->max_devs = 512;
1251 st->minor_version = 0;
1252 st->sb = NULL;
1253 return st;
1254 }
1255
1256 #ifndef MDASSEMBLE
1257
1258 static mapping_t ddf_state[] = {
1259 { "Optimal", 0},
1260 { "Degraded", 1},
1261 { "Deleted", 2},
1262 { "Missing", 3},
1263 { "Failed", 4},
1264 { "Partially Optimal", 5},
1265 { "-reserved-", 6},
1266 { "-reserved-", 7},
1267 { NULL, 0}
1268 };
1269
1270 static mapping_t ddf_init_state[] = {
1271 { "Not Initialised", 0},
1272 { "QuickInit in Progress", 1},
1273 { "Fully Initialised", 2},
1274 { "*UNKNOWN*", 3},
1275 { NULL, 0}
1276 };
1277 static mapping_t ddf_access[] = {
1278 { "Read/Write", 0},
1279 { "Reserved", 1},
1280 { "Read Only", 2},
1281 { "Blocked (no access)", 3},
1282 { NULL ,0}
1283 };
1284
1285 static mapping_t ddf_level[] = {
1286 { "RAID0", DDF_RAID0},
1287 { "RAID1", DDF_RAID1},
1288 { "RAID3", DDF_RAID3},
1289 { "RAID4", DDF_RAID4},
1290 { "RAID5", DDF_RAID5},
1291 { "RAID1E",DDF_RAID1E},
1292 { "JBOD", DDF_JBOD},
1293 { "CONCAT",DDF_CONCAT},
1294 { "RAID5E",DDF_RAID5E},
1295 { "RAID5EE",DDF_RAID5EE},
1296 { "RAID6", DDF_RAID6},
1297 { NULL, 0}
1298 };
1299 static mapping_t ddf_sec_level[] = {
1300 { "Striped", DDF_2STRIPED},
1301 { "Mirrored", DDF_2MIRRORED},
1302 { "Concat", DDF_2CONCAT},
1303 { "Spanned", DDF_2SPANNED},
1304 { NULL, 0}
1305 };
1306 #endif
1307
1308 static int all_ff(const char *guid)
1309 {
1310 int i;
1311 for (i = 0; i < DDF_GUID_LEN; i++)
1312 if (guid[i] != (char)0xff)
1313 return 0;
1314 return 1;
1315 }
1316
1317 static const char *guid_str(const char *guid)
1318 {
1319 static char buf[DDF_GUID_LEN*2+1];
1320 int i;
1321 char *p = buf;
1322 for (i = 0; i < DDF_GUID_LEN; i++) {
1323 unsigned char c = guid[i];
1324 if (c >= 32 && c < 127)
1325 p += sprintf(p, "%c", c);
1326 else
1327 p += sprintf(p, "%02x", c);
1328 }
1329 *p = '\0';
1330 return (const char *) buf;
1331 }
1332
1333 #ifndef MDASSEMBLE
1334 static void print_guid(char *guid, int tstamp)
1335 {
1336 /* A GUID is part (or all) ASCII and part binary.
1337 * They tend to be space padded.
1338 * We print the GUID in HEX, then in parentheses add
1339 * any initial ASCII sequence, and a possible
1340 * time stamp from bytes 16-19
1341 */
1342 int l = DDF_GUID_LEN;
1343 int i;
1344
1345 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1346 if ((i&3)==0 && i != 0) printf(":");
1347 printf("%02X", guid[i]&255);
1348 }
1349
1350 printf("\n (");
1351 while (l && guid[l-1] == ' ')
1352 l--;
1353 for (i=0 ; i<l ; i++) {
1354 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1355 fputc(guid[i], stdout);
1356 else
1357 break;
1358 }
1359 if (tstamp) {
1360 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1361 char tbuf[100];
1362 struct tm *tm;
1363 tm = localtime(&then);
1364 strftime(tbuf, 100, " %D %T",tm);
1365 fputs(tbuf, stdout);
1366 }
1367 printf(")");
1368 }
1369
1370 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1371 {
1372 int crl = sb->conf_rec_len;
1373 struct vcl *vcl;
1374
1375 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1376 unsigned int i;
1377 struct vd_config *vc = &vcl->conf;
1378
1379 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1380 continue;
1381 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1382 continue;
1383
1384 /* Ok, we know about this VD, let's give more details */
1385 printf(" Raid Devices[%d] : %d (", n,
1386 be16_to_cpu(vc->prim_elmnt_count));
1387 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1388 int j;
1389 int cnt = be16_to_cpu(sb->phys->used_pdes);
1390 for (j=0; j<cnt; j++)
1391 if (be32_eq(vc->phys_refnum[i],
1392 sb->phys->entries[j].refnum))
1393 break;
1394 if (i) printf(" ");
1395 if (j < cnt)
1396 printf("%d", j);
1397 else
1398 printf("--");
1399 }
1400 printf(")\n");
1401 if (vc->chunk_shift != 255)
1402 printf(" Chunk Size[%d] : %d sectors\n", n,
1403 1 << vc->chunk_shift);
1404 printf(" Raid Level[%d] : %s\n", n,
1405 map_num(ddf_level, vc->prl)?:"-unknown-");
1406 if (vc->sec_elmnt_count != 1) {
1407 printf(" Secondary Position[%d] : %d of %d\n", n,
1408 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1409 printf(" Secondary Level[%d] : %s\n", n,
1410 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1411 }
1412 printf(" Device Size[%d] : %llu\n", n,
1413 be64_to_cpu(vc->blocks)/2);
1414 printf(" Array Size[%d] : %llu\n", n,
1415 be64_to_cpu(vc->array_blocks)/2);
1416 }
1417 }
1418
1419 static void examine_vds(struct ddf_super *sb)
1420 {
1421 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1422 unsigned int i;
1423 printf(" Virtual Disks : %d\n", cnt);
1424
1425 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1426 struct virtual_entry *ve = &sb->virt->entries[i];
1427 if (all_ff(ve->guid))
1428 continue;
1429 printf("\n");
1430 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1431 printf("\n");
1432 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1433 printf(" state[%d] : %s, %s%s\n", i,
1434 map_num(ddf_state, ve->state & 7),
1435 (ve->state & DDF_state_morphing) ? "Morphing, ": "",
1436 (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
1437 printf(" init state[%d] : %s\n", i,
1438 map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
1439 printf(" access[%d] : %s\n", i,
1440 map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
1441 printf(" Name[%d] : %.16s\n", i, ve->name);
1442 examine_vd(i, sb, ve->guid);
1443 }
1444 if (cnt) printf("\n");
1445 }
1446
1447 static void examine_pds(struct ddf_super *sb)
1448 {
1449 int cnt = be16_to_cpu(sb->phys->used_pdes);
1450 int i;
1451 struct dl *dl;
1452 printf(" Physical Disks : %d\n", cnt);
1453 printf(" Number RefNo Size Device Type/State\n");
1454
1455 for (i=0 ; i<cnt ; i++) {
1456 struct phys_disk_entry *pd = &sb->phys->entries[i];
1457 int type = be16_to_cpu(pd->type);
1458 int state = be16_to_cpu(pd->state);
1459
1460 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1461 //printf("\n");
1462 printf(" %3d %08x ", i,
1463 be32_to_cpu(pd->refnum));
1464 printf("%8lluK ",
1465 be64_to_cpu(pd->config_size)>>1);
1466 for (dl = sb->dlist; dl ; dl = dl->next) {
1467 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1468 char *dv = map_dev(dl->major, dl->minor, 0);
1469 if (dv) {
1470 printf("%-15s", dv);
1471 break;
1472 }
1473 }
1474 }
1475 if (!dl)
1476 printf("%15s","");
1477 printf(" %s%s%s%s%s",
1478 (type&2) ? "active":"",
1479 (type&4) ? "Global-Spare":"",
1480 (type&8) ? "spare" : "",
1481 (type&16)? ", foreign" : "",
1482 (type&32)? "pass-through" : "");
1483 if (state & DDF_Failed)
1484 /* DDF_Failed overrides these three */
1485 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1486 printf("/%s%s%s%s%s%s%s",
1487 (state&1)? "Online": "Offline",
1488 (state&2)? ", Failed": "",
1489 (state&4)? ", Rebuilding": "",
1490 (state&8)? ", in-transition": "",
1491 (state&16)? ", SMART-errors": "",
1492 (state&32)? ", Unrecovered-Read-Errors": "",
1493 (state&64)? ", Missing" : "");
1494 printf("\n");
1495 }
1496 }
1497
1498 static void examine_super_ddf(struct supertype *st, char *homehost)
1499 {
1500 struct ddf_super *sb = st->sb;
1501
1502 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1503 printf(" Version : %.8s\n", sb->anchor.revision);
1504 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1505 printf("\n");
1506 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1507 printf("\n");
1508 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1509 printf(" Redundant hdr : %s\n", be32_eq(sb->secondary.magic,
1510 DDF_HEADER_MAGIC)
1511 ?"yes" : "no");
1512 examine_vds(sb);
1513 examine_pds(sb);
1514 }
1515
1516 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1517
1518 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1519 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1520
1521 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1522 {
1523 /*
1524 * Figure out the VD number for this supertype.
1525 * Returns DDF_CONTAINER for the container itself,
1526 * and DDF_NOTFOUND on error.
1527 */
1528 struct ddf_super *ddf = st->sb;
1529 struct mdinfo *sra;
1530 char *sub, *end;
1531 unsigned int vcnum;
1532
1533 if (*st->container_devnm == '\0')
1534 return DDF_CONTAINER;
1535
1536 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1537 if (!sra || sra->array.major_version != -1 ||
1538 sra->array.minor_version != -2 ||
1539 !is_subarray(sra->text_version))
1540 return DDF_NOTFOUND;
1541
1542 sub = strchr(sra->text_version + 1, '/');
1543 if (sub != NULL)
1544 vcnum = strtoul(sub + 1, &end, 10);
1545 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1546 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1547 return DDF_NOTFOUND;
1548
1549 return vcnum;
1550 }
1551
1552 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1553 {
1554 /* We just write a generic DDF ARRAY entry
1555 */
1556 struct mdinfo info;
1557 char nbuf[64];
1558 getinfo_super_ddf(st, &info, NULL);
1559 fname_from_uuid(st, &info, nbuf, ':');
1560
1561 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1562 }
1563
1564 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1565 {
1566 /* We just write a generic DDF ARRAY entry
1567 */
1568 struct ddf_super *ddf = st->sb;
1569 struct mdinfo info;
1570 unsigned int i;
1571 char nbuf[64];
1572 getinfo_super_ddf(st, &info, NULL);
1573 fname_from_uuid(st, &info, nbuf, ':');
1574
1575 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1576 struct virtual_entry *ve = &ddf->virt->entries[i];
1577 struct vcl vcl;
1578 char nbuf1[64];
1579 if (all_ff(ve->guid))
1580 continue;
1581 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1582 ddf->currentconf =&vcl;
1583 uuid_from_super_ddf(st, info.uuid);
1584 fname_from_uuid(st, &info, nbuf1, ':');
1585 printf("ARRAY container=%s member=%d UUID=%s\n",
1586 nbuf+5, i, nbuf1+5);
1587 }
1588 }
1589
1590 static void export_examine_super_ddf(struct supertype *st)
1591 {
1592 struct mdinfo info;
1593 char nbuf[64];
1594 getinfo_super_ddf(st, &info, NULL);
1595 fname_from_uuid(st, &info, nbuf, ':');
1596 printf("MD_METADATA=ddf\n");
1597 printf("MD_LEVEL=container\n");
1598 printf("MD_UUID=%s\n", nbuf+5);
1599 }
1600
1601 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1602 {
1603 void *buf;
1604 unsigned long long dsize, offset;
1605 int bytes;
1606 struct ddf_header *ddf;
1607 int written = 0;
1608
1609 /* The meta consists of an anchor, a primary, and a secondary.
1610 * This all lives at the end of the device.
1611 * So it is easiest to find the earliest of primary and
1612 * secondary, and copy everything from there.
1613 *
1614 * The anchor is 512 bytes from the end. It contains primary_lba and
1615 * secondary_lba; we choose the earlier of those.
1616 */
1617
1618 if (posix_memalign(&buf, 4096, 4096) != 0)
1619 return 1;
1620
1621 if (!get_dev_size(from, NULL, &dsize))
1622 goto err;
1623
1624 if (lseek64(from, dsize-512, 0) < 0)
1625 goto err;
1626 if (read(from, buf, 512) != 512)
1627 goto err;
1628 ddf = buf;
1629 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1630 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1631 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1632 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1633 goto err;
1634
1635 offset = dsize - 512;
1636 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1637 offset = be64_to_cpu(ddf->primary_lba) << 9;
1638 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1639 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1640
1641 bytes = dsize - offset;
1642
1643 if (lseek64(from, offset, 0) < 0 ||
1644 lseek64(to, offset, 0) < 0)
1645 goto err;
1646 while (written < bytes) {
1647 int n = bytes - written;
1648 if (n > 4096)
1649 n = 4096;
1650 if (read(from, buf, n) != n)
1651 goto err;
1652 if (write(to, buf, n) != n)
1653 goto err;
1654 written += n;
1655 }
1656 free(buf);
1657 return 0;
1658 err:
1659 free(buf);
1660 return 1;
1661 }
1662
1663 static void detail_super_ddf(struct supertype *st, char *homehost)
1664 {
1665 /* FIXME later
1666 * Could print DDF GUID
1667 * Need to find which array
1668 * If whole, briefly list all arrays
1669 * If one, give name
1670 */
1671 }
1672
1673 static void brief_detail_super_ddf(struct supertype *st)
1674 {
1675 struct mdinfo info;
1676 char nbuf[64];
1677 struct ddf_super *ddf = st->sb;
1678 unsigned int vcnum = get_vd_num_of_subarray(st);
1679 if (vcnum == DDF_CONTAINER)
1680 uuid_from_super_ddf(st, info.uuid);
1681 else if (vcnum == DDF_NOTFOUND)
1682 return;
1683 else
1684 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, info.uuid);
1685 fname_from_uuid(st, &info, nbuf,':');
1686 printf(" UUID=%s", nbuf + 5);
1687 }
1688 #endif
1689
1690 static int match_home_ddf(struct supertype *st, char *homehost)
1691 {
1692 /* It matches 'this' host if the controller is a
1693 * Linux-MD controller with vendor_data matching
1694 * the hostname
1695 */
1696 struct ddf_super *ddf = st->sb;
1697 unsigned int len;
1698
1699 if (!homehost)
1700 return 0;
1701 len = strlen(homehost);
1702
1703 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1704 len < sizeof(ddf->controller.vendor_data) &&
1705 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1706 ddf->controller.vendor_data[len] == 0);
1707 }
1708
1709 #ifndef MDASSEMBLE
1710 static int find_index_in_bvd(const struct ddf_super *ddf,
1711 const struct vd_config *conf, unsigned int n,
1712 unsigned int *n_bvd)
1713 {
1714 /*
1715 * Find the index of the n-th valid physical disk in this BVD
1716 */
1717 unsigned int i, j;
1718 for (i = 0, j = 0; i < ddf->mppe &&
1719 j < be16_to_cpu(conf->prim_elmnt_count); i++) {
1720 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1721 if (n == j) {
1722 *n_bvd = i;
1723 return 1;
1724 }
1725 j++;
1726 }
1727 }
1728 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1729 __func__, n, be16_to_cpu(conf->prim_elmnt_count));
1730 return 0;
1731 }
1732
1733 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1734 unsigned int n,
1735 unsigned int *n_bvd, struct vcl **vcl)
1736 {
1737 struct vcl *v;
1738
1739 for (v = ddf->conflist; v; v = v->next) {
1740 unsigned int nsec, ibvd = 0;
1741 struct vd_config *conf;
1742 if (inst != v->vcnum)
1743 continue;
1744 conf = &v->conf;
1745 if (conf->sec_elmnt_count == 1) {
1746 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1747 *vcl = v;
1748 return conf;
1749 } else
1750 goto bad;
1751 }
1752 if (v->other_bvds == NULL) {
1753 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1754 __func__, conf->sec_elmnt_count);
1755 goto bad;
1756 }
1757 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1758 if (conf->sec_elmnt_seq != nsec) {
1759 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1760 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1761 == nsec)
1762 break;
1763 }
1764 if (ibvd == conf->sec_elmnt_count)
1765 goto bad;
1766 conf = v->other_bvds[ibvd-1];
1767 }
1768 if (!find_index_in_bvd(ddf, conf,
1769 n - nsec*conf->sec_elmnt_count, n_bvd))
1770 goto bad;
1771 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1772 , __func__, n, *n_bvd, ibvd, inst);
1773 *vcl = v;
1774 return conf;
1775 }
1776 bad:
1777 pr_err("%s: Couldn't find disk %d in array %u\n", __func__, n, inst);
1778 return NULL;
1779 }
1780 #endif
1781
1782 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1783 {
1784 /* Find the entry in phys_disk which has the given refnum
1785 * and return its index
1786 */
1787 unsigned int i;
1788 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1789 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1790 return i;
1791 return -1;
1792 }
1793
1794 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1795 {
1796 char buf[20];
1797 struct sha1_ctx ctx;
1798 sha1_init_ctx(&ctx);
1799 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1800 sha1_finish_ctx(&ctx, buf);
1801 memcpy(uuid, buf, 4*4);
1802 }
1803
1804 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1805 {
1806 /* The uuid returned here is used for:
1807 * uuid to put into bitmap file (Create, Grow)
1808 * uuid for backup header when saving critical section (Grow)
1809 * comparing uuids when re-adding a device into an array
1810 * In these cases the uuid required is that of the data-array,
1811 * not the device-set.
1812 * uuid to recognise same set when adding a missing device back
1813 * to an array. This is a uuid for the device-set.
1814 *
1815 * For each of these we can make do with a truncated
1816 * or hashed uuid rather than the original, as long as
1817 * everyone agrees.
1818 * In the case of SVD we assume the BVD is of interest,
1819 * though that might be the case if a bitmap were made for
1820 * a mirrored SVD - worry about that later.
1821 * So we need to find the VD configuration record for the
1822 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1823 * The first 16 bytes of the sha1 of these are used.
1824 */
1825 struct ddf_super *ddf = st->sb;
1826 struct vcl *vcl = ddf->currentconf;
1827 char *guid;
1828
1829 if (vcl)
1830 guid = vcl->conf.guid;
1831 else
1832 guid = ddf->anchor.guid;
1833 uuid_from_ddf_guid(guid, uuid);
1834 }
1835
1836 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1837
1838 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1839 {
1840 struct ddf_super *ddf = st->sb;
1841 int map_disks = info->array.raid_disks;
1842 __u32 *cptr;
1843
1844 if (ddf->currentconf) {
1845 getinfo_super_ddf_bvd(st, info, map);
1846 return;
1847 }
1848 memset(info, 0, sizeof(*info));
1849
1850 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1851 info->array.level = LEVEL_CONTAINER;
1852 info->array.layout = 0;
1853 info->array.md_minor = -1;
1854 cptr = (__u32 *)(ddf->anchor.guid + 16);
1855 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1856
1857 info->array.utime = 0;
1858 info->array.chunk_size = 0;
1859 info->container_enough = 1;
1860
1861 info->disk.major = 0;
1862 info->disk.minor = 0;
1863 if (ddf->dlist) {
1864 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1865 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1866
1867 info->data_offset = be64_to_cpu(ddf->phys->
1868 entries[info->disk.raid_disk].
1869 config_size);
1870 info->component_size = ddf->dlist->size - info->data_offset;
1871 } else {
1872 info->disk.number = -1;
1873 info->disk.raid_disk = -1;
1874 // info->disk.raid_disk = find refnum in the table and use index;
1875 }
1876 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1877
1878 info->recovery_start = MaxSector;
1879 info->reshape_active = 0;
1880 info->recovery_blocked = 0;
1881 info->name[0] = 0;
1882
1883 info->array.major_version = -1;
1884 info->array.minor_version = -2;
1885 strcpy(info->text_version, "ddf");
1886 info->safe_mode_delay = 0;
1887
1888 uuid_from_super_ddf(st, info->uuid);
1889
1890 if (map) {
1891 int i;
1892 for (i = 0 ; i < map_disks; i++) {
1893 if (i < info->array.raid_disks &&
1894 (be16_to_cpu(ddf->phys->entries[i].state)
1895 & DDF_Online) &&
1896 !(be16_to_cpu(ddf->phys->entries[i].state)
1897 & DDF_Failed))
1898 map[i] = 1;
1899 else
1900 map[i] = 0;
1901 }
1902 }
1903 }
1904
1905 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1906 {
1907 struct ddf_super *ddf = st->sb;
1908 struct vcl *vc = ddf->currentconf;
1909 int cd = ddf->currentdev;
1910 int n_prim;
1911 int j;
1912 struct dl *dl;
1913 int map_disks = info->array.raid_disks;
1914 __u32 *cptr;
1915 struct vd_config *conf;
1916
1917 memset(info, 0, sizeof(*info));
1918 if (layout_ddf2md(&vc->conf, &info->array) == -1)
1919 return;
1920 info->array.md_minor = -1;
1921 cptr = (__u32 *)(vc->conf.guid + 16);
1922 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1923 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
1924 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1925 info->custom_array_size = 0;
1926
1927 conf = &vc->conf;
1928 n_prim = be16_to_cpu(conf->prim_elmnt_count);
1929 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
1930 int ibvd = cd / n_prim - 1;
1931 cd %= n_prim;
1932 conf = vc->other_bvds[ibvd];
1933 }
1934
1935 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1936 info->data_offset =
1937 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
1938 if (vc->block_sizes)
1939 info->component_size = vc->block_sizes[cd];
1940 else
1941 info->component_size = be64_to_cpu(conf->blocks);
1942 }
1943
1944 for (dl = ddf->dlist; dl ; dl = dl->next)
1945 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
1946 break;
1947
1948 info->disk.major = 0;
1949 info->disk.minor = 0;
1950 info->disk.state = 0;
1951 if (dl) {
1952 info->disk.major = dl->major;
1953 info->disk.minor = dl->minor;
1954 info->disk.raid_disk = cd + conf->sec_elmnt_seq
1955 * be16_to_cpu(conf->prim_elmnt_count);
1956 info->disk.number = dl->pdnum;
1957 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
1958 }
1959
1960 info->container_member = ddf->currentconf->vcnum;
1961
1962 info->recovery_start = MaxSector;
1963 info->resync_start = 0;
1964 info->reshape_active = 0;
1965 info->recovery_blocked = 0;
1966 if (!(ddf->virt->entries[info->container_member].state
1967 & DDF_state_inconsistent) &&
1968 (ddf->virt->entries[info->container_member].init_state
1969 & DDF_initstate_mask)
1970 == DDF_init_full)
1971 info->resync_start = MaxSector;
1972
1973 uuid_from_super_ddf(st, info->uuid);
1974
1975 info->array.major_version = -1;
1976 info->array.minor_version = -2;
1977 sprintf(info->text_version, "/%s/%d",
1978 st->container_devnm,
1979 info->container_member);
1980 info->safe_mode_delay = 200;
1981
1982 memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
1983 info->name[16]=0;
1984 for(j=0; j<16; j++)
1985 if (info->name[j] == ' ')
1986 info->name[j] = 0;
1987
1988 if (map)
1989 for (j = 0; j < map_disks; j++) {
1990 map[j] = 0;
1991 if (j < info->array.raid_disks) {
1992 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
1993 if (i >= 0 &&
1994 (be16_to_cpu(ddf->phys->entries[i].state)
1995 & DDF_Online) &&
1996 !(be16_to_cpu(ddf->phys->entries[i].state)
1997 & DDF_Failed))
1998 map[j] = 1;
1999 }
2000 }
2001 }
2002
2003 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
2004 char *update,
2005 char *devname, int verbose,
2006 int uuid_set, char *homehost)
2007 {
2008 /* For 'assemble' and 'force' we need to return non-zero if any
2009 * change was made. For others, the return value is ignored.
2010 * Update options are:
2011 * force-one : This device looks a bit old but needs to be included,
2012 * update age info appropriately.
2013 * assemble: clear any 'faulty' flag to allow this device to
2014 * be assembled.
2015 * force-array: Array is degraded but being forced, mark it clean
2016 * if that will be needed to assemble it.
2017 *
2018 * newdev: not used ????
2019 * grow: Array has gained a new device - this is currently for
2020 * linear only
2021 * resync: mark as dirty so a resync will happen.
2022 * uuid: Change the uuid of the array to match what is given
2023 * homehost: update the recorded homehost
2024 * name: update the name - preserving the homehost
2025 * _reshape_progress: record new reshape_progress position.
2026 *
2027 * Following are not relevant for this version:
2028 * sparc2.2 : update from old dodgy metadata
2029 * super-minor: change the preferred_minor number
2030 * summaries: update redundant counters.
2031 */
2032 int rv = 0;
2033 // struct ddf_super *ddf = st->sb;
2034 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
2035 // struct virtual_entry *ve = find_ve(ddf);
2036
2037 /* we don't need to handle "force-*" or "assemble" as
2038 * there is no need to 'trick' the kernel. When the metadata is
2039 * first updated to activate the array, all the implied modifications
2040 * will just happen.
2041 */
2042
2043 if (strcmp(update, "grow") == 0) {
2044 /* FIXME */
2045 } else if (strcmp(update, "resync") == 0) {
2046 // info->resync_checkpoint = 0;
2047 } else if (strcmp(update, "homehost") == 0) {
2048 /* homehost is stored in controller->vendor_data,
2049 * or at least it is when we are the vendor
2050 */
2051 // if (info->vendor_is_local)
2052 // strcpy(ddf->controller.vendor_data, homehost);
2053 rv = -1;
2054 } else if (strcmp(update, "name") == 0) {
2055 /* name is stored in virtual_entry->name */
2056 // memset(ve->name, ' ', 16);
2057 // strncpy(ve->name, info->name, 16);
2058 rv = -1;
2059 } else if (strcmp(update, "_reshape_progress") == 0) {
2060 /* We don't support reshape yet */
2061 } else if (strcmp(update, "assemble") == 0 ) {
2062 /* Do nothing, just succeed */
2063 rv = 0;
2064 } else
2065 rv = -1;
2066
2067 // update_all_csum(ddf);
2068
2069 return rv;
2070 }
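/* Editor's sketch (not part of the original code): if the "name" update
 * described in the comment above were implemented, it would presumably
 * follow the pattern hinted at by the commented-out lines - look up the
 * virtual_entry for this member, space-pad the DDF name field, then copy
 * the new name in:
 *
 *	struct ddf_super *ddf = st->sb;
 *	struct virtual_entry *ve =
 *		&ddf->virt->entries[info->container_member];
 *	memset(ve->name, ' ', sizeof(ve->name));
 *	strncpy(ve->name, info->name, sizeof(ve->name));
 *	ddf_set_updates_pending(ddf);
 *
 * The entry lookup via info->container_member is an assumption made for
 * illustration only; the real update path is left unimplemented above.
 */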
2071
2072 static void make_header_guid(char *guid)
2073 {
2074 be32 stamp;
2075 /* Create a DDF Header or Virtual Disk GUID */
2076
2077 /* 24 bytes of fiction required.
2078 * first 8 are a 'vendor-id' - "Linux-MD"
2079 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2080 * Remaining 8 random number plus timestamp
2081 */
2082 memcpy(guid, T10, sizeof(T10));
2083 stamp = cpu_to_be32(0xdeadbeef);
2084 memcpy(guid+8, &stamp, 4);
2085 stamp = cpu_to_be32(0);
2086 memcpy(guid+12, &stamp, 4);
2087 stamp = cpu_to_be32(time(0) - DECADE);
2088 memcpy(guid+16, &stamp, 4);
2089 stamp._v32 = random32();
2090 memcpy(guid+20, &stamp, 4);
2091 }
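/* For reference, the 24-byte GUID assembled above ends up laid out as
 * follows (derived from the code, not quoted from the DDF spec):
 *
 *	bytes  0- 7  "Linux-MD"                (T10 vendor string)
 *	bytes  8-15  0xdeadbeef 0x00000000     (fictitious controller type)
 *	bytes 16-19  time(0) - DECADE          (big-endian creation stamp)
 *	bytes 20-23  random32()                (uniqueness)
 */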
2092
2093 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2094 {
2095 unsigned int i;
2096 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2097 if (all_ff(ddf->virt->entries[i].guid))
2098 return i;
2099 }
2100 return DDF_NOTFOUND;
2101 }
2102
2103 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2104 const char *name)
2105 {
2106 unsigned int i;
2107 if (name == NULL)
2108 return DDF_NOTFOUND;
2109 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2110 if (all_ff(ddf->virt->entries[i].guid))
2111 continue;
2112 if (!strncmp(name, ddf->virt->entries[i].name,
2113 sizeof(ddf->virt->entries[i].name)))
2114 return i;
2115 }
2116 return DDF_NOTFOUND;
2117 }
2118
2119 #ifndef MDASSEMBLE
2120 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2121 const char *guid)
2122 {
2123 unsigned int i;
2124 if (guid == NULL || all_ff(guid))
2125 return DDF_NOTFOUND;
2126 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2127 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2128 return i;
2129 return DDF_NOTFOUND;
2130 }
2131 #endif
2132
2133 static int init_super_ddf_bvd(struct supertype *st,
2134 mdu_array_info_t *info,
2135 unsigned long long size,
2136 char *name, char *homehost,
2137 int *uuid, unsigned long long data_offset);
2138
2139 static int init_super_ddf(struct supertype *st,
2140 mdu_array_info_t *info,
2141 unsigned long long size, char *name, char *homehost,
2142 int *uuid, unsigned long long data_offset)
2143 {
2144 /* This is primarily called by Create when creating a new array.
2145 * We will then get add_to_super called for each component, and then
2146 * write_init_super called to write it out to each device.
2147 * For DDF, Create can create on fresh devices or on a pre-existing
2148 * array.
2149 * To create on a pre-existing array a different method will be called.
2150 * This one is just for fresh drives.
2151 *
2152 * We need to create the entire 'ddf' structure which includes:
2153 * DDF headers - these are easy.
2154 * Controller data - a Sector describing this controller .. not that
2155 * this is a controller exactly.
2156 * Physical Disk Record - one entry per device, so
2157 * leave plenty of space.
2158 * Virtual Disk Records - again, just leave plenty of space.
2159 * This just lists VDs, doesn't give details
2160 * Config records - describes the VDs that use this disk
2161 * DiskData - describes 'this' device.
2162 * BadBlockManagement - empty
2163 * Diag Space - empty
2164 * Vendor Logs - Could we put bitmaps here?
2165 *
2166 */
2167 struct ddf_super *ddf;
2168 char hostname[17];
2169 int hostlen;
2170 int max_phys_disks, max_virt_disks;
2171 unsigned long long sector;
2172 int clen;
2173 int i;
2174 int pdsize, vdsize;
2175 struct phys_disk *pd;
2176 struct virtual_disk *vd;
2177
2178 if (data_offset != INVALID_SECTORS) {
2179 pr_err("data-offset not supported by DDF\n");
2180 return 0;
2181 }
2182
2183 if (st->sb)
2184 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2185 data_offset);
2186
2187 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2188 pr_err("%s could not allocate superblock\n", __func__);
2189 return 0;
2190 }
2191 memset(ddf, 0, sizeof(*ddf));
2192 ddf->dlist = NULL; /* no physical disks yet */
2193 ddf->conflist = NULL; /* No virtual disks yet */
2194 st->sb = ddf;
2195
2196 if (info == NULL) {
2197 /* zeroing superblock */
2198 return 0;
2199 }
2200
2201 /* At least 32MB *must* be reserved for the ddf. So let's just
2202 * start 32MB from the end, and put the primary header there.
2203 * Don't do secondary for now.
2204 * We don't know exactly where that will be yet as it could be
2205 * different on each device. So just set up the lengths for now.
2206 *
2207 */
2208
2209 ddf->anchor.magic = DDF_HEADER_MAGIC;
2210 make_header_guid(ddf->anchor.guid);
2211
2212 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2213 ddf->anchor.seq = cpu_to_be32(1);
2214 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2215 ddf->anchor.openflag = 0xFF;
2216 ddf->anchor.foreignflag = 0;
2217 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2218 ddf->anchor.pad0 = 0xff;
2219 memset(ddf->anchor.pad1, 0xff, 12);
2220 memset(ddf->anchor.header_ext, 0xff, 32);
2221 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2222 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2223 ddf->anchor.type = DDF_HEADER_ANCHOR;
2224 memset(ddf->anchor.pad2, 0xff, 3);
2225 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2226 /* Put this at bottom of 32M reserved.. */
2227 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2228 max_phys_disks = 1023; /* Should be enough */
2229 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2230 max_virt_disks = 255;
2231 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks); /* ?? */
2232 ddf->anchor.max_partitions = cpu_to_be16(64); /* ?? */
2233 ddf->max_part = 64;
2234 ddf->mppe = 256;
2235 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
2236 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2237 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2238 memset(ddf->anchor.pad3, 0xff, 54);
2239 /* controller sections is one sector long immediately
2240 * after the ddf header */
2241 sector = 1;
2242 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2243 ddf->anchor.controller_section_length = cpu_to_be32(1);
2244 sector += 1;
2245
2246 /* The physical disk records follow; their length (pdsize/512 sectors)
2246 * depends on max_phys_disks. */
2247 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2248 sizeof(struct phys_disk_entry)*max_phys_disks,
2249 512);
2250 switch(pdsize/512) {
2251 case 2: case 8: case 32: case 128: case 512: break;
2252 default: abort();
2253 }
2254 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2255 ddf->anchor.phys_section_length =
2256 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2257 sector += pdsize/512;
2258
2259 /* virt is another 32 sectors */
2260 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2261 sizeof(struct virtual_entry) * max_virt_disks,
2262 512);
2263 switch(vdsize/512) {
2264 case 2: case 8: case 32: case 128: case 512: break;
2265 default: abort();
2266 }
2267 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2268 ddf->anchor.virt_section_length =
2269 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2270 sector += vdsize/512;
2271
2272 clen = ddf->conf_rec_len * (ddf->max_part+1);
2273 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2274 ddf->anchor.config_section_length = cpu_to_be32(clen);
2275 sector += clen;
2276
2277 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2278 ddf->anchor.data_section_length = cpu_to_be32(1);
2279 sector += 1;
2280
2281 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2282 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2283 ddf->anchor.diag_space_length = cpu_to_be32(0);
2284 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2285 ddf->anchor.vendor_length = cpu_to_be32(0);
2286 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2287
2288 memset(ddf->anchor.pad4, 0xff, 256);
2289
2290 memcpy(&ddf->primary, &ddf->anchor, 512);
2291 memcpy(&ddf->secondary, &ddf->anchor, 512);
2292
2293 ddf->primary.openflag = 1; /* I guess.. */
2294 ddf->primary.type = DDF_HEADER_PRIMARY;
2295
2296 ddf->secondary.openflag = 1; /* I guess.. */
2297 ddf->secondary.type = DDF_HEADER_SECONDARY;
2298
2299 ddf->active = &ddf->primary;
2300
2301 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2302
2303 /* 24 more bytes of fiction required.
2304 * first 8 are a 'vendor-id' - "Linux-MD"
2305 * Remaining 16 are serial number.... maybe a hostname would do?
2306 */
2307 memcpy(ddf->controller.guid, T10, sizeof(T10));
2308 gethostname(hostname, sizeof(hostname));
2309 hostname[sizeof(hostname) - 1] = 0;
2310 hostlen = strlen(hostname);
2311 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2312 for (i = strlen(T10) ; i+hostlen < 24; i++)
2313 ddf->controller.guid[i] = ' ';
2314
2315 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2316 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2317 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2318 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2319 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2320 memset(ddf->controller.pad, 0xff, 8);
2321 memset(ddf->controller.vendor_data, 0xff, 448);
2322 if (homehost && strlen(homehost) < 440)
2323 strcpy((char*)ddf->controller.vendor_data, homehost);
2324
2325 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2326 pr_err("%s could not allocate pd\n", __func__);
2327 return 0;
2328 }
2329 ddf->phys = pd;
2330 ddf->pdsize = pdsize;
2331
2332 memset(pd, 0xff, pdsize);
2333 memset(pd, 0, sizeof(*pd));
2334 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2335 pd->used_pdes = cpu_to_be16(0);
2336 pd->max_pdes = cpu_to_be16(max_phys_disks);
2337 memset(pd->pad, 0xff, 52);
2338 for (i = 0; i < max_phys_disks; i++)
2339 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2340
2341 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2342 pr_err("%s could not allocate vd\n", __func__);
2343 return 0;
2344 }
2345 ddf->virt = vd;
2346 ddf->vdsize = vdsize;
2347 memset(vd, 0, vdsize);
2348 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2349 vd->populated_vdes = cpu_to_be16(0);
2350 vd->max_vdes = cpu_to_be16(max_virt_disks);
2351 memset(vd->pad, 0xff, 52);
2352
2353 for (i=0; i<max_virt_disks; i++)
2354 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2355
2356 st->sb = ddf;
2357 ddf_set_updates_pending(ddf);
2358 return 1;
2359 }
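/* Worked example of the on-disk layout set up above, using the defaults
 * chosen in this function (max_phys_disks = 1023, max_virt_disks = 255,
 * mppe = 256, max_part = 64) and the 64-byte physical/virtual disk entry
 * structures this file uses:
 *
 *	conf_rec_len = 1 + ROUND_UP(256 * 12, 512)/512 = 7 sectors
 *	pdsize       = 64 + 1023 * 64 = 65536 bytes -> 128 sectors
 *	vdsize       = 64 +  255 * 64 = 16384 bytes ->  32 sectors
 *
 *	sector   1, len   1  controller data
 *	sector   2, len 128  physical disk records
 *	sector 130, len  32  virtual disk records
 *	sector 162, len 455  config records (7 * (64+1) partitions)
 *	sector 617, len   1  disk data
 *
 * Offsets are 512-byte sectors relative to the primary (or secondary)
 * header; the figures are a reading of the code above, not values
 * mandated by the DDF specification.
 */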
2360
2361 static int chunk_to_shift(int chunksize)
2362 {
2363 return ffs(chunksize/512)-1;
2364 }
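/* Worked example: for a 64 KiB chunk, chunksize/512 == 128 and
 * ffs(128) == 8, so chunk_to_shift() returns 7; the inverse used in
 * getinfo_super_ddf_bvd(), 512 << 7, recovers the 65536-byte chunk.
 */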
2365
2366 #ifndef MDASSEMBLE
2367 struct extent {
2368 unsigned long long start, size;
2369 };
2370 static int cmp_extent(const void *av, const void *bv)
2371 {
2372 const struct extent *a = av;
2373 const struct extent *b = bv;
2374 if (a->start < b->start)
2375 return -1;
2376 if (a->start > b->start)
2377 return 1;
2378 return 0;
2379 }
2380
2381 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2382 {
2383 /* find a list of used extents on the given physical device
2384 * (dl) of the given ddf.
2385 * Return a malloced array of 'struct extent'.
2386 *
2387 * FIXME ignore DDF_Legacy devices?
2388 *
2389 */
2390 struct extent *rv;
2391 int n = 0;
2392 unsigned int i;
2393
2394 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2395
2396 for (i = 0; i < ddf->max_part; i++) {
2397 const struct vd_config *bvd;
2398 unsigned int ibvd;
2399 struct vcl *v = dl->vlist[i];
2400 if (v == NULL ||
2401 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2402 &bvd, &ibvd) == DDF_NOTFOUND)
2403 continue;
2404 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2405 rv[n].size = be64_to_cpu(bvd->blocks);
2406 n++;
2407 }
2408 qsort(rv, n, sizeof(*rv), cmp_extent);
2409
2410 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2411 rv[n].size = 0;
2412 return rv;
2413 }
2414 #endif
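/* Illustrative sketch (editor's addition, not from the original source)
 * of how callers walk the array returned by get_extents(): the extents
 * are sorted by start and terminated by a zero-size sentinel placed at
 * config_size, so each 'gap' below is a candidate free run of sectors
 * between 'pos' and the next extent's start.  The same loop shape
 * appears in add_to_super_ddf_bvd() and reserve_space() below.
 *
 *	struct extent *e = get_extents(ddf, dl);
 *	unsigned long long pos = 0, gap;
 *	int i = 0;
 *	if (e) {
 *		do {
 *			gap = e[i].start - pos;
 *			pos = e[i].start + e[i].size;
 *			i++;
 *		} while (e[i-1].size);
 *		free(e);
 *	}
 */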
2415
2416 static int init_super_ddf_bvd(struct supertype *st,
2417 mdu_array_info_t *info,
2418 unsigned long long size,
2419 char *name, char *homehost,
2420 int *uuid, unsigned long long data_offset)
2421 {
2422 /* We are creating a BVD inside a pre-existing container.
2423 * so st->sb is already set.
2424 * We need to create a new vd_config and a new virtual_entry
2425 */
2426 struct ddf_super *ddf = st->sb;
2427 unsigned int venum, i;
2428 struct virtual_entry *ve;
2429 struct vcl *vcl;
2430 struct vd_config *vc;
2431
2432 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2433 pr_err("This ddf already has an array called %s\n", name);
2434 return 0;
2435 }
2436 venum = find_unused_vde(ddf);
2437 if (venum == DDF_NOTFOUND) {
2438 pr_err("Cannot find spare slot for virtual disk\n");
2439 return 0;
2440 }
2441 ve = &ddf->virt->entries[venum];
2442
2443 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2444 * timestamp, random number
2445 */
2446 make_header_guid(ve->guid);
2447 ve->unit = cpu_to_be16(info->md_minor);
2448 ve->pad0 = 0xFFFF;
2449 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2450 DDF_GUID_LEN);
2451 ve->type = cpu_to_be16(0);
2452 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2453 if (info->state & 1) /* clean */
2454 ve->init_state = DDF_init_full;
2455 else
2456 ve->init_state = DDF_init_not;
2457
2458 memset(ve->pad1, 0xff, 14);
2459 memset(ve->name, ' ', 16);
2460 if (name)
2461 strncpy(ve->name, name, 16);
2462 ddf->virt->populated_vdes =
2463 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2464
2465 /* Now create a new vd_config */
2466 if (posix_memalign((void**)&vcl, 512,
2467 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2468 pr_err("%s could not allocate vd_config\n", __func__);
2469 return 0;
2470 }
2471 vcl->vcnum = venum;
2472 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2473 vc = &vcl->conf;
2474
2475 vc->magic = DDF_VD_CONF_MAGIC;
2476 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2477 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2478 vc->seqnum = cpu_to_be32(1);
2479 memset(vc->pad0, 0xff, 24);
2480 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2481 if (layout_md2ddf(info, vc) == -1 ||
2482 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2483 pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
2484 __func__, info->level, info->layout, info->raid_disks);
2485 free(vcl);
2486 return 0;
2487 }
2488 vc->sec_elmnt_seq = 0;
2489 if (alloc_other_bvds(ddf, vcl) != 0) {
2490 pr_err("%s could not allocate other bvds\n",
2491 __func__);
2492 free(vcl);
2493 return 0;
2494 }
2495 vc->blocks = cpu_to_be64(info->size * 2);
2496 vc->array_blocks = cpu_to_be64(
2497 calc_array_size(info->level, info->raid_disks, info->layout,
2498 info->chunk_size, info->size*2));
2499 memset(vc->pad1, 0xff, 8);
2500 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2501 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2502 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2503 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2504 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2505 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2506 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2507 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2508 memset(vc->cache_pol, 0, 8);
2509 vc->bg_rate = 0x80;
2510 memset(vc->pad2, 0xff, 3);
2511 memset(vc->pad3, 0xff, 52);
2512 memset(vc->pad4, 0xff, 192);
2513 memset(vc->v0, 0xff, 32);
2514 memset(vc->v1, 0xff, 32);
2515 memset(vc->v2, 0xff, 16);
2516 memset(vc->v3, 0xff, 16);
2517 memset(vc->vendor, 0xff, 32);
2518
2519 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2520 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2521
2522 for (i = 1; i < vc->sec_elmnt_count; i++) {
2523 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2524 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2525 }
2526
2527 vcl->next = ddf->conflist;
2528 ddf->conflist = vcl;
2529 ddf->currentconf = vcl;
2530 ddf_set_updates_pending(ddf);
2531 return 1;
2532 }
2533
2534
2535 #ifndef MDASSEMBLE
2536 static int get_svd_state(const struct ddf_super *, const struct vcl *);
2537
2538 static void add_to_super_ddf_bvd(struct supertype *st,
2539 mdu_disk_info_t *dk, int fd, char *devname)
2540 {
2541 /* fd and devname identify a device within the ddf container (st).
2542 * dk identifies a location in the new BVD.
2543 * We need to find suitable free space in that device and update
2544 * the phys_refnum and lba_offset for the newly created vd_config.
2545 * We might also want to update the type in the phys_disk
2546 * section.
2547 *
2548 * Alternately: fd == -1 and we have already chosen which device to
2549 * use and recorded it in dl->raiddisk.
2550 */
2551 struct dl *dl;
2552 struct ddf_super *ddf = st->sb;
2553 struct vd_config *vc;
2554 unsigned int i;
2555 unsigned long long blocks, pos, esize;
2556 struct extent *ex;
2557 unsigned int raid_disk = dk->raid_disk;
2558
2559 if (fd == -1) {
2560 for (dl = ddf->dlist; dl ; dl = dl->next)
2561 if (dl->raiddisk == dk->raid_disk)
2562 break;
2563 } else {
2564 for (dl = ddf->dlist; dl ; dl = dl->next)
2565 if (dl->major == dk->major &&
2566 dl->minor == dk->minor)
2567 break;
2568 }
2569 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
2570 return;
2571
2572 vc = &ddf->currentconf->conf;
2573 if (vc->sec_elmnt_count > 1) {
2574 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2575 if (raid_disk >= n)
2576 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2577 raid_disk %= n;
2578 }
2579
2580 ex = get_extents(ddf, dl);
2581 if (!ex)
2582 return;
2583
2584 i = 0; pos = 0;
2585 blocks = be64_to_cpu(vc->blocks);
2586 if (ddf->currentconf->block_sizes)
2587 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2588
2589 do {
2590 esize = ex[i].start - pos;
2591 if (esize >= blocks)
2592 break;
2593 pos = ex[i].start + ex[i].size;
2594 i++;
2595 } while (ex[i-1].size);
2596
2597 free(ex);
2598 if (esize < blocks)
2599 return;
2600
2601 ddf->currentdev = dk->raid_disk;
2602 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2603 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2604
2605 for (i = 0; i < ddf->max_part ; i++)
2606 if (dl->vlist[i] == NULL)
2607 break;
2608 if (i == ddf->max_part)
2609 return;
2610 dl->vlist[i] = ddf->currentconf;
2611
2612 if (fd >= 0)
2613 dl->fd = fd;
2614 if (devname)
2615 dl->devname = devname;
2616
2617 /* Check if we can mark array as optimal yet */
2618 i = ddf->currentconf->vcnum;
2619 ddf->virt->entries[i].state =
2620 (ddf->virt->entries[i].state & ~DDF_state_mask)
2621 | get_svd_state(ddf, ddf->currentconf);
2622 be16_clear(ddf->phys->entries[dl->pdnum].type,
2623 cpu_to_be16(DDF_Global_Spare));
2624 be16_set(ddf->phys->entries[dl->pdnum].type,
2625 cpu_to_be16(DDF_Active_in_VD));
2626 dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
2627 __func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
2628 ddf->currentconf->vcnum, guid_str(vc->guid),
2629 dk->raid_disk);
2630 ddf_set_updates_pending(ddf);
2631 }
2632
2633 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2634 {
2635 unsigned int i;
2636 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2637 if (all_ff(ddf->phys->entries[i].guid))
2638 return i;
2639 }
2640 return DDF_NOTFOUND;
2641 }
2642
2643 /* add a device to a container, either while creating it or while
2644 * expanding a pre-existing container
2645 */
2646 static int add_to_super_ddf(struct supertype *st,
2647 mdu_disk_info_t *dk, int fd, char *devname,
2648 unsigned long long data_offset)
2649 {
2650 struct ddf_super *ddf = st->sb;
2651 struct dl *dd;
2652 time_t now;
2653 struct tm *tm;
2654 unsigned long long size;
2655 struct phys_disk_entry *pde;
2656 unsigned int n, i;
2657 struct stat stb;
2658 __u32 *tptr;
2659
2660 if (ddf->currentconf) {
2661 add_to_super_ddf_bvd(st, dk, fd, devname);
2662 return 0;
2663 }
2664
2665 /* This is device numbered dk->number. We need to create
2666 * a phys_disk entry and a more detailed disk_data entry.
2667 */
2668 fstat(fd, &stb);
2669 n = find_unused_pde(ddf);
2670 if (n == DDF_NOTFOUND) {
2671 pr_err("%s: No free slot in array, cannot add disk\n",
2672 __func__);
2673 return 1;
2674 }
2675 pde = &ddf->phys->entries[n];
2676 get_dev_size(fd, NULL, &size);
2677 if (size <= 32*1024*1024) {
2678 pr_err("%s: device size must be at least 32MB\n",
2679 __func__);
2680 return 1;
2681 }
2682 size >>= 9;
2683
2684 if (posix_memalign((void**)&dd, 512,
2685 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2686 pr_err("%s could not allocate buffer for new disk, aborting\n",
2687 __func__);
2688 return 1;
2689 }
2690 dd->major = major(stb.st_rdev);
2691 dd->minor = minor(stb.st_rdev);
2692 dd->devname = devname;
2693 dd->fd = fd;
2694 dd->spare = NULL;
2695
2696 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2697 now = time(0);
2698 tm = localtime(&now);
2699 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2700 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2701 tptr = (__u32 *)(dd->disk.guid + 16);
2702 *tptr++ = random32();
2703 *tptr = random32();
2704
2705 do {
2706 /* Cannot be bothered finding a CRC of some irrelevant details*/
2707 dd->disk.refnum._v32 = random32();
2708 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2709 i > 0; i--)
2710 if (be32_eq(ddf->phys->entries[i-1].refnum,
2711 dd->disk.refnum))
2712 break;
2713 } while (i > 0);
2714
2715 dd->disk.forced_ref = 1;
2716 dd->disk.forced_guid = 1;
2717 memset(dd->disk.vendor, ' ', 32);
2718 memcpy(dd->disk.vendor, "Linux", 5);
2719 memset(dd->disk.pad, 0xff, 442);
2720 for (i = 0; i < ddf->max_part ; i++)
2721 dd->vlist[i] = NULL;
2722
2723 dd->pdnum = n;
2724
2725 if (st->update_tail) {
2726 int len = (sizeof(struct phys_disk) +
2727 sizeof(struct phys_disk_entry));
2728 struct phys_disk *pd;
2729
2730 pd = xmalloc(len);
2731 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2732 pd->used_pdes = cpu_to_be16(n);
2733 pde = &pd->entries[0];
2734 dd->mdupdate = pd;
2735 } else
2736 ddf->phys->used_pdes = cpu_to_be16(
2737 1 + be16_to_cpu(ddf->phys->used_pdes));
2738
2739 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2740 pde->refnum = dd->disk.refnum;
2741 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2742 pde->state = cpu_to_be16(DDF_Online);
2743 dd->size = size;
2744 /*
2745 * If there is already a device in dlist, try to reserve the same
2746 * amount of workspace. Otherwise, use 32MB.
2747 * We checked disk size above already.
2748 */
2749 #define __calc_lba(new, old, lba, mb) do { \
2750 unsigned long long dif; \
2751 if ((old) != NULL) \
2752 dif = (old)->size - be64_to_cpu((old)->lba); \
2753 else \
2754 dif = (new)->size; \
2755 if ((new)->size > dif) \
2756 (new)->lba = cpu_to_be64((new)->size - dif); \
2757 else \
2758 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2759 } while (0)
2760 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2761 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2762 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2763 pde->config_size = dd->workspace_lba;
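/* Worked example of __calc_lba() above for the first disk added to a
 * fresh container (ddf->dlist is still NULL): 'dif' becomes the new
 * disk's own size, so the "(new)->size > dif" branch is not taken and
 * each LBA is simply placed 'mb' MiB before the end of the device
 * (sizes are in 512-byte sectors, hence mb*1024*2):
 *
 *	workspace_lba = size - 32*1024*2    (32 MiB from the end)
 *	primary_lba   = size - 16*1024*2    (16 MiB from the end)
 *	secondary_lba = size - 32*1024*2    (coincides with the workspace)
 *
 * For subsequent disks that are large enough, the existing first disk's
 * distance-from-end is reproduced instead, so all members keep matching
 * metadata offsets.
 */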
2764
2765 sprintf(pde->path, "%17.17s","Information: nil") ;
2766 memset(pde->pad, 0xff, 6);
2767
2768 if (st->update_tail) {
2769 dd->next = ddf->add_list;
2770 ddf->add_list = dd;
2771 } else {
2772 dd->next = ddf->dlist;
2773 ddf->dlist = dd;
2774 ddf_set_updates_pending(ddf);
2775 }
2776
2777 return 0;
2778 }
2779
2780 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2781 {
2782 struct ddf_super *ddf = st->sb;
2783 struct dl *dl;
2784
2785 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2786 * disappeared from the container.
2787 * We need to arrange that it disappears from the metadata and
2788 * internal data structures too.
2789 * Most of the work is done by ddf_process_update, which edits
2790 * the metadata, closes the file handle, and attaches the memory
2791 * so that free_updates will free it.
2792 */
2793 for (dl = ddf->dlist; dl ; dl = dl->next)
2794 if (dl->major == dk->major &&
2795 dl->minor == dk->minor)
2796 break;
2797 if (!dl)
2798 return -1;
2799
2800 if (st->update_tail) {
2801 int len = (sizeof(struct phys_disk) +
2802 sizeof(struct phys_disk_entry));
2803 struct phys_disk *pd;
2804
2805 pd = xmalloc(len);
2806 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2807 pd->used_pdes = cpu_to_be16(dl->pdnum);
2808 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2809 append_metadata_update(st, pd, len);
2810 }
2811 return 0;
2812 }
2813 #endif
2814
2815 /*
2816 * This is the write_init_super method for a ddf container. It is
2817 * called when creating a container or adding another device to a
2818 * container.
2819 */
2820 #define NULL_CONF_SZ 4096
2821
2822 static char *null_aligned;
2823 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type,
2824 int update)
2825 {
2826 unsigned long long sector;
2827 struct ddf_header *header;
2828 int fd, i, n_config, conf_size;
2829 int ret = 0;
2830
2831 if (null_aligned == NULL) {
2832 if (posix_memalign((void **)&null_aligned, 4096, NULL_CONF_SZ)
2833 != 0)
2834 return 0;
2835 memset(null_aligned, 0xff, NULL_CONF_SZ);
2836 }
2837
2838 fd = d->fd;
2839
2840 switch (type) {
2841 case DDF_HEADER_PRIMARY:
2842 header = &ddf->primary;
2843 sector = be64_to_cpu(header->primary_lba);
2844 break;
2845 case DDF_HEADER_SECONDARY:
2846 header = &ddf->secondary;
2847 sector = be64_to_cpu(header->secondary_lba);
2848 break;
2849 default:
2850 return 0;
2851 }
2852
2853 header->type = type;
2854 header->openflag = 1;
2855 header->crc = calc_crc(header, 512);
2856
2857 lseek64(fd, sector<<9, 0);
2858 if (write(fd, header, 512) < 0)
2859 goto out;
2860
2861 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2862 if (write(fd, &ddf->controller, 512) < 0)
2863 goto out;
2864
2865 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2866 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2867 goto out;
2868 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2869 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2870 goto out;
2871
2872 /* Now write lots of config records. */
2873 n_config = ddf->max_part;
2874 conf_size = ddf->conf_rec_len * 512;
2875 for (i = 0 ; i <= n_config ; i++) {
2876 struct vcl *c;
2877 struct vd_config *vdc = NULL;
2878 if (i == n_config) {
2879 c = (struct vcl *)d->spare;
2880 if (c)
2881 vdc = &c->conf;
2882 } else {
2883 unsigned int dummy;
2884 c = d->vlist[i];
2885 if (c)
2886 get_pd_index_from_refnum(
2887 c, d->disk.refnum,
2888 ddf->mppe,
2889 (const struct vd_config **)&vdc,
2890 &dummy);
2891 }
2892 if (c) {
2893 dprintf("writing conf record %i on disk %08x for %s/%u\n",
2894 i, be32_to_cpu(d->disk.refnum),
2895 guid_str(vdc->guid),
2896 vdc->sec_elmnt_seq);
2897 vdc->seqnum = header->seq;
2898 vdc->crc = calc_crc(vdc, conf_size);
2899 if (write(fd, vdc, conf_size) < 0)
2900 break;
2901 } else if (!update) {
2902 unsigned int togo = conf_size;
2903 while (togo > NULL_CONF_SZ) {
2904 if (write(fd, null_aligned, NULL_CONF_SZ) < 0)
2905 break;
2906 togo -= NULL_CONF_SZ;
2907 }
2908 if (write(fd, null_aligned, togo) < 0)
2909 break;
2910 } else
2911 lseek(fd, conf_size, SEEK_CUR);
2912 }
2913 if (i <= n_config)
2914 goto out;
2915
2916 d->disk.crc = calc_crc(&d->disk, 512);
2917 if (write(fd, &d->disk, 512) < 0)
2918 goto out;
2919
2920 ret = 1;
2921 out:
2922 header->openflag = 0;
2923 header->crc = calc_crc(header, 512);
2924
2925 lseek64(fd, sector<<9, 0);
2926 if (write(fd, header, 512) < 0)
2927 ret = 0;
2928
2929 return ret;
2930 }
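/* Note (editor): the sequence of write() calls above relies on the
 * sections being stored back to back in exactly the order computed in
 * init_super_ddf() - header, controller, phys, virt, config records,
 * disk data - so a single lseek64() to the header sector followed by
 * sequential writes lands every section at its recorded offset.
 */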
2931
2932 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d,
2933 int update)
2934 {
2935 unsigned long long size;
2936 int fd = d->fd;
2937 if (fd < 0)
2938 return 0;
2939
2940 /* We need to fill in the primary, (secondary) and workspace
2941 * lba's in the headers and set their checksums,
2942 * and also checksum phys, virt, etc.
2943 *
2944 * Then write everything out, finally the anchor is written.
2945 */
2946 get_dev_size(fd, NULL, &size);
2947 size /= 512;
2948 if (be64_to_cpu(d->workspace_lba) != 0ULL)
2949 ddf->anchor.workspace_lba = d->workspace_lba;
2950 else
2951 ddf->anchor.workspace_lba =
2952 cpu_to_be64(size - 32*1024*2);
2953 if (be64_to_cpu(d->primary_lba) != 0ULL)
2954 ddf->anchor.primary_lba = d->primary_lba;
2955 else
2956 ddf->anchor.primary_lba =
2957 cpu_to_be64(size - 16*1024*2);
2958 if (be64_to_cpu(d->secondary_lba) != 0ULL)
2959 ddf->anchor.secondary_lba = d->secondary_lba;
2960 else
2961 ddf->anchor.secondary_lba =
2962 cpu_to_be64(size - 32*1024*2);
2963 ddf->anchor.seq = ddf->active->seq;
2964 memcpy(&ddf->primary, &ddf->anchor, 512);
2965 memcpy(&ddf->secondary, &ddf->anchor, 512);
2966
2967 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
2968 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
2969 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
2970
2971 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY, update))
2972 return 0;
2973
2974 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY, update))
2975 return 0;
2976
2977 lseek64(fd, (size-1)*512, SEEK_SET);
2978 if (write(fd, &ddf->anchor, 512) < 0)
2979 return 0;
2980
2981 return 1;
2982 }
2983
2984 #ifndef MDASSEMBLE
2985 static int __write_init_super_ddf(struct supertype *st, int update)
2986 {
2987 struct ddf_super *ddf = st->sb;
2988 struct dl *d;
2989 int attempts = 0;
2990 int successes = 0;
2991
2992 pr_state(ddf, __func__);
2993
2994 /* try to write updated metadata,
2995 * if we catch a failure move on to the next disk
2996 */
2997 for (d = ddf->dlist; d; d=d->next) {
2998 attempts++;
2999 successes += _write_super_to_disk(ddf, d, update);
3000 }
3001
3002 return attempts != successes;
3003 }
3004
3005 static int write_init_super_ddf(struct supertype *st)
3006 {
3007 struct ddf_super *ddf = st->sb;
3008 struct vcl *currentconf = ddf->currentconf;
3009
3010 /* we are done with currentconf; reset it so that st points at the container */
3011 ddf->currentconf = NULL;
3012
3013 if (st->update_tail) {
3014 /* queue the virtual_disk and vd_config as metadata updates */
3015 struct virtual_disk *vd;
3016 struct vd_config *vc;
3017 int len, tlen;
3018 unsigned int i;
3019
3020 if (!currentconf) {
3021 int len = (sizeof(struct phys_disk) +
3022 sizeof(struct phys_disk_entry));
3023
3024 /* adding a disk to the container. */
3025 if (!ddf->add_list)
3026 return 0;
3027
3028 append_metadata_update(st, ddf->add_list->mdupdate, len);
3029 ddf->add_list->mdupdate = NULL;
3030 return 0;
3031 }
3032
3033 /* Newly created VD */
3034
3035 /* First the virtual disk. We have a slightly fake header */
3036 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3037 vd = xmalloc(len);
3038 *vd = *ddf->virt;
3039 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3040 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3041 append_metadata_update(st, vd, len);
3042
3043 /* Then the vd_config */
3044 len = ddf->conf_rec_len * 512;
3045 tlen = len * currentconf->conf.sec_elmnt_count;
3046 vc = xmalloc(tlen);
3047 memcpy(vc, &currentconf->conf, len);
3048 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3049 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3050 len);
3051 append_metadata_update(st, vc, tlen);
3052
3053 /* FIXME I need to close the fds! */
3054 return 0;
3055 } else {
3056 struct dl *d;
3057 if (!currentconf)
3058 for (d = ddf->dlist; d; d=d->next)
3059 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3060 return __write_init_super_ddf(st, 0);
3061 }
3062 }
3063
3064 #endif
3065
3066 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3067 unsigned long long data_offset)
3068 {
3069 /* We must reserve the last 32Meg */
3070 if (devsize <= 32*1024*2)
3071 return 0;
3072 return devsize - 32*1024*2;
3073 }
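/* Worked example: for a 1 TiB member device, devsize is 2^31 = 2147483648
 * sectors; the reserved 32 MiB amounts to 32*1024*2 = 65536 sectors, so
 * avail_size_ddf() reports 2147418112 usable sectors (just under 1024 GiB).
 */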
3074
3075 #ifndef MDASSEMBLE
3076
3077 static int reserve_space(struct supertype *st, int raiddisks,
3078 unsigned long long size, int chunk,
3079 unsigned long long *freesize)
3080 {
3081 /* Find 'raiddisks' spare extents at least 'size' big (but
3082 * only caring about multiples of 'chunk') and remember
3083 * them.
3084 * If they cannot be found, fail.
3085 */
3086 struct dl *dl;
3087 struct ddf_super *ddf = st->sb;
3088 int cnt = 0;
3089
3090 for (dl = ddf->dlist; dl ; dl=dl->next) {
3091 dl->raiddisk = -1;
3092 dl->esize = 0;
3093 }
3094 /* Now find largest extent on each device */
3095 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3096 struct extent *e = get_extents(ddf, dl);
3097 unsigned long long pos = 0;
3098 int i = 0;
3099 int found = 0;
3100 unsigned long long minsize = size;
3101
3102 if (size == 0)
3103 minsize = chunk;
3104
3105 if (!e)
3106 continue;
3107 do {
3108 unsigned long long esize;
3109 esize = e[i].start - pos;
3110 if (esize >= minsize) {
3111 found = 1;
3112 minsize = esize;
3113 }
3114 pos = e[i].start + e[i].size;
3115 i++;
3116 } while (e[i-1].size);
3117 if (found) {
3118 cnt++;
3119 dl->esize = minsize;
3120 }
3121 free(e);
3122 }
3123 if (cnt < raiddisks) {
3124 pr_err("not enough devices with space to create array.\n");
3125 return 0; /* Not enough free extents large enough */
3126 }
3127 if (size == 0) {
3128 /* choose the largest size of which there are at least 'raiddisks' */
3129 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3130 struct dl *dl2;
3131 if (dl->esize <= size)
3132 continue;
3133 /* This is bigger than 'size', see if there are enough */
3134 cnt = 0;
3135 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3136 if (dl2->esize >= dl->esize)
3137 cnt++;
3138 if (cnt >= raiddisks)
3139 size = dl->esize;
3140 }
3141 if (chunk) {
3142 size = size / chunk;
3143 size *= chunk;
3144 }
3145 *freesize = size;
3146 if (size < 32) {
3147 pr_err("not enough free space to create array.\n");
3148 return 0;
3149 }
3150 }
3151 /* We have a 'size' for which there are enough free extents.
3152 * We simply do a first-fit allocation. */
3153 cnt = 0;
3154 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3155 if (dl->esize < size)
3156 continue;
3157
3158 dl->raiddisk = cnt;
3159 cnt++;
3160 }
3161 return 1;
3162 }
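/* Worked example of reserve_space() (editor's illustration): suppose a
 * container has three members whose largest free extents are 100 GiB,
 * 100 GiB and 80 GiB, and --create asks for raiddisks = 2 with size == 0.
 * The first pass records those three largest extents; the size-selection
 * loop then settles on 100 GiB, the biggest value available on at least
 * two devices, and rounds it down to a chunk multiple; finally the
 * first-fit pass assigns raiddisk 0 and 1 to the two 100 GiB members and
 * leaves the 80 GiB one untouched.
 */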
3163
3164 static int
3165 validate_geometry_ddf_container(struct supertype *st,
3166 int level, int layout, int raiddisks,
3167 int chunk, unsigned long long size,
3168 unsigned long long data_offset,
3169 char *dev, unsigned long long *freesize,
3170 int verbose);
3171
3172 static int validate_geometry_ddf_bvd(struct supertype *st,
3173 int level, int layout, int raiddisks,
3174 int *chunk, unsigned long long size,
3175 unsigned long long data_offset,
3176 char *dev, unsigned long long *freesize,
3177 int verbose);
3178
3179 static int validate_geometry_ddf(struct supertype *st,
3180 int level, int layout, int raiddisks,
3181 int *chunk, unsigned long long size,
3182 unsigned long long data_offset,
3183 char *dev, unsigned long long *freesize,
3184 int verbose)
3185 {
3186 int fd;
3187 struct mdinfo *sra;
3188 int cfd;
3189
3190 /* ddf potentially supports lots of things, but it depends on
3191 * what devices are offered (and maybe kernel version?)
3192 * If given unused devices, we will make a container.
3193 * If given devices in a container, we will make a BVD.
3194 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3195 */
3196
3197 if (*chunk == UnSet)
3198 *chunk = DEFAULT_CHUNK;
3199
3200 if (level == -1000000) level = LEVEL_CONTAINER;
3201 if (level == LEVEL_CONTAINER) {
3202 /* Must be a fresh device to add to a container */
3203 return validate_geometry_ddf_container(st, level, layout,
3204 raiddisks, *chunk,
3205 size, data_offset, dev,
3206 freesize,
3207 verbose);
3208 }
3209
3210 if (!dev) {
3211 mdu_array_info_t array = {
3212 .level = level, .layout = layout,
3213 .raid_disks = raiddisks
3214 };
3215 struct vd_config conf;
3216 if (layout_md2ddf(&array, &conf) == -1) {
3217 if (verbose)
3218 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3219 level, layout, raiddisks);
3220 return 0;
3221 }
3222 /* Should check layout? etc */
3223
3224 if (st->sb && freesize) {
3225 /* --create was given a container to create in.
3226 * So we need to check that there are enough
3227 * free spaces and return the amount of space.
3228 * We may as well remember which drives were
3229 * chosen so that add_to_super/getinfo_super
3230 * can return them.
3231 */
3232 return reserve_space(st, raiddisks, size, *chunk, freesize);
3233 }
3234 return 1;
3235 }
3236
3237 if (st->sb) {
3238 /* A container has already been opened, so we are
3239 * creating in there. Maybe a BVD, maybe an SVD.
3240 * Should make a distinction one day.
3241 */
3242 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3243 chunk, size, data_offset, dev,
3244 freesize,
3245 verbose);
3246 }
3247 /* This is the first device for the array.
3248 * If it is a container, we read it in and do automagic allocations,
3249 * no other devices should be given.
3250 * Otherwise it must be a member device of a container, and we
3251 * do manual allocation.
3252 * Later we should check for a BVD and make an SVD.
3253 */
3254 fd = open(dev, O_RDONLY|O_EXCL, 0);
3255 if (fd >= 0) {
3256 sra = sysfs_read(fd, NULL, GET_VERSION);
3257 close(fd);
3258 if (sra && sra->array.major_version == -1 &&
3259 strcmp(sra->text_version, "ddf") == 0) {
3260
3261 /* load super */
3262 /* find space for 'n' devices. */
3263 /* remember the devices */
3264 /* Somehow return the fact that we have enough */
3265 }
3266
3267 if (verbose)
3268 pr_err("ddf: Cannot create this array "
3269 "on device %s - a container is required.\n",
3270 dev);
3271 return 0;
3272 }
3273 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3274 if (verbose)
3275 pr_err("ddf: Cannot open %s: %s\n",
3276 dev, strerror(errno));
3277 return 0;
3278 }
3279 /* Well, it is in use by someone, maybe a 'ddf' container. */
3280 cfd = open_container(fd);
3281 if (cfd < 0) {
3282 close(fd);
3283 if (verbose)
3284 pr_err("ddf: Cannot use %s: %s\n",
3285 dev, strerror(EBUSY));
3286 return 0;
3287 }
3288 sra = sysfs_read(cfd, NULL, GET_VERSION);
3289 close(fd);
3290 if (sra && sra->array.major_version == -1 &&
3291 strcmp(sra->text_version, "ddf") == 0) {
3292 /* This is a member of a ddf container. Load the container
3293 * and try to create a bvd
3294 */
3295 struct ddf_super *ddf;
3296 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3297 st->sb = ddf;
3298 strcpy(st->container_devnm, fd2devnm(cfd));
3299 close(cfd);
3300 return validate_geometry_ddf_bvd(st, level, layout,
3301 raiddisks, chunk, size,
3302 data_offset,
3303 dev, freesize,
3304 verbose);
3305 }
3306 close(cfd);
3307 } else /* device may belong to a different container */
3308 return 0;
3309
3310 return 1;
3311 }
3312
3313 static int
3314 validate_geometry_ddf_container(struct supertype *st,
3315 int level, int layout, int raiddisks,
3316 int chunk, unsigned long long size,
3317 unsigned long long data_offset,
3318 char *dev, unsigned long long *freesize,
3319 int verbose)
3320 {
3321 int fd;
3322 unsigned long long ldsize;
3323
3324 if (level != LEVEL_CONTAINER)
3325 return 0;
3326 if (!dev)
3327 return 1;
3328
3329 fd = open(dev, O_RDONLY|O_EXCL, 0);
3330 if (fd < 0) {
3331 if (verbose)
3332 pr_err("ddf: Cannot open %s: %s\n",
3333 dev, strerror(errno));
3334 return 0;
3335 }
3336 if (!get_dev_size(fd, dev, &ldsize)) {
3337 close(fd);
3338 return 0;
3339 }
3340 close(fd);
3341
3342 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3343 if (*freesize == 0)
3344 return 0;
3345
3346 return 1;
3347 }
3348
3349 static int validate_geometry_ddf_bvd(struct supertype *st,
3350 int level, int layout, int raiddisks,
3351 int *chunk, unsigned long long size,
3352 unsigned long long data_offset,
3353 char *dev, unsigned long long *freesize,
3354 int verbose)
3355 {
3356 struct stat stb;
3357 struct ddf_super *ddf = st->sb;
3358 struct dl *dl;
3359 unsigned long long pos = 0;
3360 unsigned long long maxsize;
3361 struct extent *e;
3362 int i;
3363 /* ddf/bvd supports lots of things, but not containers */
3364 if (level == LEVEL_CONTAINER) {
3365 if (verbose)
3366 pr_err("DDF cannot create a container within a container\n");
3367 return 0;
3368 }
3369 /* We must have the container info already read in. */
3370 if (!ddf)
3371 return 0;
3372
3373 if (!dev) {
3374 /* General test: make sure there is space for
3375 * 'raiddisks' device extents of size 'size'.
3376 */
3377 unsigned long long minsize = size;
3378 int dcnt = 0;
3379 if (minsize == 0)
3380 minsize = 8;
3381 for (dl = ddf->dlist; dl ; dl = dl->next)
3382 {
3383 int found = 0;
3384 pos = 0;
3385
3386 i = 0;
3387 e = get_extents(ddf, dl);
3388 if (!e) continue;
3389 do {
3390 unsigned long long esize;
3391 esize = e[i].start - pos;
3392 if (esize >= minsize)
3393 found = 1;
3394 pos = e[i].start + e[i].size;
3395 i++;
3396 } while (e[i-1].size);
3397 if (found)
3398 dcnt++;
3399 free(e);
3400 }
3401 if (dcnt < raiddisks) {
3402 if (verbose)
3403 pr_err("ddf: Not enough devices with "
3404 "space for this array (%d < %d)\n",
3405 dcnt, raiddisks);
3406 return 0;
3407 }
3408 return 1;
3409 }
3410 /* This device must be a member of the set */
3411 if (stat(dev, &stb) < 0)
3412 return 0;
3413 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3414 return 0;
3415 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3416 if (dl->major == (int)major(stb.st_rdev) &&
3417 dl->minor == (int)minor(stb.st_rdev))
3418 break;
3419 }
3420 if (!dl) {
3421 if (verbose)
3422 pr_err("ddf: %s is not in the "
3423 "same DDF set\n",
3424 dev);
3425 return 0;
3426 }
3427 e = get_extents(ddf, dl);
3428 maxsize = 0;
3429 i = 0;
3430 if (e) do {
3431 unsigned long long esize;
3432 esize = e[i].start - pos;
3433 if (esize >= maxsize)
3434 maxsize = esize;
3435 pos = e[i].start + e[i].size;
3436 i++;
3437 } while (e[i-1].size);
3438 *freesize = maxsize;
3439 // FIXME here I am
3440
3441 return 1;
3442 }
3443
3444 static int load_super_ddf_all(struct supertype *st, int fd,
3445 void **sbp, char *devname)
3446 {
3447 struct mdinfo *sra;
3448 struct ddf_super *super;
3449 struct mdinfo *sd, *best = NULL;
3450 int bestseq = 0;
3451 int seq;
3452 char nm[20];
3453 int dfd;
3454
3455 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3456 if (!sra)
3457 return 1;
3458 if (sra->array.major_version != -1 ||
3459 sra->array.minor_version != -2 ||
3460 strcmp(sra->text_version, "ddf") != 0)
3461 return 1;
3462
3463 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3464 return 1;
3465 memset(super, 0, sizeof(*super));
3466
3467 /* first, try each device, and choose the best ddf */
3468 for (sd = sra->devs ; sd ; sd = sd->next) {
3469 int rv;
3470 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3471 dfd = dev_open(nm, O_RDONLY);
3472 if (dfd < 0)
3473 return 2;
3474 rv = load_ddf_headers(dfd, super, NULL);
3475 close(dfd);
3476 if (rv == 0) {
3477 seq = be32_to_cpu(super->active->seq);
3478 if (super->active->openflag)
3479 seq--;
3480 if (!best || seq > bestseq) {
3481 bestseq = seq;
3482 best = sd;
3483 }
3484 }
3485 }
3486 if (!best)
3487 return 1;
3488 /* OK, load this ddf */
3489 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3490 dfd = dev_open(nm, O_RDONLY);
3491 if (dfd < 0)
3492 return 1;
3493 load_ddf_headers(dfd, super, NULL);
3494 load_ddf_global(dfd, super, NULL);
3495 close(dfd);
3496 /* Now we need the device-local bits */
3497 for (sd = sra->devs ; sd ; sd = sd->next) {
3498 int rv;
3499
3500 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3501 dfd = dev_open(nm, O_RDWR);
3502 if (dfd < 0)
3503 return 2;
3504 rv = load_ddf_headers(dfd, super, NULL);
3505 if (rv == 0)
3506 rv = load_ddf_local(dfd, super, NULL, 1);
3507 if (rv)
3508 return 1;
3509 }
3510
3511 *sbp = super;
3512 if (st->ss == NULL) {
3513 st->ss = &super_ddf;
3514 st->minor_version = 0;
3515 st->max_devs = 512;
3516 }
3517 strcpy(st->container_devnm, fd2devnm(fd));
3518 return 0;
3519 }
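/* Worked example of the "best" header selection above: if two members
 * carry seq 41 and a third carries seq 42 but with openflag set (its
 * last metadata write may have been interrupted), the effective sequence
 * numbers compared are 41, 41 and 41, so the first device examined wins
 * and its global sections are the ones loaded.
 */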
3520
3521 static int load_container_ddf(struct supertype *st, int fd,
3522 char *devname)
3523 {
3524 return load_super_ddf_all(st, fd, &st->sb, devname);
3525 }
3526
3527 #endif /* MDASSEMBLE */
3528
3529 static int check_secondary(const struct vcl *vc)
3530 {
3531 const struct vd_config *conf = &vc->conf;
3532 int i;
3533
3534 /* The only DDF secondary RAID level md can support is
3535 * RAID 10, if the stripe sizes and Basic volume sizes
3536 * are all equal.
3537 * Other configurations could in theory be supported by exposing
3538 * the BVDs to user space and using device mapper for the secondary
3539 * mapping. So far we don't support that.
3540 */
3541
3542 __u64 sec_elements[4] = {0, 0, 0, 0};
3543 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1ULL<<((n)&63)))
3544 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1ULL<<((n)&63))) != 0)
3545
3546 if (vc->other_bvds == NULL) {
3547 pr_err("No BVDs for secondary RAID found\n");
3548 return -1;
3549 }
3550 if (conf->prl != DDF_RAID1) {
3551 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3552 return -1;
3553 }
3554 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3555 pr_err("Secondary RAID level %d is unsupported\n",
3556 conf->srl);
3557 return -1;
3558 }
3559 __set_sec_seen(conf->sec_elmnt_seq);
3560 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3561 const struct vd_config *bvd = vc->other_bvds[i];
3562 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3563 continue;
3564 if (bvd->srl != conf->srl) {
3565 pr_err("Inconsistent secondary RAID level across BVDs\n");
3566 return -1;
3567 }
3568 if (bvd->prl != conf->prl) {
3569 pr_err("Different RAID levels for BVDs are unsupported\n");
3570 return -1;
3571 }
3572 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3573 pr_err("All BVDs must have the same number of primary elements\n");
3574 return -1;
3575 }
3576 if (bvd->chunk_shift != conf->chunk_shift) {
3577 pr_err("Different strip sizes for BVDs are unsupported\n");
3578 return -1;
3579 }
3580 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3581 pr_err("Different BVD sizes are unsupported\n");
3582 return -1;
3583 }
3584 __set_sec_seen(bvd->sec_elmnt_seq);
3585 }
3586 for (i = 0; i < conf->sec_elmnt_count; i++) {
3587 if (!__was_sec_seen(i)) {
3588 pr_err("BVD %d is missing\n", i);
3589 return -1;
3590 }
3591 }
3592 return 0;
3593 }
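/* Worked example of the sec_elements bitmap used above: with
 * sec_elmnt_count == 3 and BVD sequence numbers 0, 1 and 2,
 * __set_sec_seen() sets bits 0-2 of sec_elements[0]; the final loop then
 * finds every bit present and check_secondary() returns 0.  A missing
 * BVD leaves its bit clear and is reported via "BVD %d is missing".
 */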
3594
3595 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3596 be32 refnum, unsigned int nmax,
3597 const struct vd_config **bvd,
3598 unsigned int *idx)
3599 {
3600 unsigned int i, j, n, sec, cnt;
3601
3602 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3603 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3604
3605 for (i = 0, j = 0 ; i < nmax ; i++) {
3606 /* j counts valid entries for this BVD */
3607 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3608 j++;
3609 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3610 *bvd = &vc->conf;
3611 *idx = i;
3612 return sec * cnt + j - 1;
3613 }
3614 }
3615 if (vc->other_bvds == NULL)
3616 goto bad;
3617
3618 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3619 struct vd_config *vd = vc->other_bvds[n-1];
3620 sec = vd->sec_elmnt_seq;
3621 if (sec == DDF_UNUSED_BVD)
3622 continue;
3623 for (i = 0, j = 0 ; i < nmax ; i++) {
3624 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3625 j++;
3626 if (be32_eq(vd->phys_refnum[i], refnum)) {
3627 *bvd = vd;
3628 *idx = i;
3629 return sec * cnt + j - 1;
3630 }
3631 }
3632 }
3633 bad:
3634 *bvd = NULL;
3635 return DDF_NOTFOUND;
3636 }
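/* Worked example of the index arithmetic above: in a RAID10 set with
 * prim_elmnt_count == 2 and two BVDs, a refnum found as the second valid
 * phys_refnum (j == 2) of the BVD whose sec_elmnt_seq is 1 maps to
 * 1 * 2 + 2 - 1 == 3, i.e. the fourth raid disk of the exposed array.
 */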
3637
3638 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3639 {
3640 /* Given a container loaded by load_super_ddf_all,
3641 * extract information about all the arrays into
3642 * an mdinfo tree.
3643 *
3644 * For each vcl in conflist: create an mdinfo, fill it in,
3645 * then look for matching devices (phys_refnum) in dlist
3646 * and create appropriate device mdinfo.
3647 */
3648 struct ddf_super *ddf = st->sb;
3649 struct mdinfo *rest = NULL;
3650 struct vcl *vc;
3651
3652 for (vc = ddf->conflist ; vc ; vc=vc->next)
3653 {
3654 unsigned int i;
3655 unsigned int j;
3656 struct mdinfo *this;
3657 char *ep;
3658 __u32 *cptr;
3659 unsigned int pd;
3660
3661 if (subarray &&
3662 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3663 *ep != '\0'))
3664 continue;
3665
3666 if (vc->conf.sec_elmnt_count > 1) {
3667 if (check_secondary(vc) != 0)
3668 continue;
3669 }
3670
3671 this = xcalloc(1, sizeof(*this));
3672 this->next = rest;
3673 rest = this;
3674
3675 if (layout_ddf2md(&vc->conf, &this->array))
3676 continue;
3677 this->array.md_minor = -1;
3678 this->array.major_version = -1;
3679 this->array.minor_version = -2;
3680 cptr = (__u32 *)(vc->conf.guid + 16);
3681 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3682 this->array.utime = DECADE +
3683 be32_to_cpu(vc->conf.timestamp);
3684 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3685
3686 i = vc->vcnum;
3687 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3688 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3689 DDF_init_full) {
3690 this->array.state = 0;
3691 this->resync_start = 0;
3692 } else {
3693 this->array.state = 1;
3694 this->resync_start = MaxSector;
3695 }
3696 memcpy(this->name, ddf->virt->entries[i].name, 16);
3697 this->name[16]=0;
3698 for(j=0; j<16; j++)
3699 if (this->name[j] == ' ')
3700 this->name[j] = 0;
3701
3702 memset(this->uuid, 0, sizeof(this->uuid));
3703 this->component_size = be64_to_cpu(vc->conf.blocks);
3704 this->array.size = this->component_size / 2;
3705 this->container_member = i;
3706
3707 ddf->currentconf = vc;
3708 uuid_from_super_ddf(st, this->uuid);
3709 if (!subarray)
3710 ddf->currentconf = NULL;
3711
3712 sprintf(this->text_version, "/%s/%d",
3713 st->container_devnm, this->container_member);
3714
3715 for (pd = 0; pd < be16_to_cpu(ddf->phys->used_pdes); pd++) {
3716 struct mdinfo *dev;
3717 struct dl *d;
3718 const struct vd_config *bvd;
3719 unsigned int iphys;
3720 int stt;
3721
3722 if (be32_to_cpu(ddf->phys->entries[pd].refnum)
3723 == 0xFFFFFFFF)
3724 continue;
3725
3726 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3727 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3728 != DDF_Online)
3729 continue;
3730
3731 i = get_pd_index_from_refnum(
3732 vc, ddf->phys->entries[pd].refnum,
3733 ddf->mppe, &bvd, &iphys);
3734 if (i == DDF_NOTFOUND)
3735 continue;
3736
3737 this->array.working_disks++;
3738
3739 for (d = ddf->dlist; d ; d=d->next)
3740 if (be32_eq(d->disk.refnum,
3741 ddf->phys->entries[pd].refnum))
3742 break;
3743 if (d == NULL)
3744 /* Haven't found that one yet, maybe there are others */
3745 continue;
3746
3747 dev = xcalloc(1, sizeof(*dev));
3748 dev->next = this->devs;
3749 this->devs = dev;
3750
3751 dev->disk.number = be32_to_cpu(d->disk.refnum);
3752 dev->disk.major = d->major;
3753 dev->disk.minor = d->minor;
3754 dev->disk.raid_disk = i;
3755 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3756 dev->recovery_start = MaxSector;
3757
3758 dev->events = be32_to_cpu(ddf->primary.seq);
3759 dev->data_offset =
3760 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3761 dev->component_size = be64_to_cpu(bvd->blocks);
3762 if (d->devname)
3763 strcpy(dev->name, d->devname);
3764 }
3765 }
3766 return rest;
3767 }
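/* Illustrative sketch (editor's addition, not from the original source)
 * of how a caller might walk the tree built by container_content_ddf() -
 * one mdinfo per virtual disk, each with a chain of member devices:
 *
 *	struct mdinfo *a, *d;
 *	for (a = container_content_ddf(st, NULL); a; a = a->next)
 *		for (d = a->devs; d; d = d->next)
 *			printf("%s: %d:%d in slot %d\n", a->name,
 *			       d->disk.major, d->disk.minor,
 *			       d->disk.raid_disk);
 */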
3768
3769 static int store_super_ddf(struct supertype *st, int fd)
3770 {
3771 struct ddf_super *ddf = st->sb;
3772 unsigned long long dsize;
3773 void *buf;
3774 int rc;
3775
3776 if (!ddf)
3777 return 1;
3778
3779 if (!get_dev_size(fd, NULL, &dsize))
3780 return 1;
3781
3782 if (ddf->dlist || ddf->conflist) {
3783 struct stat sta;
3784 struct dl *dl;
3785 int ofd, ret;
3786
3787 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3788 pr_err("%s: file descriptor for invalid device\n",
3789 __func__);
3790 return 1;
3791 }
3792 for (dl = ddf->dlist; dl; dl = dl->next)
3793 if (dl->major == (int)major(sta.st_rdev) &&
3794 dl->minor == (int)minor(sta.st_rdev))
3795 break;
3796 if (!dl) {
3797 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3798 (int)major(sta.st_rdev),
3799 (int)minor(sta.st_rdev));
3800 return 1;
3801 }
3802 ofd = dl->fd;
3803 dl->fd = fd;
3804 ret = (_write_super_to_disk(ddf, dl, 0) != 1);
3805 dl->fd = ofd;
3806 return ret;
3807 }
3808
3809 if (posix_memalign(&buf, 512, 512) != 0)
3810 return 1;
3811 memset(buf, 0, 512);
3812
3813 lseek64(fd, dsize-512, 0);
3814 rc = write(fd, buf, 512);
3815 free(buf);
3816 if (rc < 0)
3817 return 1;
3818 return 0;
3819 }
3820
3821 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3822 {
3823 /*
3824 * return:
3825 * 0 same, or first was empty, and second was copied
3826 * 1 second had wrong number
3827 * 2 wrong uuid
3828 * 3 wrong other info
3829 */
3830 struct ddf_super *first = st->sb;
3831 struct ddf_super *second = tst->sb;
3832 struct dl *dl1, *dl2;
3833 struct vcl *vl1, *vl2;
3834 unsigned int max_vds, max_pds, pd, vd;
3835
3836 if (!first) {
3837 st->sb = tst->sb;
3838 tst->sb = NULL;
3839 return 0;
3840 }
3841
3842 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3843 return 2;
3844
3845 if (!be32_eq(first->anchor.seq, second->anchor.seq)) {
3846 dprintf("%s: sequence number mismatch %u/%u\n", __func__,
3847 be32_to_cpu(first->anchor.seq),
3848 be32_to_cpu(second->anchor.seq));
3849 return 3;
3850 }
3851 if (first->max_part != second->max_part ||
3852 !be16_eq(first->phys->used_pdes, second->phys->used_pdes) ||
3853 !be16_eq(first->virt->populated_vdes,
3854 second->virt->populated_vdes)) {
3855 dprintf("%s: PD/VD number mismatch\n", __func__);
3856 return 3;
3857 }
3858
3859 max_pds = be16_to_cpu(first->phys->used_pdes);
3860 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3861 for (pd = 0; pd < max_pds; pd++)
3862 if (be32_eq(first->phys->entries[pd].refnum,
3863 dl2->disk.refnum))
3864 break;
3865 if (pd == max_pds) {
3866 dprintf("%s: no match for disk %08x\n", __func__,
3867 be32_to_cpu(dl2->disk.refnum));
3868 return 3;
3869 }
3870 }
3871
3872 max_vds = be16_to_cpu(first->active->max_vd_entries);
3873 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3874 if (!be32_eq(vl2->conf.magic, DDF_VD_CONF_MAGIC))
3875 continue;
3876 for (vd = 0; vd < max_vds; vd++)
3877 if (!memcmp(first->virt->entries[vd].guid,
3878 vl2->conf.guid, DDF_GUID_LEN))
3879 break;
3880 if (vd == max_vds) {
3881 dprintf("%s: no match for VD config\n", __func__);
3882 return 3;
3883 }
3884 }
3885 /* FIXME should I look at anything else? */
3886
3887 /*
3888 	   At this point we are fairly sure that the metadata matches.
3889 	   But the new disk may contain additional local data.
3890 	   Add it to the superblock.
3891 */
3892 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3893 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3894 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3895 DDF_GUID_LEN))
3896 break;
3897 if (vl1) {
3898 if (vl1->other_bvds != NULL &&
3899 vl1->conf.sec_elmnt_seq !=
3900 vl2->conf.sec_elmnt_seq) {
3901 dprintf("%s: adding BVD %u\n", __func__,
3902 vl2->conf.sec_elmnt_seq);
3903 add_other_bvd(vl1, &vl2->conf,
3904 first->conf_rec_len*512);
3905 }
3906 continue;
3907 }
3908
3909 if (posix_memalign((void **)&vl1, 512,
3910 (first->conf_rec_len*512 +
3911 offsetof(struct vcl, conf))) != 0) {
3912 pr_err("%s could not allocate vcl buf\n",
3913 __func__);
3914 return 3;
3915 }
3916
3917 vl1->next = first->conflist;
3918 vl1->block_sizes = NULL;
3919 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3920 if (alloc_other_bvds(first, vl1) != 0) {
3921 pr_err("%s could not allocate other bvds\n",
3922 __func__);
3923 free(vl1);
3924 return 3;
3925 }
3926 for (vd = 0; vd < max_vds; vd++)
3927 if (!memcmp(first->virt->entries[vd].guid,
3928 vl1->conf.guid, DDF_GUID_LEN))
3929 break;
3930 vl1->vcnum = vd;
3931 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
3932 first->conflist = vl1;
3933 }
3934
3935 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3936 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3937 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
3938 break;
3939 if (dl1)
3940 continue;
3941
3942 if (posix_memalign((void **)&dl1, 512,
3943 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
3944 != 0) {
3945 pr_err("%s could not allocate disk info buffer\n",
3946 __func__);
3947 return 3;
3948 }
3949 memcpy(dl1, dl2, sizeof(*dl1));
3950 dl1->mdupdate = NULL;
3951 dl1->next = first->dlist;
3952 dl1->fd = -1;
3953 for (pd = 0; pd < max_pds; pd++)
3954 if (be32_eq(first->phys->entries[pd].refnum,
3955 dl1->disk.refnum))
3956 break;
3957 dl1->pdnum = pd;
3958 if (dl2->spare) {
3959 if (posix_memalign((void **)&dl1->spare, 512,
3960 first->conf_rec_len*512) != 0) {
3961 pr_err("%s could not allocate spare info buf\n",
3962 __func__);
3963 return 3;
3964 }
3965 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
3966 }
3967 for (vd = 0 ; vd < first->max_part ; vd++) {
3968 if (!dl2->vlist[vd]) {
3969 dl1->vlist[vd] = NULL;
3970 continue;
3971 }
3972 			for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3973 				if (!memcmp(vl1->conf.guid,
3974 					    dl2->vlist[vd]->conf.guid,
3975 					    DDF_GUID_LEN))
3976 					break;
3977 			/* matching config record in 'first', or NULL */
3978 			dl1->vlist[vd] = vl1;
3979 }
3980 first->dlist = dl1;
3981 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
3982 be32_to_cpu(dl1->disk.refnum));
3983 }
3984
3985 return 0;
3986 }
3987
3988 #ifndef MDASSEMBLE
3989 /*
3990 * A new array 'a' has been started which claims to be instance 'inst'
3991 * within container 'c'.
3992 * We need to confirm that the array matches the metadata in 'c' so
3993 * that we don't corrupt any metadata.
3994 */
3995 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
3996 {
3997 struct ddf_super *ddf = c->sb;
3998 int n = atoi(inst);
3999 if (all_ff(ddf->virt->entries[n].guid)) {
4000 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
4001 return -ENODEV;
4002 }
4003 dprintf("ddf: open_new %d\n", n);
4004 a->info.container_member = n;
4005 return 0;
4006 }
4007
4008 /*
4009 * The array 'a' is to be marked clean in the metadata.
4010 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4011  * clean up to that point (in sectors). If that cannot be recorded in the
4012 * metadata, then leave it as dirty.
4013 *
4014 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4015 * !global! virtual_disk.virtual_entry structure.
4016 */
4017 static int ddf_set_array_state(struct active_array *a, int consistent)
4018 {
4019 struct ddf_super *ddf = a->container->sb;
4020 int inst = a->info.container_member;
4021 int old = ddf->virt->entries[inst].state;
4022 if (consistent == 2) {
4023 /* Should check if a recovery should be started FIXME */
4024 consistent = 1;
4025 if (!is_resync_complete(&a->info))
4026 consistent = 0;
4027 }
4028 if (consistent)
4029 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4030 else
4031 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4032 if (old != ddf->virt->entries[inst].state)
4033 ddf_set_updates_pending(ddf);
4034
4035 old = ddf->virt->entries[inst].init_state;
4036 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4037 if (is_resync_complete(&a->info))
4038 ddf->virt->entries[inst].init_state |= DDF_init_full;
4039 else if (a->info.resync_start == 0)
4040 ddf->virt->entries[inst].init_state |= DDF_init_not;
4041 else
4042 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4043 if (old != ddf->virt->entries[inst].init_state)
4044 ddf_set_updates_pending(ddf);
4045
4046 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4047 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4048 consistent?"clean":"dirty",
4049 a->info.resync_start);
4050 return consistent;
4051 }
4052
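/*
 * Compute the DDF state of a single Basic VD: count the members that are
 * Online (and neither Failed nor Rebuilding) and map that count, according
 * to the primary RAID level, to optimal / part_optimal / degraded / failed.
 * E.g. a 5-disk RAID5 BVD with 4 working members is degraded, with 3 it is
 * failed.
 */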
4053 static int get_bvd_state(const struct ddf_super *ddf,
4054 const struct vd_config *vc)
4055 {
4056 unsigned int i, n_bvd, working = 0;
4057 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4058 int pd, st, state;
4059 for (i = 0; i < n_prim; i++) {
4060 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4061 continue;
4062 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4063 if (pd < 0)
4064 continue;
4065 st = be16_to_cpu(ddf->phys->entries[pd].state);
4066 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4067 == DDF_Online)
4068 working++;
4069 }
4070
4071 state = DDF_state_degraded;
4072 if (working == n_prim)
4073 state = DDF_state_optimal;
4074 else
4075 switch (vc->prl) {
4076 case DDF_RAID0:
4077 case DDF_CONCAT:
4078 case DDF_JBOD:
4079 state = DDF_state_failed;
4080 break;
4081 case DDF_RAID1:
4082 if (working == 0)
4083 state = DDF_state_failed;
4084 else if (working >= 2)
4085 state = DDF_state_part_optimal;
4086 break;
4087 case DDF_RAID4:
4088 case DDF_RAID5:
4089 if (working < n_prim - 1)
4090 state = DDF_state_failed;
4091 break;
4092 case DDF_RAID6:
4093 if (working < n_prim - 2)
4094 state = DDF_state_failed;
4095 else if (working == n_prim - 1)
4096 state = DDF_state_part_optimal;
4097 break;
4098 }
4099 return state;
4100 }
4101
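/*
 * Combine the states of two BVDs into the state of the containing
 * secondary VD.  With a mirrored secondary level one optimal copy keeps
 * the SVD at least partly optimal; with a striped/spanned secondary level
 * a single failed BVD fails the whole SVD.
 */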
4102 static int secondary_state(int state, int other, int seclevel)
4103 {
4104 if (state == DDF_state_optimal && other == DDF_state_optimal)
4105 return DDF_state_optimal;
4106 if (seclevel == DDF_2MIRRORED) {
4107 if (state == DDF_state_optimal || other == DDF_state_optimal)
4108 return DDF_state_part_optimal;
4109 if (state == DDF_state_failed && other == DDF_state_failed)
4110 return DDF_state_failed;
4111 return DDF_state_degraded;
4112 } else {
4113 if (state == DDF_state_failed || other == DDF_state_failed)
4114 return DDF_state_failed;
4115 if (state == DDF_state_degraded || other == DDF_state_degraded)
4116 return DDF_state_degraded;
4117 return DDF_state_part_optimal;
4118 }
4119 }
4120
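/*
 * State of a complete (possibly multi-BVD) virtual disk: fold the state of
 * each BVD of the configuration into one value using secondary_state().
 */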
4121 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4122 {
4123 int state = get_bvd_state(ddf, &vcl->conf);
4124 unsigned int i;
4125 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4126 state = secondary_state(
4127 state,
4128 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4129 vcl->conf.srl);
4130 }
4131 return state;
4132 }
4133
4134 /*
4135 * The state of each disk is stored in the global phys_disk structure
4136 * in phys_disk.entries[n].state.
4137 * This makes various combinations awkward.
4138 * - When a device fails in any array, it must be failed in all arrays
4139 * that include a part of this device.
4140 * - When a component is rebuilding, we cannot include it officially in the
4141 * array unless this is the only array that uses the device.
4142 *
4143 * So: when transitioning:
4144  * Online -> failed, just set the failed flag. The monitor will propagate it.
4145 * spare -> online, the device might need to be added to the array.
4146 * spare -> failed, just set failed. Don't worry if in array or not.
4147 */
4148 static void ddf_set_disk(struct active_array *a, int n, int state)
4149 {
4150 struct ddf_super *ddf = a->container->sb;
4151 unsigned int inst = a->info.container_member, n_bvd;
4152 struct vcl *vcl;
4153 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4154 &n_bvd, &vcl);
4155 int pd;
4156 struct mdinfo *mdi;
4157 struct dl *dl;
4158
4159 dprintf("%s: %d to %x\n", __func__, n, state);
4160 if (vc == NULL) {
4161 dprintf("ddf: cannot find instance %d!!\n", inst);
4162 return;
4163 }
4164 /* Find the matching slot in 'info'. */
4165 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4166 if (mdi->disk.raid_disk == n)
4167 break;
4168 if (!mdi) {
4169 pr_err("%s: cannot find raid disk %d\n",
4170 __func__, n);
4171 return;
4172 }
4173
4174 /* and find the 'dl' entry corresponding to that. */
4175 for (dl = ddf->dlist; dl; dl = dl->next)
4176 if (mdi->state_fd >= 0 &&
4177 mdi->disk.major == dl->major &&
4178 mdi->disk.minor == dl->minor)
4179 break;
4180 if (!dl) {
4181 pr_err("%s: cannot find raid disk %d (%d/%d)\n",
4182 __func__, n,
4183 mdi->disk.major, mdi->disk.minor);
4184 return;
4185 }
4186
4187 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4188 if (pd < 0 || pd != dl->pdnum) {
4189 /* disk doesn't currently exist or has changed.
4190 * If it is now in_sync, insert it. */
4191 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4192 __func__, dl->pdnum, dl->major, dl->minor,
4193 be32_to_cpu(dl->disk.refnum));
4194 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4195 __func__, inst, n_bvd,
4196 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4197 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4198 pd = dl->pdnum; /* FIXME: is this really correct ? */
4199 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4200 LBA_OFFSET(ddf, vc)[n_bvd] =
4201 cpu_to_be64(mdi->data_offset);
4202 be16_clear(ddf->phys->entries[pd].type,
4203 cpu_to_be16(DDF_Global_Spare));
4204 be16_set(ddf->phys->entries[pd].type,
4205 cpu_to_be16(DDF_Active_in_VD));
4206 ddf_set_updates_pending(ddf);
4207 }
4208 } else {
4209 be16 old = ddf->phys->entries[pd].state;
4210 if (state & DS_FAULTY)
4211 be16_set(ddf->phys->entries[pd].state,
4212 cpu_to_be16(DDF_Failed));
4213 if (state & DS_INSYNC) {
4214 be16_set(ddf->phys->entries[pd].state,
4215 cpu_to_be16(DDF_Online));
4216 be16_clear(ddf->phys->entries[pd].state,
4217 cpu_to_be16(DDF_Rebuilding));
4218 }
4219 if (!be16_eq(old, ddf->phys->entries[pd].state))
4220 ddf_set_updates_pending(ddf);
4221 }
4222
4223 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4224 be32_to_cpu(dl->disk.refnum), state,
4225 be16_to_cpu(ddf->phys->entries[pd].state));
4226
4227 /* Now we need to check the state of the array and update
4228 * virtual_disk.entries[n].state.
4229 * It needs to be one of "optimal", "degraded", "failed".
4230 * I don't understand 'deleted' or 'missing'.
4231 */
4232 state = get_svd_state(ddf, vcl);
4233
4234 if (ddf->virt->entries[inst].state !=
4235 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4236 | state)) {
4237
4238 ddf->virt->entries[inst].state =
4239 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4240 | state;
4241 ddf_set_updates_pending(ddf);
4242 }
4243
4244 }
4245
4246 static void ddf_sync_metadata(struct supertype *st)
4247 {
4248
4249 /*
4250 * Write all data to all devices.
4251 * Later, we might be able to track whether only local changes
4252 * have been made, or whether any global data has been changed,
4253 * but ddf is sufficiently weird that it probably always
4254 * changes global data ....
4255 */
4256 struct ddf_super *ddf = st->sb;
4257 if (!ddf->updates_pending)
4258 return;
4259 ddf->updates_pending = 0;
4260 __write_init_super_ddf(st, 1);
4261 dprintf("ddf: sync_metadata\n");
4262 }
4263
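/*
 * Unlink config records whose GUID matches 'guid' from the list.  The
 * records themselves are not freed.  Returns 1 if anything was removed,
 * 0 otherwise.
 */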
4264 static int del_from_conflist(struct vcl **list, const char *guid)
4265 {
4266 struct vcl **p;
4267 int found = 0;
4268 for (p = list; p && *p; p = &((*p)->next))
4269 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4270 found = 1;
4271 *p = (*p)->next;
4272 }
4273 return found;
4274 }
4275
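/*
 * Remove the virtual disk identified by 'guid' from the in-memory
 * metadata: drop its config record from the conflist, clear any vlist
 * references held by member disks, and mark the virtual_disk entry unused
 * by filling its GUID with 0xff.  Returns 0 on success, -1 if the VD or
 * its config cannot be found.
 */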
4276 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4277 {
4278 struct dl *dl;
4279 unsigned int vdnum, i;
4280 vdnum = find_vde_by_guid(ddf, guid);
4281 if (vdnum == DDF_NOTFOUND) {
4282 pr_err("%s: could not find VD %s\n", __func__,
4283 guid_str(guid));
4284 return -1;
4285 }
4286 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4287 pr_err("%s: could not find conf %s\n", __func__,
4288 guid_str(guid));
4289 return -1;
4290 }
4291 for (dl = ddf->dlist; dl; dl = dl->next)
4292 for (i = 0; i < ddf->max_part; i++)
4293 if (dl->vlist[i] != NULL &&
4294 !memcmp(dl->vlist[i]->conf.guid, guid,
4295 DDF_GUID_LEN))
4296 dl->vlist[i] = NULL;
4297 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4298 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4299 return 0;
4300 }
4301
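/*
 * Delete the subarray that container_content_ddf selected (currentconf).
 * If updates are being queued for mdmon (update_tail is set), the deletion
 * is sent as a DDF_VIRT_RECORDS_MAGIC update using DDF_state_deleted as a
 * marker; otherwise it is applied and written out immediately.
 */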
4302 static int kill_subarray_ddf(struct supertype *st)
4303 {
4304 struct ddf_super *ddf = st->sb;
4305 /*
4306 * currentconf is set in container_content_ddf,
4307 * called with subarray arg
4308 */
4309 	struct vcl *victim = ddf->currentconf;
4310 	struct vd_config *conf;
4311 	unsigned int vdnum;
4312 	ddf->currentconf = NULL;
4313 if (!victim) {
4314 pr_err("%s: nothing to kill\n", __func__);
4315 return -1;
4316 }
4317 conf = &victim->conf;
4318 vdnum = find_vde_by_guid(ddf, conf->guid);
4319 if (vdnum == DDF_NOTFOUND) {
4320 pr_err("%s: could not find VD %s\n", __func__,
4321 guid_str(conf->guid));
4322 return -1;
4323 }
4324 if (st->update_tail) {
4325 struct virtual_disk *vd;
4326 int len = sizeof(struct virtual_disk)
4327 + sizeof(struct virtual_entry);
4328 vd = xmalloc(len);
4329 if (vd == NULL) {
4330 pr_err("%s: failed to allocate %d bytes\n", __func__,
4331 len);
4332 return -1;
4333 }
4334 		memset(vd, 0, len);
4335 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4336 vd->populated_vdes = cpu_to_be16(0);
4337 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4338 /* we use DDF_state_deleted as marker */
4339 vd->entries[0].state = DDF_state_deleted;
4340 append_metadata_update(st, vd, len);
4341 } else {
4342 _kill_subarray_ddf(ddf, conf->guid);
4343 ddf_set_updates_pending(ddf);
4344 ddf_sync_metadata(st);
4345 }
4346 return 0;
4347 }
4348
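/*
 * Find the BVD in 'update' whose sec_elmnt_seq matches 'conf' and copy its
 * phys_refnum and LBA offset tables (mppe entries each) over the existing
 * configuration record.
 */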
4349 static void copy_matching_bvd(struct ddf_super *ddf,
4350 struct vd_config *conf,
4351 const struct metadata_update *update)
4352 {
4353 unsigned int mppe =
4354 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4355 unsigned int len = ddf->conf_rec_len * 512;
4356 char *p;
4357 struct vd_config *vc;
4358 for (p = update->buf; p < update->buf + update->len; p += len) {
4359 vc = (struct vd_config *) p;
4360 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4361 memcpy(conf->phys_refnum, vc->phys_refnum,
4362 mppe * (sizeof(__u32) + sizeof(__u64)));
4363 return;
4364 }
4365 }
4366 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4367 conf->sec_elmnt_seq, guid_str(conf->guid));
4368 }
4369
4370 static void ddf_process_update(struct supertype *st,
4371 struct metadata_update *update)
4372 {
4373 /* Apply this update to the metadata.
4374 * The first 4 bytes are a DDF_*_MAGIC which guides
4375 * our actions.
4376 	 * Possible updates are:
4377 * DDF_PHYS_RECORDS_MAGIC
4378 * Add a new physical device or remove an old one.
4379 * Changes to this record only happen implicitly.
4380 * used_pdes is the device number.
4381 * DDF_VIRT_RECORDS_MAGIC
4382 * Add a new VD. Possibly also change the 'access' bits.
4383 * populated_vdes is the entry number.
4384 * DDF_VD_CONF_MAGIC
4385 	 *  New or updated VD. The VIRT_RECORD must already
4386 * exist. For an update, phys_refnum and lba_offset
4387 * (at least) are updated, and the VD_CONF must
4388 * be written to precisely those devices listed with
4389 * a phys_refnum.
4390 * DDF_SPARE_ASSIGN_MAGIC
4391 * replacement Spare Assignment Record... but for which device?
4392 *
4393 * So, e.g.:
4394 * - to create a new array, we send a VIRT_RECORD and
4395 * a VD_CONF. Then assemble and start the array.
4396 * - to activate a spare we send a VD_CONF to add the phys_refnum
4397 * and offset. This will also mark the spare as active with
4398 * a spare-assignment record.
4399 */
4400 struct ddf_super *ddf = st->sb;
4401 be32 *magic = (be32 *)update->buf;
4402 struct phys_disk *pd;
4403 struct virtual_disk *vd;
4404 struct vd_config *vc;
4405 struct vcl *vcl;
4406 struct dl *dl;
4407 unsigned int ent;
4408 unsigned int pdnum, pd2, len;
4409
4410 dprintf("Process update %x\n", be32_to_cpu(*magic));
4411
4412 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4413
4414 if (update->len != (sizeof(struct phys_disk) +
4415 sizeof(struct phys_disk_entry)))
4416 return;
4417 pd = (struct phys_disk*)update->buf;
4418
4419 ent = be16_to_cpu(pd->used_pdes);
4420 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4421 return;
4422 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4423 struct dl **dlp;
4424 /* removing this disk. */
4425 be16_set(ddf->phys->entries[ent].state,
4426 cpu_to_be16(DDF_Missing));
4427 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4428 struct dl *dl = *dlp;
4429 if (dl->pdnum == (signed)ent) {
4430 close(dl->fd);
4431 dl->fd = -1;
4432 /* FIXME this doesn't free
4433 * dl->devname */
4434 update->space = dl;
4435 *dlp = dl->next;
4436 break;
4437 }
4438 }
4439 ddf_set_updates_pending(ddf);
4440 return;
4441 }
4442 if (!all_ff(ddf->phys->entries[ent].guid))
4443 return;
4444 ddf->phys->entries[ent] = pd->entries[0];
4445 ddf->phys->used_pdes = cpu_to_be16
4446 (1 + be16_to_cpu(ddf->phys->used_pdes));
4447 ddf_set_updates_pending(ddf);
4448 if (ddf->add_list) {
4449 struct active_array *a;
4450 struct dl *al = ddf->add_list;
4451 ddf->add_list = al->next;
4452
4453 al->next = ddf->dlist;
4454 ddf->dlist = al;
4455
4456 /* As a device has been added, we should check
4457 * for any degraded devices that might make
4458 * use of this spare */
4459 for (a = st->arrays ; a; a=a->next)
4460 a->check_degraded = 1;
4461 }
4462 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4463
4464 if (update->len != (sizeof(struct virtual_disk) +
4465 sizeof(struct virtual_entry)))
4466 return;
4467 vd = (struct virtual_disk*)update->buf;
4468
4469 if (vd->entries[0].state == DDF_state_deleted) {
4470 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4471 return;
4472 } else {
4473
4474 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4475 if (ent != DDF_NOTFOUND) {
4476 dprintf("%s: VD %s exists already in slot %d\n",
4477 __func__, guid_str(vd->entries[0].guid),
4478 ent);
4479 return;
4480 }
4481 ent = find_unused_vde(ddf);
4482 if (ent == DDF_NOTFOUND)
4483 return;
4484 ddf->virt->entries[ent] = vd->entries[0];
4485 ddf->virt->populated_vdes =
4486 cpu_to_be16(
4487 1 + be16_to_cpu(
4488 ddf->virt->populated_vdes));
4489 dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
4490 __func__, guid_str(vd->entries[0].guid), ent,
4491 ddf->virt->entries[ent].state,
4492 ddf->virt->entries[ent].init_state);
4493 }
4494 ddf_set_updates_pending(ddf);
4495 }
4496
4497 else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4498 vc = (struct vd_config*)update->buf;
4499 len = ddf->conf_rec_len * 512;
4500 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4501 pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
4502 __func__, guid_str(vc->guid), update->len,
4503 vc->sec_elmnt_count);
4504 return;
4505 }
4506 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4507 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4508 break;
4509 dprintf("%s: conf update for %s (%s)\n", __func__,
4510 guid_str(vc->guid), (vcl ? "old" : "new"));
4511 if (vcl) {
4512 /* An update, just copy the phys_refnum and lba_offset
4513 * fields
4514 */
4515 unsigned int i;
4516 unsigned int k;
4517 copy_matching_bvd(ddf, &vcl->conf, update);
4518 for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
4519 dprintf("BVD %u has %08x at %llu\n", 0,
4520 be32_to_cpu(vcl->conf.phys_refnum[k]),
4521 be64_to_cpu(LBA_OFFSET(ddf,
4522 &vcl->conf)[k]));
4523 for (i = 1; i < vc->sec_elmnt_count; i++) {
4524 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4525 update);
4526 for (k = 0; k < be16_to_cpu(
4527 vc->prim_elmnt_count); k++)
4528 dprintf("BVD %u has %08x at %llu\n", i,
4529 be32_to_cpu
4530 (vcl->other_bvds[i-1]->
4531 phys_refnum[k]),
4532 be64_to_cpu
4533 (LBA_OFFSET
4534 (ddf,
4535 vcl->other_bvds[i-1])[k]));
4536 }
4537 } else {
4538 /* A new VD_CONF */
4539 unsigned int i;
4540 if (!update->space)
4541 return;
4542 vcl = update->space;
4543 update->space = NULL;
4544 vcl->next = ddf->conflist;
4545 memcpy(&vcl->conf, vc, len);
4546 ent = find_vde_by_guid(ddf, vc->guid);
4547 if (ent == DDF_NOTFOUND)
4548 return;
4549 vcl->vcnum = ent;
4550 ddf->conflist = vcl;
4551 for (i = 1; i < vc->sec_elmnt_count; i++)
4552 memcpy(vcl->other_bvds[i-1],
4553 update->buf + len * i, len);
4554 }
4555 /* Set DDF_Transition on all Failed devices - to help
4556 * us detect those that are no longer in use
4557 */
4558 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4559 pdnum++)
4560 if (be16_and(ddf->phys->entries[pdnum].state,
4561 cpu_to_be16(DDF_Failed)))
4562 be16_set(ddf->phys->entries[pdnum].state,
4563 cpu_to_be16(DDF_Transition));
4564 /* Now make sure vlist is correct for each dl. */
4565 for (dl = ddf->dlist; dl; dl = dl->next) {
4566 unsigned int vn = 0;
4567 int in_degraded = 0;
4568 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4569 unsigned int dn, ibvd;
4570 const struct vd_config *conf;
4571 int vstate;
4572 dn = get_pd_index_from_refnum(vcl,
4573 dl->disk.refnum,
4574 ddf->mppe,
4575 &conf, &ibvd);
4576 if (dn == DDF_NOTFOUND)
4577 continue;
4578 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4579 dl->pdnum,
4580 be32_to_cpu(dl->disk.refnum),
4581 guid_str(conf->guid),
4582 conf->sec_elmnt_seq, vn);
4583 /* Clear the Transition flag */
4584 if (be16_and
4585 (ddf->phys->entries[dl->pdnum].state,
4586 cpu_to_be16(DDF_Failed)))
4587 be16_clear(ddf->phys
4588 ->entries[dl->pdnum].state,
4589 cpu_to_be16(DDF_Transition));
4590 dl->vlist[vn++] = vcl;
4591 vstate = ddf->virt->entries[vcl->vcnum].state
4592 & DDF_state_mask;
4593 if (vstate == DDF_state_degraded ||
4594 vstate == DDF_state_part_optimal)
4595 in_degraded = 1;
4596 }
4597 while (vn < ddf->max_part)
4598 dl->vlist[vn++] = NULL;
4599 if (dl->vlist[0]) {
4600 be16_clear(ddf->phys->entries[dl->pdnum].type,
4601 cpu_to_be16(DDF_Global_Spare));
4602 if (!be16_and(ddf->phys
4603 ->entries[dl->pdnum].type,
4604 cpu_to_be16(DDF_Active_in_VD))) {
4605 be16_set(ddf->phys
4606 ->entries[dl->pdnum].type,
4607 cpu_to_be16(DDF_Active_in_VD));
4608 if (in_degraded)
4609 be16_set(ddf->phys
4610 ->entries[dl->pdnum]
4611 .state,
4612 cpu_to_be16
4613 (DDF_Rebuilding));
4614 }
4615 }
4616 if (dl->spare) {
4617 be16_clear(ddf->phys->entries[dl->pdnum].type,
4618 cpu_to_be16(DDF_Global_Spare));
4619 be16_set(ddf->phys->entries[dl->pdnum].type,
4620 cpu_to_be16(DDF_Spare));
4621 }
4622 if (!dl->vlist[0] && !dl->spare) {
4623 be16_set(ddf->phys->entries[dl->pdnum].type,
4624 cpu_to_be16(DDF_Global_Spare));
4625 be16_clear(ddf->phys->entries[dl->pdnum].type,
4626 cpu_to_be16(DDF_Spare));
4627 be16_clear(ddf->phys->entries[dl->pdnum].type,
4628 cpu_to_be16(DDF_Active_in_VD));
4629 }
4630 }
4631
4632 /* Now remove any 'Failed' devices that are not part
4633 * of any VD. They will have the Transition flag set.
4634 * Once done, we need to update all dl->pdnum numbers.
4635 */
4636 pd2 = 0;
4637 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4638 pdnum++) {
4639 if (be16_and(ddf->phys->entries[pdnum].state,
4640 cpu_to_be16(DDF_Failed))
4641 && be16_and(ddf->phys->entries[pdnum].state,
4642 cpu_to_be16(DDF_Transition))) {
4643 				/* skip this one unless in dlist */
4644 for (dl = ddf->dlist; dl; dl = dl->next)
4645 if (dl->pdnum == (int)pdnum)
4646 break;
4647 if (!dl)
4648 continue;
4649 }
4650 if (pdnum == pd2)
4651 pd2++;
4652 else {
4653 ddf->phys->entries[pd2] =
4654 ddf->phys->entries[pdnum];
4655 for (dl = ddf->dlist; dl; dl = dl->next)
4656 if (dl->pdnum == (int)pdnum)
4657 dl->pdnum = pd2;
4658 pd2++;
4659 }
4660 }
4661 ddf->phys->used_pdes = cpu_to_be16(pd2);
4662 while (pd2 < pdnum) {
4663 memset(ddf->phys->entries[pd2].guid, 0xff,
4664 DDF_GUID_LEN);
4665 pd2++;
4666 }
4667
4668 ddf_set_updates_pending(ddf);
4669 }
4670 /* case DDF_SPARE_ASSIGN_MAGIC */
4671 }
4672
4673 static void ddf_prepare_update(struct supertype *st,
4674 struct metadata_update *update)
4675 {
4676 /* This update arrived at managemon.
4677 * We are about to pass it to monitor.
4678 * If a malloc is needed, do it here.
4679 */
4680 struct ddf_super *ddf = st->sb;
4681 be32 *magic = (be32 *)update->buf;
4682 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4683 struct vcl *vcl;
4684 struct vd_config *conf = (struct vd_config *) update->buf;
4685 if (posix_memalign(&update->space, 512,
4686 offsetof(struct vcl, conf)
4687 + ddf->conf_rec_len * 512) != 0) {
4688 update->space = NULL;
4689 return;
4690 }
4691 vcl = update->space;
4692 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4693 if (alloc_other_bvds(ddf, vcl) != 0) {
4694 free(update->space);
4695 update->space = NULL;
4696 }
4697 }
4698 }
4699
4700 /*
4701 * Check degraded state of a RAID10.
4702 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4703 */
4704 static int raid10_degraded(struct mdinfo *info)
4705 {
4706 int n_prim, n_bvds;
4707 int i;
4708 struct mdinfo *d;
4709 char *found;
4710 int ret = -1;
4711
4712 n_prim = info->array.layout & ~0x100;
4713 n_bvds = info->array.raid_disks / n_prim;
4714 found = xmalloc(n_bvds);
4715 if (found == NULL)
4716 return ret;
4717 memset(found, 0, n_bvds);
4718 for (d = info->devs; d; d = d->next) {
4719 i = d->disk.raid_disk / n_prim;
4720 if (i >= n_bvds) {
4721 pr_err("%s: BUG: invalid raid disk\n", __func__);
4722 goto out;
4723 }
4724 if (d->state_fd > 0)
4725 found[i]++;
4726 }
4727 ret = 2;
4728 for (i = 0; i < n_bvds; i++)
4729 if (!found[i]) {
4730 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4731 ret = 0;
4732 goto out;
4733 } else if (found[i] < n_prim) {
4734 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4735 n_bvds);
4736 ret = 1;
4737 }
4738 out:
4739 free(found);
4740 return ret;
4741 }
4742
4743 /*
4744 * Check if the array 'a' is degraded but not failed.
4745 * If it is, find as many spares as are available and needed and
4746 * arrange for their inclusion.
4747 * We only choose devices which are not already in the array,
4748 * and prefer those with a spare-assignment to this array.
4749  * Otherwise we choose global spares, always assuming that
4750  * there is enough room.
4751 * For each spare that we assign, we return an 'mdinfo' which
4752 * describes the position for the device in the array.
4753 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4754 * the new phys_refnum and lba_offset values.
4755 *
4756 * Only worry about BVDs at the moment.
4757 */
4758 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4759 struct metadata_update **updates)
4760 {
4761 int working = 0;
4762 struct mdinfo *d;
4763 struct ddf_super *ddf = a->container->sb;
4764 int global_ok = 0;
4765 struct mdinfo *rv = NULL;
4766 struct mdinfo *di;
4767 struct metadata_update *mu;
4768 struct dl *dl;
4769 int i;
4770 unsigned int j;
4771 struct vcl *vcl;
4772 struct vd_config *vc;
4773 unsigned int n_bvd;
4774
4775 for (d = a->info.devs ; d ; d = d->next) {
4776 if ((d->curr_state & DS_FAULTY) &&
4777 d->state_fd >= 0)
4778 /* wait for Removal to happen */
4779 return NULL;
4780 if (d->state_fd >= 0)
4781 working ++;
4782 }
4783
4784 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4785 a->info.array.raid_disks,
4786 a->info.array.level);
4787 if (working == a->info.array.raid_disks)
4788 return NULL; /* array not degraded */
4789 switch (a->info.array.level) {
4790 case 1:
4791 if (working == 0)
4792 return NULL; /* failed */
4793 break;
4794 case 4:
4795 case 5:
4796 if (working < a->info.array.raid_disks - 1)
4797 return NULL; /* failed */
4798 break;
4799 case 6:
4800 if (working < a->info.array.raid_disks - 2)
4801 return NULL; /* failed */
4802 break;
4803 case 10:
4804 if (raid10_degraded(&a->info) < 1)
4805 return NULL;
4806 break;
4807 default: /* concat or stripe */
4808 return NULL; /* failed */
4809 }
4810
4811 /* For each slot, if it is not working, find a spare */
4812 dl = ddf->dlist;
4813 for (i = 0; i < a->info.array.raid_disks; i++) {
4814 for (d = a->info.devs ; d ; d = d->next)
4815 if (d->disk.raid_disk == i)
4816 break;
4817 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4818 if (d && (d->state_fd >= 0))
4819 continue;
4820
4821 /* OK, this device needs recovery. Find a spare */
4822 again:
4823 for ( ; dl ; dl = dl->next) {
4824 unsigned long long esize;
4825 unsigned long long pos;
4826 struct mdinfo *d2;
4827 int is_global = 0;
4828 int is_dedicated = 0;
4829 struct extent *ex;
4830 unsigned int j;
4831 be16 state = ddf->phys->entries[dl->pdnum].state;
4832 if (be16_and(state,
4833 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4834 !be16_and(state,
4835 cpu_to_be16(DDF_Online)))
4836 continue;
4837
4838 /* If in this array, skip */
4839 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4840 if (d2->state_fd >= 0 &&
4841 d2->disk.major == dl->major &&
4842 d2->disk.minor == dl->minor) {
4843 dprintf("%x:%x (%08x) already in array\n",
4844 dl->major, dl->minor,
4845 be32_to_cpu(dl->disk.refnum));
4846 break;
4847 }
4848 if (d2)
4849 continue;
4850 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4851 cpu_to_be16(DDF_Spare))) {
4852 /* Check spare assign record */
4853 if (dl->spare) {
4854 if (dl->spare->type & DDF_spare_dedicated) {
4855 /* check spare_ents for guid */
4856 for (j = 0 ;
4857 j < be16_to_cpu
4858 (dl->spare
4859 ->populated);
4860 j++) {
4861 if (memcmp(dl->spare->spare_ents[j].guid,
4862 ddf->virt->entries[a->info.container_member].guid,
4863 DDF_GUID_LEN) == 0)
4864 is_dedicated = 1;
4865 }
4866 } else
4867 is_global = 1;
4868 }
4869 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
4870 cpu_to_be16(DDF_Global_Spare))) {
4871 is_global = 1;
4872 } else if (!be16_and(ddf->phys
4873 ->entries[dl->pdnum].state,
4874 cpu_to_be16(DDF_Failed))) {
4875 /* we can possibly use some of this */
4876 is_global = 1;
4877 }
4878 if ( ! (is_dedicated ||
4879 (is_global && global_ok))) {
4880 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4881 is_dedicated, is_global);
4882 continue;
4883 }
4884
4885 /* We are allowed to use this device - is there space?
4886 * We need a->info.component_size sectors */
4887 ex = get_extents(ddf, dl);
4888 if (!ex) {
4889 dprintf("cannot get extents\n");
4890 continue;
4891 }
4892 j = 0; pos = 0;
4893 esize = 0;
4894
4895 do {
4896 esize = ex[j].start - pos;
4897 if (esize >= a->info.component_size)
4898 break;
4899 pos = ex[j].start + ex[j].size;
4900 j++;
4901 } while (ex[j-1].size);
4902
4903 free(ex);
4904 if (esize < a->info.component_size) {
4905 dprintf("%x:%x has no room: %llu %llu\n",
4906 dl->major, dl->minor,
4907 esize, a->info.component_size);
4908 /* No room */
4909 continue;
4910 }
4911
4912 /* Cool, we have a device with some space at pos */
4913 di = xcalloc(1, sizeof(*di));
4914 di->disk.number = i;
4915 di->disk.raid_disk = i;
4916 di->disk.major = dl->major;
4917 di->disk.minor = dl->minor;
4918 di->disk.state = 0;
4919 di->recovery_start = 0;
4920 di->data_offset = pos;
4921 di->component_size = a->info.component_size;
4922 di->container_member = dl->pdnum;
4923 di->next = rv;
4924 rv = di;
4925 dprintf("%x:%x (%08x) to be %d at %llu\n",
4926 dl->major, dl->minor,
4927 be32_to_cpu(dl->disk.refnum), i, pos);
4928
4929 break;
4930 }
4931 if (!dl && ! global_ok) {
4932 /* not enough dedicated spares, try global */
4933 global_ok = 1;
4934 dl = ddf->dlist;
4935 goto again;
4936 }
4937 }
4938
4939 if (!rv)
4940 /* No spares found */
4941 return rv;
4942 /* Now 'rv' has a list of devices to return.
4943 * Create a metadata_update record to update the
4944 * phys_refnum and lba_offset values
4945 */
4946 vc = find_vdcr(ddf, a->info.container_member, di->disk.raid_disk,
4947 &n_bvd, &vcl);
4948 if (vc == NULL)
4949 return NULL;
4950
4951 	mu = xmalloc(sizeof(*mu));
4952 	if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
4953 		free(mu);
4954 		return NULL; /* don't fall through and dereference 'mu' below */
4955 	}
4956
4957 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
4958 mu->buf = xmalloc(mu->len);
4959 mu->space = NULL;
4960 mu->space_list = NULL;
4961 mu->next = *updates;
4962 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
4963 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
4964 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
4965 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
4966
4967 vc = (struct vd_config*)mu->buf;
4968 for (di = rv ; di ; di = di->next) {
4969 unsigned int i_sec, i_prim;
4970 i_sec = di->disk.raid_disk
4971 / be16_to_cpu(vcl->conf.prim_elmnt_count);
4972 i_prim = di->disk.raid_disk
4973 % be16_to_cpu(vcl->conf.prim_elmnt_count);
4974 vc = (struct vd_config *)(mu->buf
4975 + i_sec * ddf->conf_rec_len * 512);
4976 for (dl = ddf->dlist; dl; dl = dl->next)
4977 if (dl->major == di->disk.major
4978 && dl->minor == di->disk.minor)
4979 break;
4980 if (!dl) {
4981 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
4982 __func__, di->disk.raid_disk,
4983 di->disk.major, di->disk.minor);
4984 return NULL;
4985 }
4986 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
4987 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
4988 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
4989 be32_to_cpu(vc->phys_refnum[i_prim]),
4990 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
4991 }
4992 *updates = mu;
4993 return rv;
4994 }
4995 #endif /* MDASSEMBLE */
4996
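/*
 * Default md layout for a RAID level when none was specified: e.g.
 * left-symmetric for RAID5 and the near-2 layout (0x102) for RAID10.
 */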
4997 static int ddf_level_to_layout(int level)
4998 {
4999 switch(level) {
5000 case 0:
5001 case 1:
5002 return 0;
5003 case 5:
5004 return ALGORITHM_LEFT_SYMMETRIC;
5005 case 6:
5006 return ALGORITHM_ROTATING_N_CONTINUE;
5007 case 10:
5008 return 0x102;
5009 default:
5010 return UnSet;
5011 }
5012 }
5013
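/*
 * Fill in defaults for geometry values still left as UnSet: an unspecified
 * level means "create a container", and an unspecified layout is derived
 * from the level via ddf_level_to_layout().
 */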
5014 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5015 {
5016 if (level && *level == UnSet)
5017 *level = LEVEL_CONTAINER;
5018
5019 if (level && layout && *layout == UnSet)
5020 *layout = ddf_level_to_layout(*level);
5021 }
5022
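/* Method table that plugs the DDF metadata format into mdadm and mdmon. */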
5023 struct superswitch super_ddf = {
5024 #ifndef MDASSEMBLE
5025 .examine_super = examine_super_ddf,
5026 .brief_examine_super = brief_examine_super_ddf,
5027 .brief_examine_subarrays = brief_examine_subarrays_ddf,
5028 .export_examine_super = export_examine_super_ddf,
5029 .detail_super = detail_super_ddf,
5030 .brief_detail_super = brief_detail_super_ddf,
5031 .validate_geometry = validate_geometry_ddf,
5032 .write_init_super = write_init_super_ddf,
5033 .add_to_super = add_to_super_ddf,
5034 .remove_from_super = remove_from_super_ddf,
5035 .load_container = load_container_ddf,
5036 .copy_metadata = copy_metadata_ddf,
5037 .kill_subarray = kill_subarray_ddf,
5038 #endif
5039 .match_home = match_home_ddf,
5040 .uuid_from_super= uuid_from_super_ddf,
5041 .getinfo_super = getinfo_super_ddf,
5042 .update_super = update_super_ddf,
5043
5044 .avail_size = avail_size_ddf,
5045
5046 .compare_super = compare_super_ddf,
5047
5048 .load_super = load_super_ddf,
5049 .init_super = init_super_ddf,
5050 .store_super = store_super_ddf,
5051 .free_super = free_super_ddf,
5052 .match_metadata_desc = match_metadata_desc_ddf,
5053 .container_content = container_content_ddf,
5054 .default_geometry = default_geometry_ddf,
5055
5056 .external = 1,
5057
5058 #ifndef MDASSEMBLE
5059 /* for mdmon */
5060 .open_new = ddf_open_new,
5061 .set_array_state= ddf_set_array_state,
5062 .set_disk = ddf_set_disk,
5063 .sync_metadata = ddf_sync_metadata,
5064 .process_update = ddf_process_update,
5065 .prepare_update = ddf_prepare_update,
5066 .activate_spare = ddf_activate_spare,
5067 #endif
5068 .name = "ddf",
5069 };