1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add
38 * seconds-in-decade-of-seventies to convert to linux timestamps.
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
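/* For reference: DECADE evaluates to (365*10+2)*24*3600 = 315532800
 * seconds, the span from 1970-01-01 to 1980-01-01 with the leap days
 * of 1972 and 1976.  A Linux time_t is turned into a DDF timestamp by
 * subtracting DECADE (see make_header_guid()) and converted back by
 * adding it (see print_guid() and getinfo_super_ddf()).
 */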
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* The DDF metadata handling.
51 * DDF metadata lives at the end of the device.
52 * The last 512 byte block provides an 'anchor' which is used to locate
53 * the rest of the metadata which usually lives immediately behind the anchor.
54 *
55 * Note:
56 * - all multibyte numeric fields are bigendian.
57 * - all strings are space padded.
58 *
59 */
60
61 /* Primary Raid Level (PRL) */
62 #define DDF_RAID0 0x00
63 #define DDF_RAID1 0x01
64 #define DDF_RAID3 0x03
65 #define DDF_RAID4 0x04
66 #define DDF_RAID5 0x05
67 #define DDF_RAID1E 0x11
68 #define DDF_JBOD 0x0f
69 #define DDF_CONCAT 0x1f
70 #define DDF_RAID5E 0x15
71 #define DDF_RAID5EE 0x25
72 #define DDF_RAID6 0x06
73
74 /* Raid Level Qualifier (RLQ) */
75 #define DDF_RAID0_SIMPLE 0x00
76 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
77 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
78 #define DDF_RAID3_0 0x00 /* parity in first extent */
79 #define DDF_RAID3_N 0x01 /* parity in last extent */
80 #define DDF_RAID4_0 0x00 /* parity in first extent */
81 #define DDF_RAID4_N 0x01 /* parity in last extent */
82 /* these apply to raid5e and raid5ee as well */
83 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
84 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
85 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
86 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
87
88 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
89 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
90
91 /* Secondary RAID Level (SRL) */
92 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
93 #define DDF_2MIRRORED 0x01
94 #define DDF_2CONCAT 0x02
95 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
96
97 /* Magic numbers */
98 #define DDF_HEADER_MAGIC __cpu_to_be32(0xDE11DE11)
99 #define DDF_CONTROLLER_MAGIC __cpu_to_be32(0xAD111111)
100 #define DDF_PHYS_RECORDS_MAGIC __cpu_to_be32(0x22222222)
101 #define DDF_PHYS_DATA_MAGIC __cpu_to_be32(0x33333333)
102 #define DDF_VIRT_RECORDS_MAGIC __cpu_to_be32(0xDDDDDDDD)
103 #define DDF_VD_CONF_MAGIC __cpu_to_be32(0xEEEEEEEE)
104 #define DDF_SPARE_ASSIGN_MAGIC __cpu_to_be32(0x55555555)
105 #define DDF_VU_CONF_MAGIC __cpu_to_be32(0x88888888)
106 #define DDF_VENDOR_LOG_MAGIC __cpu_to_be32(0x01dBEEF0)
107 #define DDF_BBM_LOG_MAGIC __cpu_to_be32(0xABADB10C)
108
109 #define DDF_GUID_LEN 24
110 #define DDF_REVISION_0 "01.00.00"
111 #define DDF_REVISION_2 "01.02.00"
112
113 struct ddf_header {
114 __u32 magic; /* DDF_HEADER_MAGIC */
115 __u32 crc;
116 char guid[DDF_GUID_LEN];
117 char revision[8]; /* 01.02.00 */
118 __u32 seq; /* starts at '1' */
119 __u32 timestamp;
120 __u8 openflag;
121 __u8 foreignflag;
122 __u8 enforcegroups;
123 __u8 pad0; /* 0xff */
124 __u8 pad1[12]; /* 12 * 0xff */
125 /* 64 bytes so far */
126 __u8 header_ext[32]; /* reserved: fill with 0xff */
127 __u64 primary_lba;
128 __u64 secondary_lba;
129 __u8 type;
130 __u8 pad2[3]; /* 0xff */
131 __u32 workspace_len; /* sectors for vendor space -
132 * at least 32768(sectors) */
133 __u64 workspace_lba;
134 __u16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
135 __u16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
136 __u16 max_partitions; /* i.e. max num of configuration
137 record entries per disk */
138 __u16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
139 *12/512) */
140 __u16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
141 __u8 pad3[54]; /* 0xff */
142 /* 192 bytes so far */
143 __u32 controller_section_offset;
144 __u32 controller_section_length;
145 __u32 phys_section_offset;
146 __u32 phys_section_length;
147 __u32 virt_section_offset;
148 __u32 virt_section_length;
149 __u32 config_section_offset;
150 __u32 config_section_length;
151 __u32 data_section_offset;
152 __u32 data_section_length;
153 __u32 bbm_section_offset;
154 __u32 bbm_section_length;
155 __u32 diag_space_offset;
156 __u32 diag_space_length;
157 __u32 vendor_offset;
158 __u32 vendor_length;
159 /* 256 bytes so far */
160 __u8 pad4[256]; /* 0xff */
161 };
162
163 /* type field */
164 #define DDF_HEADER_ANCHOR 0x00
165 #define DDF_HEADER_PRIMARY 0x01
166 #define DDF_HEADER_SECONDARY 0x02
167
168 /* The content of the 'controller section' - global scope */
169 struct ddf_controller_data {
170 __u32 magic; /* DDF_CONTROLLER_MAGIC */
171 __u32 crc;
172 char guid[DDF_GUID_LEN];
173 struct controller_type {
174 __u16 vendor_id;
175 __u16 device_id;
176 __u16 sub_vendor_id;
177 __u16 sub_device_id;
178 } type;
179 char product_id[16];
180 __u8 pad[8]; /* 0xff */
181 __u8 vendor_data[448];
182 };
183
184 /* The content of phys_section - global scope */
185 struct phys_disk {
186 __u32 magic; /* DDF_PHYS_RECORDS_MAGIC */
187 __u32 crc;
188 __u16 used_pdes;
189 __u16 max_pdes;
190 __u8 pad[52];
191 struct phys_disk_entry {
192 char guid[DDF_GUID_LEN];
193 __u32 refnum;
194 __u16 type;
195 __u16 state;
196 __u64 config_size; /* DDF structures must be after here */
197 char path[18]; /* another horrible structure really */
198 __u8 pad[6];
199 } entries[0];
200 };
201
202 /* phys_disk_entry.type is a bitmap - bigendian remember */
203 #define DDF_Forced_PD_GUID 1
204 #define DDF_Active_in_VD 2
205 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
206 #define DDF_Spare 8 /* overrides Global_spare */
207 #define DDF_Foreign 16
208 #define DDF_Legacy 32 /* no DDF on this device */
209
210 #define DDF_Interface_mask 0xf00
211 #define DDF_Interface_SCSI 0x100
212 #define DDF_Interface_SAS 0x200
213 #define DDF_Interface_SATA 0x300
214 #define DDF_Interface_FC 0x400
215
216 /* phys_disk_entry.state is a bigendian bitmap */
217 #define DDF_Online 1
218 #define DDF_Failed 2 /* overrides 1,4,8 */
219 #define DDF_Rebuilding 4
220 #define DDF_Transition 8
221 #define DDF_SMART 16
222 #define DDF_ReadErrors 32
223 #define DDF_Missing 64
224
225 /* The content of the virt_section global scope */
226 struct virtual_disk {
227 __u32 magic; /* DDF_VIRT_RECORDS_MAGIC */
228 __u32 crc;
229 __u16 populated_vdes;
230 __u16 max_vdes;
231 __u8 pad[52];
232 struct virtual_entry {
233 char guid[DDF_GUID_LEN];
234 __u16 unit;
235 __u16 pad0; /* 0xffff */
236 __u16 guid_crc;
237 __u16 type;
238 __u8 state;
239 __u8 init_state;
240 __u8 pad1[14];
241 char name[16];
242 } entries[0];
243 };
244
245 /* virtual_entry.type is a bitmap - bigendian */
246 #define DDF_Shared 1
247 #define DDF_Enforce_Groups 2
248 #define DDF_Unicode 4
249 #define DDF_Owner_Valid 8
250
251 /* virtual_entry.state is a bigendian bitmap */
252 #define DDF_state_mask 0x7
253 #define DDF_state_optimal 0x0
254 #define DDF_state_degraded 0x1
255 #define DDF_state_deleted 0x2
256 #define DDF_state_missing 0x3
257 #define DDF_state_failed 0x4
258 #define DDF_state_part_optimal 0x5
259
260 #define DDF_state_morphing 0x8
261 #define DDF_state_inconsistent 0x10
262
263 /* virtual_entry.init_state is a bigendian bitmap */
264 #define DDF_initstate_mask 0x03
265 #define DDF_init_not 0x00
266 #define DDF_init_quick 0x01 /* initialisation in progress.
267 * i.e. 'state_inconsistent' */
268 #define DDF_init_full 0x02
269
270 #define DDF_access_mask 0xc0
271 #define DDF_access_rw 0x00
272 #define DDF_access_ro 0x80
273 #define DDF_access_blocked 0xc0
274
275 /* The content of the config_section - local scope
276 * It has multiple records each config_record_len sectors
277 * They can be vd_config or spare_assign
278 */
279
280 struct vd_config {
281 __u32 magic; /* DDF_VD_CONF_MAGIC */
282 __u32 crc;
283 char guid[DDF_GUID_LEN];
284 __u32 timestamp;
285 __u32 seqnum;
286 __u8 pad0[24];
287 __u16 prim_elmnt_count;
288 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
289 __u8 prl;
290 __u8 rlq;
291 __u8 sec_elmnt_count;
292 __u8 sec_elmnt_seq;
293 __u8 srl;
294 __u64 blocks; /* blocks per component could be different
295 * on different component devices...(only
296 * for concat I hope) */
297 __u64 array_blocks; /* blocks in array */
298 __u8 pad1[8];
299 __u32 spare_refs[8];
300 __u8 cache_pol[8];
301 __u8 bg_rate;
302 __u8 pad2[3];
303 __u8 pad3[52];
304 __u8 pad4[192];
305 __u8 v0[32]; /* reserved- 0xff */
306 __u8 v1[32]; /* reserved- 0xff */
307 __u8 v2[16]; /* reserved- 0xff */
308 __u8 v3[16]; /* reserved- 0xff */
309 __u8 vendor[32];
310 __u32 phys_refnum[0]; /* refnum of each disk in sequence */
311 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
312 bvd are always the same size */
313 };
314 #define LBA_OFFSET(ddf, vd) ((__u64 *) &(vd)->phys_refnum[(ddf)->mppe])
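/* Layout note (illustrative sketch, not normative): the variable-length
 * tail of struct vd_config holds ddf->mppe refnums (__u32 each) followed
 * immediately by the per-element 64-bit LBA offsets, so LBA_OFFSET()
 * simply skips past the refnum array.  Typical use:
 *
 *	__u64 start = __be64_to_cpu(LBA_OFFSET(ddf, vd)[i]);
 *	// start: first LBA used on the disk named by vd->phys_refnum[i]
 */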
315
316 /* vd_config.cache_pol[7] is a bitmap */
317 #define DDF_cache_writeback 1 /* else writethrough */
318 #define DDF_cache_wadaptive 2 /* only applies if writeback */
319 #define DDF_cache_readahead 4
320 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
321 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
322 #define DDF_cache_wallowed 32 /* enable write caching */
323 #define DDF_cache_rallowed 64 /* enable read caching */
324
325 struct spare_assign {
326 __u32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
327 __u32 crc;
328 __u32 timestamp;
329 __u8 reserved[7];
330 __u8 type;
331 __u16 populated; /* SAEs used */
332 __u16 max; /* max SAEs */
333 __u8 pad[8];
334 struct spare_assign_entry {
335 char guid[DDF_GUID_LEN];
336 __u16 secondary_element;
337 __u8 pad[6];
338 } spare_ents[0];
339 };
340 /* spare_assign.type is a bitmap */
341 #define DDF_spare_dedicated 0x1 /* else global */
342 #define DDF_spare_revertible 0x2 /* else committable */
343 #define DDF_spare_active 0x4 /* else not active */
344 #define DDF_spare_affinity 0x8 /* enclosure affinity */
345
346 /* The data_section contents - local scope */
347 struct disk_data {
348 __u32 magic; /* DDF_PHYS_DATA_MAGIC */
349 __u32 crc;
350 char guid[DDF_GUID_LEN];
351 __u32 refnum; /* crc of some magic drive data ... */
352 __u8 forced_ref; /* set when above was not result of magic */
353 __u8 forced_guid; /* set if guid was forced rather than magic */
354 __u8 vendor[32];
355 __u8 pad[442];
356 };
357
358 /* bbm_section content */
359 struct bad_block_log {
360 __u32 magic;
361 __u32 crc;
362 __u16 entry_count;
363 __u32 spare_count;
364 __u8 pad[10];
365 __u64 first_spare;
366 struct mapped_block {
367 __u64 defective_start;
368 __u32 replacement_start;
369 __u16 remap_count;
370 __u8 pad[2];
371 } entries[0];
372 };
373
374 /* Struct for internally holding ddf structures */
375 /* The DDF structure stored on each device is potentially
376 * quite different, as some data is global and some is local.
377 * The global data is:
378 * - ddf header
379 * - controller_data
380 * - Physical disk records
381 * - Virtual disk records
382 * The local data is:
383 * - Configuration records
384 * - Physical Disk data section
385 * ( and Bad block and vendor which I don't care about yet).
386 *
387 * The local data is parsed into separate lists as it is read
388 * and reconstructed for writing. This means that we only need
389 * to make config changes once and they are automatically
390 * propagated to all devices.
391 * Note that the ddf_super has space for the conf and disk data
392 * for this disk and also for a list of all such data.
393 * The list is only used for the superblock that is being
394 * built in Create or Assemble to describe the whole array.
395 */
396 struct ddf_super {
397 struct ddf_header anchor, primary, secondary;
398 struct ddf_controller_data controller;
399 struct ddf_header *active;
400 struct phys_disk *phys;
401 struct virtual_disk *virt;
402 int pdsize, vdsize;
403 unsigned int max_part, mppe, conf_rec_len;
404 int currentdev;
405 int updates_pending;
406 struct vcl {
407 union {
408 char space[512];
409 struct {
410 struct vcl *next;
411 unsigned int vcnum; /* index into ->virt */
412 struct vd_config **other_bvds;
413 __u64 *block_sizes; /* NULL if all the same */
414 };
415 };
416 struct vd_config conf;
417 } *conflist, *currentconf;
418 struct dl {
419 union {
420 char space[512];
421 struct {
422 struct dl *next;
423 int major, minor;
424 char *devname;
425 int fd;
426 unsigned long long size; /* sectors */
427 unsigned long long primary_lba; /* sectors */
428 unsigned long long secondary_lba; /* sectors */
429 unsigned long long workspace_lba; /* sectors */
430 int pdnum; /* index in ->phys */
431 struct spare_assign *spare;
432 void *mdupdate; /* hold metadata update */
433
434 /* These fields used by auto-layout */
435 int raiddisk; /* slot to fill in autolayout */
436 __u64 esize;
437 };
438 };
439 struct disk_data disk;
440 struct vcl *vlist[0]; /* max_part in size */
441 } *dlist, *add_list;
442 };
443
444 #ifndef offsetof
445 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
446 #endif
447
448 #if DEBUG
449 static int all_ff(const char *guid);
450 static void pr_state(struct ddf_super *ddf, const char *msg)
451 {
452 unsigned int i;
453 dprintf("%s/%s: ", __func__, msg);
454 for (i = 0; i < __be16_to_cpu(ddf->active->max_vd_entries); i++) {
455 if (all_ff(ddf->virt->entries[i].guid))
456 continue;
457 dprintf("%u(s=%02x i=%02x) ", i,
458 ddf->virt->entries[i].state,
459 ddf->virt->entries[i].init_state);
460 }
461 dprintf("\n");
462 }
463 #else
464 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
465 #endif
466
467 #define ddf_set_updates_pending(x) \
468 do { (x)->updates_pending = 1; pr_state(x, __func__); } while (0)
469
470 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
471 __u32 refnum, unsigned int nmax,
472 const struct vd_config **bvd,
473 unsigned int *idx);
474
475 static unsigned int calc_crc(void *buf, int len)
476 {
477 /* crcs are always at the same place as in the ddf_header */
478 struct ddf_header *ddf = buf;
479 __u32 oldcrc = ddf->crc;
480 __u32 newcrc;
481 ddf->crc = 0xffffffff;
482
483 newcrc = crc32(0, buf, len);
484 ddf->crc = oldcrc;
485 /* The crc is stored (like everything) bigendian, so convert
486 * here for simplicity
487 */
488 return __cpu_to_be32(newcrc);
489 }
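/* Usage sketch: calc_crc() returns the CRC already in bigendian form,
 * so callers below compare it directly against the stored field, e.g.
 *
 *	if (calc_crc(hdr, 512) != hdr->crc)
 *		return 0;	// reject: corrupt or foreign block
 */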
490
491 #define DDF_INVALID_LEVEL 0xff
492 #define DDF_NO_SECONDARY 0xff
493 static int err_bad_md_layout(const mdu_array_info_t *array)
494 {
495 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
496 array->level, array->layout, array->raid_disks);
497 return DDF_INVALID_LEVEL;
498 }
499
500 static int layout_md2ddf(const mdu_array_info_t *array,
501 struct vd_config *conf)
502 {
503 __u16 prim_elmnt_count = __cpu_to_be16(array->raid_disks);
504 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
505 __u8 sec_elmnt_count = 1;
506 __u8 srl = DDF_NO_SECONDARY;
507
508 switch (array->level) {
509 case LEVEL_LINEAR:
510 prl = DDF_CONCAT;
511 break;
512 case 0:
513 rlq = DDF_RAID0_SIMPLE;
514 prl = DDF_RAID0;
515 break;
516 case 1:
517 switch (array->raid_disks) {
518 case 2:
519 rlq = DDF_RAID1_SIMPLE;
520 break;
521 case 3:
522 rlq = DDF_RAID1_MULTI;
523 break;
524 default:
525 return err_bad_md_layout(array);
526 }
527 prl = DDF_RAID1;
528 break;
529 case 4:
530 if (array->layout != 0)
531 return err_bad_md_layout(array);
532 rlq = DDF_RAID4_N;
533 prl = DDF_RAID4;
534 break;
535 case 5:
536 switch (array->layout) {
537 case ALGORITHM_LEFT_ASYMMETRIC:
538 rlq = DDF_RAID5_N_RESTART;
539 break;
540 case ALGORITHM_RIGHT_ASYMMETRIC:
541 rlq = DDF_RAID5_0_RESTART;
542 break;
543 case ALGORITHM_LEFT_SYMMETRIC:
544 rlq = DDF_RAID5_N_CONTINUE;
545 break;
546 case ALGORITHM_RIGHT_SYMMETRIC:
547 /* not mentioned in standard */
548 default:
549 return err_bad_md_layout(array);
550 }
551 prl = DDF_RAID5;
552 break;
553 case 6:
554 switch (array->layout) {
555 case ALGORITHM_ROTATING_N_RESTART:
556 rlq = DDF_RAID5_N_RESTART;
557 break;
558 case ALGORITHM_ROTATING_ZERO_RESTART:
559 rlq = DDF_RAID6_0_RESTART;
560 break;
561 case ALGORITHM_ROTATING_N_CONTINUE:
562 rlq = DDF_RAID5_N_CONTINUE;
563 break;
564 default:
565 return err_bad_md_layout(array);
566 }
567 prl = DDF_RAID6;
568 break;
569 case 10:
570 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
571 rlq = DDF_RAID1_SIMPLE;
572 prim_elmnt_count = __cpu_to_be16(2);
573 sec_elmnt_count = array->raid_disks / 2;
574 } else if (array->raid_disks % 3 == 0
575 && array->layout == 0x103) {
576 rlq = DDF_RAID1_MULTI;
577 prim_elmnt_count = __cpu_to_be16(3);
578 sec_elmnt_count = array->raid_disks / 3;
579 } else
580 return err_bad_md_layout(array);
581 srl = DDF_2SPANNED;
582 prl = DDF_RAID1;
583 break;
584 default:
585 return err_bad_md_layout(array);
586 }
587 conf->prl = prl;
588 conf->prim_elmnt_count = prim_elmnt_count;
589 conf->rlq = rlq;
590 conf->srl = srl;
591 conf->sec_elmnt_count = sec_elmnt_count;
592 return 0;
593 }
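/* Worked example (illustrative): an md RAID10 array of 4 disks with
 * layout 0x102 (two near-copies) maps to prl=DDF_RAID1,
 * rlq=DDF_RAID1_SIMPLE, prim_elmnt_count=2, sec_elmnt_count=2,
 * srl=DDF_2SPANNED - i.e. two 2-disk mirrors joined at the secondary
 * level.  layout_ddf2md() below performs the reverse mapping.
 */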
594
595 static int err_bad_ddf_layout(const struct vd_config *conf)
596 {
597 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
598 conf->prl, conf->rlq, __be16_to_cpu(conf->prim_elmnt_count));
599 return -1;
600 }
601
602 static int layout_ddf2md(const struct vd_config *conf,
603 mdu_array_info_t *array)
604 {
605 int level = LEVEL_UNSUPPORTED;
606 int layout = 0;
607 int raiddisks = __be16_to_cpu(conf->prim_elmnt_count);
608
609 if (conf->sec_elmnt_count > 1) {
610 /* see also check_secondary() */
611 if (conf->prl != DDF_RAID1 ||
612 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
613 pr_err("Unsupported secondary RAID level %u/%u\n",
614 conf->prl, conf->srl);
615 return -1;
616 }
617 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
618 layout = 0x102;
619 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
620 layout = 0x103;
621 else
622 return err_bad_ddf_layout(conf);
623 raiddisks *= conf->sec_elmnt_count;
624 level = 10;
625 goto good;
626 }
627
628 switch (conf->prl) {
629 case DDF_CONCAT:
630 level = LEVEL_LINEAR;
631 break;
632 case DDF_RAID0:
633 if (conf->rlq != DDF_RAID0_SIMPLE)
634 return err_bad_ddf_layout(conf);
635 level = 0;
636 break;
637 case DDF_RAID1:
638 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
639 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
640 return err_bad_ddf_layout(conf);
641 level = 1;
642 break;
643 case DDF_RAID4:
644 if (conf->rlq != DDF_RAID4_N)
645 return err_bad_ddf_layout(conf);
646 level = 4;
647 break;
648 case DDF_RAID5:
649 switch (conf->rlq) {
650 case DDF_RAID5_N_RESTART:
651 layout = ALGORITHM_LEFT_ASYMMETRIC;
652 break;
653 case DDF_RAID5_0_RESTART:
654 layout = ALGORITHM_RIGHT_ASYMMETRIC;
655 break;
656 case DDF_RAID5_N_CONTINUE:
657 layout = ALGORITHM_LEFT_SYMMETRIC;
658 break;
659 default:
660 return err_bad_ddf_layout(conf);
661 }
662 level = 5;
663 break;
664 case DDF_RAID6:
665 switch (conf->rlq) {
666 case DDF_RAID5_N_RESTART:
667 layout = ALGORITHM_ROTATING_N_RESTART;
668 break;
669 case DDF_RAID6_0_RESTART:
670 layout = ALGORITHM_ROTATING_ZERO_RESTART;
671 break;
672 case DDF_RAID5_N_CONTINUE:
673 layout = ALGORITHM_ROTATING_N_CONTINUE;
674 break;
675 default:
676 return err_bad_ddf_layout(conf);
677 }
678 level = 6;
679 break;
680 default:
681 return err_bad_ddf_layout(conf);
682 };
683
684 good:
685 array->level = level;
686 array->layout = layout;
687 array->raid_disks = raiddisks;
688 return 0;
689 }
690
691 static int load_ddf_header(int fd, unsigned long long lba,
692 unsigned long long size,
693 int type,
694 struct ddf_header *hdr, struct ddf_header *anchor)
695 {
696 /* read a ddf header (primary or secondary) from fd/lba
697 * and check that it is consistent with anchor
698 * Need to check:
699 * magic, crc, guid, rev, the LBAs, and header_type; and
700 * everything after header_type must be the same
701 */
702 if (lba >= size-1)
703 return 0;
704
705 if (lseek64(fd, lba<<9, 0) < 0)
706 return 0;
707
708 if (read(fd, hdr, 512) != 512)
709 return 0;
710
711 if (hdr->magic != DDF_HEADER_MAGIC)
712 return 0;
713 if (calc_crc(hdr, 512) != hdr->crc)
714 return 0;
715 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
716 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
717 anchor->primary_lba != hdr->primary_lba ||
718 anchor->secondary_lba != hdr->secondary_lba ||
719 hdr->type != type ||
720 memcmp(anchor->pad2, hdr->pad2, 512 -
721 offsetof(struct ddf_header, pad2)) != 0)
722 return 0;
723
724 /* Looks good enough to me... */
725 return 1;
726 }
727
728 static void *load_section(int fd, struct ddf_super *super, void *buf,
729 __u32 offset_be, __u32 len_be, int check)
730 {
731 unsigned long long offset = __be32_to_cpu(offset_be);
732 unsigned long long len = __be32_to_cpu(len_be);
733 int dofree = (buf == NULL);
734
735 if (check)
736 if (len != 2 && len != 8 && len != 32
737 && len != 128 && len != 512)
738 return NULL;
739
740 if (len > 1024)
741 return NULL;
742 if (buf) {
743 /* All pre-allocated sections are a single block */
744 if (len != 1)
745 return NULL;
746 } else if (posix_memalign(&buf, 512, len<<9) != 0)
747 buf = NULL;
748
749 if (!buf)
750 return NULL;
751
752 if (super->active->type == 1)
753 offset += __be64_to_cpu(super->active->primary_lba);
754 else
755 offset += __be64_to_cpu(super->active->secondary_lba);
756
757 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
758 if (dofree)
759 free(buf);
760 return NULL;
761 }
762 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
763 if (dofree)
764 free(buf);
765 return NULL;
766 }
767 return buf;
768 }
769
770 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
771 {
772 unsigned long long dsize;
773
774 get_dev_size(fd, NULL, &dsize);
775
776 if (lseek64(fd, dsize-512, 0) < 0) {
777 if (devname)
778 pr_err("Cannot seek to anchor block on %s: %s\n",
779 devname, strerror(errno));
780 return 1;
781 }
782 if (read(fd, &super->anchor, 512) != 512) {
783 if (devname)
784 pr_err("Cannot read anchor block on %s: %s\n",
785 devname, strerror(errno));
786 return 1;
787 }
788 if (super->anchor.magic != DDF_HEADER_MAGIC) {
789 if (devname)
790 pr_err("no DDF anchor found on %s\n",
791 devname);
792 return 2;
793 }
794 if (calc_crc(&super->anchor, 512) != super->anchor.crc) {
795 if (devname)
796 pr_err("bad CRC on anchor on %s\n",
797 devname);
798 return 2;
799 }
800 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
801 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
802 if (devname)
803 pr_err("can only support super revision"
804 " %.8s and earlier, not %.8s on %s\n",
805 DDF_REVISION_2, super->anchor.revision,devname);
806 return 2;
807 }
808 super->active = NULL;
809 if (load_ddf_header(fd, __be64_to_cpu(super->anchor.primary_lba),
810 dsize >> 9, 1,
811 &super->primary, &super->anchor) == 0) {
812 if (devname)
813 pr_err("Failed to load primary DDF header "
814 "on %s\n", devname);
815 } else
816 super->active = &super->primary;
817 if (load_ddf_header(fd, __be64_to_cpu(super->anchor.secondary_lba),
818 dsize >> 9, 2,
819 &super->secondary, &super->anchor)) {
820 if ((__be32_to_cpu(super->primary.seq)
821 < __be32_to_cpu(super->secondary.seq) &&
822 !super->secondary.openflag)
823 || (__be32_to_cpu(super->primary.seq)
824 == __be32_to_cpu(super->secondary.seq) &&
825 super->primary.openflag && !super->secondary.openflag)
826 || super->active == NULL
827 )
828 super->active = &super->secondary;
829 } else if (devname)
830 pr_err("Failed to load secondary DDF header on %s\n",
831 devname);
832 if (super->active == NULL)
833 return 2;
834 return 0;
835 }
836
837 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
838 {
839 void *ok;
840 ok = load_section(fd, super, &super->controller,
841 super->active->controller_section_offset,
842 super->active->controller_section_length,
843 0);
844 super->phys = load_section(fd, super, NULL,
845 super->active->phys_section_offset,
846 super->active->phys_section_length,
847 1);
848 super->pdsize = __be32_to_cpu(super->active->phys_section_length) * 512;
849
850 super->virt = load_section(fd, super, NULL,
851 super->active->virt_section_offset,
852 super->active->virt_section_length,
853 1);
854 super->vdsize = __be32_to_cpu(super->active->virt_section_length) * 512;
855 if (!ok ||
856 !super->phys ||
857 !super->virt) {
858 free(super->phys);
859 free(super->virt);
860 super->phys = NULL;
861 super->virt = NULL;
862 return 2;
863 }
864 super->conflist = NULL;
865 super->dlist = NULL;
866
867 super->max_part = __be16_to_cpu(super->active->max_partitions);
868 super->mppe = __be16_to_cpu(super->active->max_primary_element_entries);
869 super->conf_rec_len = __be16_to_cpu(super->active->config_record_len);
870 return 0;
871 }
872
873 #define DDF_UNUSED_BVD 0xff
874 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
875 {
876 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
877 unsigned int i, vdsize;
878 void *p;
879 if (n_vds == 0) {
880 vcl->other_bvds = NULL;
881 return 0;
882 }
883 vdsize = ddf->conf_rec_len * 512;
884 if (posix_memalign(&p, 512, n_vds *
885 (vdsize + sizeof(struct vd_config *))) != 0)
886 return -1;
887 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
888 for (i = 0; i < n_vds; i++) {
889 vcl->other_bvds[i] = p + i * vdsize;
890 memset(vcl->other_bvds[i], 0, vdsize);
891 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
892 }
893 return 0;
894 }
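/* Allocation sketch for the secondary-level configs (n = sec_elmnt_count-1):
 *
 *	p: [ conf 0 ][ conf 1 ] ... [ conf n-1 ][ other_bvds pointer array ]
 *	    ^ other_bvds[0]                      ^ vcl->other_bvds
 *
 * Everything lives in a single posix_memalign() buffer, which is why
 * free_super_ddf() only needs to free other_bvds[0].
 */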
895
896 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
897 unsigned int len)
898 {
899 int i;
900 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
901 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
902 break;
903
904 if (i < vcl->conf.sec_elmnt_count-1) {
905 if (vd->seqnum <= vcl->other_bvds[i]->seqnum)
906 return;
907 } else {
908 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
909 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
910 break;
911 if (i == vcl->conf.sec_elmnt_count-1) {
912 pr_err("no space for sec level config %u, count is %u\n",
913 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
914 return;
915 }
916 }
917 memcpy(vcl->other_bvds[i], vd, len);
918 }
919
920 static int load_ddf_local(int fd, struct ddf_super *super,
921 char *devname, int keep)
922 {
923 struct dl *dl;
924 struct stat stb;
925 char *conf;
926 unsigned int i;
927 unsigned int confsec;
928 int vnum;
929 unsigned int max_virt_disks = __be16_to_cpu(super->active->max_vd_entries);
930 unsigned long long dsize;
931
932 /* First the local disk info */
933 if (posix_memalign((void**)&dl, 512,
934 sizeof(*dl) +
935 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
936 pr_err("%s could not allocate disk info buffer\n",
937 __func__);
938 return 1;
939 }
940
941 load_section(fd, super, &dl->disk,
942 super->active->data_section_offset,
943 super->active->data_section_length,
944 0);
945 dl->devname = devname ? xstrdup(devname) : NULL;
946
947 fstat(fd, &stb);
948 dl->major = major(stb.st_rdev);
949 dl->minor = minor(stb.st_rdev);
950 dl->next = super->dlist;
951 dl->fd = keep ? fd : -1;
952
953 dl->size = 0;
954 if (get_dev_size(fd, devname, &dsize))
955 dl->size = dsize >> 9;
956 /* If the disks have different sizes, the LBAs will differ
957 * between phys disks.
958 * At this point, the values in super->active must be valid
959 * for this phys disk. */
960 dl->primary_lba = super->active->primary_lba;
961 dl->secondary_lba = super->active->secondary_lba;
962 dl->workspace_lba = super->active->workspace_lba;
963 dl->spare = NULL;
964 for (i = 0 ; i < super->max_part ; i++)
965 dl->vlist[i] = NULL;
966 super->dlist = dl;
967 dl->pdnum = -1;
968 for (i = 0; i < __be16_to_cpu(super->active->max_pd_entries); i++)
969 if (memcmp(super->phys->entries[i].guid,
970 dl->disk.guid, DDF_GUID_LEN) == 0)
971 dl->pdnum = i;
972
973 /* Now the config list. */
974 /* 'conf' is an array of config entries, some of which are
975 * probably invalid. Those which are good need to be copied into
976 * the conflist
977 */
978
979 conf = load_section(fd, super, NULL,
980 super->active->config_section_offset,
981 super->active->config_section_length,
982 0);
983
984 vnum = 0;
985 for (confsec = 0;
986 confsec < __be32_to_cpu(super->active->config_section_length);
987 confsec += super->conf_rec_len) {
988 struct vd_config *vd =
989 (struct vd_config *)((char*)conf + confsec*512);
990 struct vcl *vcl;
991
992 if (vd->magic == DDF_SPARE_ASSIGN_MAGIC) {
993 if (dl->spare)
994 continue;
995 if (posix_memalign((void**)&dl->spare, 512,
996 super->conf_rec_len*512) != 0) {
997 pr_err("%s could not allocate spare info buf\n",
998 __func__);
999 return 1;
1000 }
1001
1002 memcpy(dl->spare, vd, super->conf_rec_len*512);
1003 continue;
1004 }
1005 if (vd->magic != DDF_VD_CONF_MAGIC)
1006 continue;
1007 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1008 if (memcmp(vcl->conf.guid,
1009 vd->guid, DDF_GUID_LEN) == 0)
1010 break;
1011 }
1012
1013 if (vcl) {
1014 dl->vlist[vnum++] = vcl;
1015 if (vcl->other_bvds != NULL &&
1016 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1017 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1018 continue;
1019 }
1020 if (__be32_to_cpu(vd->seqnum) <=
1021 __be32_to_cpu(vcl->conf.seqnum))
1022 continue;
1023 } else {
1024 if (posix_memalign((void**)&vcl, 512,
1025 (super->conf_rec_len*512 +
1026 offsetof(struct vcl, conf))) != 0) {
1027 pr_err("%s could not allocate vcl buf\n",
1028 __func__);
1029 return 1;
1030 }
1031 vcl->next = super->conflist;
1032 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1033 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1034 if (alloc_other_bvds(super, vcl) != 0) {
1035 pr_err("%s could not allocate other bvds\n",
1036 __func__);
1037 free(vcl);
1038 return 1;
1039 };
1040 super->conflist = vcl;
1041 dl->vlist[vnum++] = vcl;
1042 }
1043 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1044 for (i=0; i < max_virt_disks ; i++)
1045 if (memcmp(super->virt->entries[i].guid,
1046 vcl->conf.guid, DDF_GUID_LEN)==0)
1047 break;
1048 if (i < max_virt_disks)
1049 vcl->vcnum = i;
1050 }
1051 free(conf);
1052
1053 return 0;
1054 }
1055
1056 #ifndef MDASSEMBLE
1057 static int load_super_ddf_all(struct supertype *st, int fd,
1058 void **sbp, char *devname);
1059 #endif
1060
1061 static void free_super_ddf(struct supertype *st);
1062
1063 static int load_super_ddf(struct supertype *st, int fd,
1064 char *devname)
1065 {
1066 unsigned long long dsize;
1067 struct ddf_super *super;
1068 int rv;
1069
1070 if (get_dev_size(fd, devname, &dsize) == 0)
1071 return 1;
1072
1073 if (!st->ignore_hw_compat && test_partition(fd))
1074 /* DDF is not allowed on partitions */
1075 return 1;
1076
1077 /* 32M is a lower bound */
1078 if (dsize <= 32*1024*1024) {
1079 if (devname)
1080 pr_err("%s is too small for ddf: "
1081 "size is %llu sectors.\n",
1082 devname, dsize>>9);
1083 return 1;
1084 }
1085 if (dsize & 511) {
1086 if (devname)
1087 pr_err("%s is an odd size for ddf: "
1088 "size is %llu bytes.\n",
1089 devname, dsize);
1090 return 1;
1091 }
1092
1093 free_super_ddf(st);
1094
1095 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1096 pr_err("malloc of %zu failed.\n",
1097 sizeof(*super));
1098 return 1;
1099 }
1100 memset(super, 0, sizeof(*super));
1101
1102 rv = load_ddf_headers(fd, super, devname);
1103 if (rv) {
1104 free(super);
1105 return rv;
1106 }
1107
1108 /* Have valid headers and have chosen the best. Let's read in the rest*/
1109
1110 rv = load_ddf_global(fd, super, devname);
1111
1112 if (rv) {
1113 if (devname)
1114 pr_err("Failed to load all information "
1115 "sections on %s\n", devname);
1116 free(super);
1117 return rv;
1118 }
1119
1120 rv = load_ddf_local(fd, super, devname, 0);
1121
1122 if (rv) {
1123 if (devname)
1124 pr_err("Failed to load all information "
1125 "sections on %s\n", devname);
1126 free(super);
1127 return rv;
1128 }
1129
1130 /* Should possibly check the sections .... */
1131
1132 st->sb = super;
1133 if (st->ss == NULL) {
1134 st->ss = &super_ddf;
1135 st->minor_version = 0;
1136 st->max_devs = 512;
1137 }
1138 return 0;
1139
1140 }
1141
1142 static void free_super_ddf(struct supertype *st)
1143 {
1144 struct ddf_super *ddf = st->sb;
1145 if (ddf == NULL)
1146 return;
1147 free(ddf->phys);
1148 free(ddf->virt);
1149 while (ddf->conflist) {
1150 struct vcl *v = ddf->conflist;
1151 ddf->conflist = v->next;
1152 if (v->block_sizes)
1153 free(v->block_sizes);
1154 if (v->other_bvds)
1155 /*
1156 v->other_bvds[0] points to beginning of buffer,
1157 see alloc_other_bvds()
1158 */
1159 free(v->other_bvds[0]);
1160 free(v);
1161 }
1162 while (ddf->dlist) {
1163 struct dl *d = ddf->dlist;
1164 ddf->dlist = d->next;
1165 if (d->fd >= 0)
1166 close(d->fd);
1167 if (d->spare)
1168 free(d->spare);
1169 free(d);
1170 }
1171 while (ddf->add_list) {
1172 struct dl *d = ddf->add_list;
1173 ddf->add_list = d->next;
1174 if (d->fd >= 0)
1175 close(d->fd);
1176 if (d->spare)
1177 free(d->spare);
1178 free(d);
1179 }
1180 free(ddf);
1181 st->sb = NULL;
1182 }
1183
1184 static struct supertype *match_metadata_desc_ddf(char *arg)
1185 {
1186 /* 'ddf' only supports containers */
1187 struct supertype *st;
1188 if (strcmp(arg, "ddf") != 0 &&
1189 strcmp(arg, "default") != 0
1190 )
1191 return NULL;
1192
1193 st = xcalloc(1, sizeof(*st));
1194 st->ss = &super_ddf;
1195 st->max_devs = 512;
1196 st->minor_version = 0;
1197 st->sb = NULL;
1198 return st;
1199 }
1200
1201 #ifndef MDASSEMBLE
1202
1203 static mapping_t ddf_state[] = {
1204 { "Optimal", 0},
1205 { "Degraded", 1},
1206 { "Deleted", 2},
1207 { "Missing", 3},
1208 { "Failed", 4},
1209 { "Partially Optimal", 5},
1210 { "-reserved-", 6},
1211 { "-reserved-", 7},
1212 { NULL, 0}
1213 };
1214
1215 static mapping_t ddf_init_state[] = {
1216 { "Not Initialised", 0},
1217 { "QuickInit in Progress", 1},
1218 { "Fully Initialised", 2},
1219 { "*UNKNOWN*", 3},
1220 { NULL, 0}
1221 };
1222 static mapping_t ddf_access[] = {
1223 { "Read/Write", 0},
1224 { "Reserved", 1},
1225 { "Read Only", 2},
1226 { "Blocked (no access)", 3},
1227 { NULL ,0}
1228 };
1229
1230 static mapping_t ddf_level[] = {
1231 { "RAID0", DDF_RAID0},
1232 { "RAID1", DDF_RAID1},
1233 { "RAID3", DDF_RAID3},
1234 { "RAID4", DDF_RAID4},
1235 { "RAID5", DDF_RAID5},
1236 { "RAID1E",DDF_RAID1E},
1237 { "JBOD", DDF_JBOD},
1238 { "CONCAT",DDF_CONCAT},
1239 { "RAID5E",DDF_RAID5E},
1240 { "RAID5EE",DDF_RAID5EE},
1241 { "RAID6", DDF_RAID6},
1242 { NULL, 0}
1243 };
1244 static mapping_t ddf_sec_level[] = {
1245 { "Striped", DDF_2STRIPED},
1246 { "Mirrored", DDF_2MIRRORED},
1247 { "Concat", DDF_2CONCAT},
1248 { "Spanned", DDF_2SPANNED},
1249 { NULL, 0}
1250 };
1251 #endif
1252
1253 static int all_ff(const char *guid)
1254 {
1255 int i;
1256 for (i = 0; i < DDF_GUID_LEN; i++)
1257 if (guid[i] != (char)0xff)
1258 return 0;
1259 return 1;
1260 }
1261
1262 #ifndef MDASSEMBLE
1263 static void print_guid(char *guid, int tstamp)
1264 {
1265 /* GUIDs are part (or all) ASCII and part binary.
1266 * They tend to be space padded.
1267 * We print the GUID in HEX, then in parentheses add
1268 * any initial ASCII sequence, and a possible
1269 * time stamp from bytes 16-19
1270 */
1271 int l = DDF_GUID_LEN;
1272 int i;
1273
1274 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1275 if ((i&3)==0 && i != 0) printf(":");
1276 printf("%02X", guid[i]&255);
1277 }
1278
1279 printf("\n (");
1280 while (l && guid[l-1] == ' ')
1281 l--;
1282 for (i=0 ; i<l ; i++) {
1283 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1284 fputc(guid[i], stdout);
1285 else
1286 break;
1287 }
1288 if (tstamp) {
1289 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1290 char tbuf[100];
1291 struct tm *tm;
1292 tm = localtime(&then);
1293 strftime(tbuf, 100, " %D %T",tm);
1294 fputs(tbuf, stdout);
1295 }
1296 printf(")");
1297 }
1298
1299 static const char *guid_str(const char *guid)
1300 {
1301 static char buf[DDF_GUID_LEN*2+1];
1302 int i;
1303 char *p = buf;
1304 for (i = 0; i < DDF_GUID_LEN; i++)
1305 p += sprintf(p, "%02x", (unsigned char)guid[i]);
1306 *p = '\0';
1307 return (const char *) buf;
1308 }
1309
1310 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1311 {
1312 int crl = sb->conf_rec_len;
1313 struct vcl *vcl;
1314
1315 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1316 unsigned int i;
1317 struct vd_config *vc = &vcl->conf;
1318
1319 if (calc_crc(vc, crl*512) != vc->crc)
1320 continue;
1321 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1322 continue;
1323
1324 /* Ok, we know about this VD, let's give more details */
1325 printf(" Raid Devices[%d] : %d (", n,
1326 __be16_to_cpu(vc->prim_elmnt_count));
1327 for (i = 0; i < __be16_to_cpu(vc->prim_elmnt_count); i++) {
1328 int j;
1329 int cnt = __be16_to_cpu(sb->phys->used_pdes);
1330 for (j=0; j<cnt; j++)
1331 if (vc->phys_refnum[i] == sb->phys->entries[j].refnum)
1332 break;
1333 if (i) printf(" ");
1334 if (j < cnt)
1335 printf("%d", j);
1336 else
1337 printf("--");
1338 }
1339 printf(")\n");
1340 if (vc->chunk_shift != 255)
1341 printf(" Chunk Size[%d] : %d sectors\n", n,
1342 1 << vc->chunk_shift);
1343 printf(" Raid Level[%d] : %s\n", n,
1344 map_num(ddf_level, vc->prl)?:"-unknown-");
1345 if (vc->sec_elmnt_count != 1) {
1346 printf(" Secondary Position[%d] : %d of %d\n", n,
1347 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1348 printf(" Secondary Level[%d] : %s\n", n,
1349 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1350 }
1351 printf(" Device Size[%d] : %llu\n", n,
1352 (unsigned long long)__be64_to_cpu(vc->blocks)/2);
1353 printf(" Array Size[%d] : %llu\n", n,
1354 (unsigned long long)__be64_to_cpu(vc->array_blocks)/2);
1355 }
1356 }
1357
1358 static void examine_vds(struct ddf_super *sb)
1359 {
1360 int cnt = __be16_to_cpu(sb->virt->populated_vdes);
1361 unsigned int i;
1362 printf(" Virtual Disks : %d\n", cnt);
1363
1364 for (i = 0; i < __be16_to_cpu(sb->virt->max_vdes); i++) {
1365 struct virtual_entry *ve = &sb->virt->entries[i];
1366 if (all_ff(ve->guid))
1367 continue;
1368 printf("\n");
1369 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1370 printf("\n");
1371 printf(" unit[%d] : %d\n", i, __be16_to_cpu(ve->unit));
1372 printf(" state[%d] : %s, %s%s\n", i,
1373 map_num(ddf_state, ve->state & 7),
1374 (ve->state & 8) ? "Morphing, ": "",
1375 (ve->state & 16)? "Not Consistent" : "Consistent");
1376 printf(" init state[%d] : %s\n", i,
1377 map_num(ddf_init_state, ve->init_state&3));
1378 printf(" access[%d] : %s\n", i,
1379 map_num(ddf_access, (ve->init_state>>6) & 3));
1380 printf(" Name[%d] : %.16s\n", i, ve->name);
1381 examine_vd(i, sb, ve->guid);
1382 }
1383 if (cnt) printf("\n");
1384 }
1385
1386 static void examine_pds(struct ddf_super *sb)
1387 {
1388 int cnt = __be16_to_cpu(sb->phys->used_pdes);
1389 int i;
1390 struct dl *dl;
1391 printf(" Physical Disks : %d\n", cnt);
1392 printf(" Number RefNo Size Device Type/State\n");
1393
1394 for (i=0 ; i<cnt ; i++) {
1395 struct phys_disk_entry *pd = &sb->phys->entries[i];
1396 int type = __be16_to_cpu(pd->type);
1397 int state = __be16_to_cpu(pd->state);
1398
1399 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1400 //printf("\n");
1401 printf(" %3d %08x ", i,
1402 __be32_to_cpu(pd->refnum));
1403 printf("%8lluK ",
1404 (unsigned long long)__be64_to_cpu(pd->config_size)>>1);
1405 for (dl = sb->dlist; dl ; dl = dl->next) {
1406 if (dl->disk.refnum == pd->refnum) {
1407 char *dv = map_dev(dl->major, dl->minor, 0);
1408 if (dv) {
1409 printf("%-15s", dv);
1410 break;
1411 }
1412 }
1413 }
1414 if (!dl)
1415 printf("%15s","");
1416 printf(" %s%s%s%s%s",
1417 (type&2) ? "active":"",
1418 (type&4) ? "Global-Spare":"",
1419 (type&8) ? "spare" : "",
1420 (type&16)? ", foreign" : "",
1421 (type&32)? "pass-through" : "");
1422 if (state & DDF_Failed)
1423 /* This over-rides these three */
1424 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1425 printf("/%s%s%s%s%s%s%s",
1426 (state&1)? "Online": "Offline",
1427 (state&2)? ", Failed": "",
1428 (state&4)? ", Rebuilding": "",
1429 (state&8)? ", in-transition": "",
1430 (state&16)? ", SMART-errors": "",
1431 (state&32)? ", Unrecovered-Read-Errors": "",
1432 (state&64)? ", Missing" : "");
1433 printf("\n");
1434 }
1435 }
1436
1437 static void examine_super_ddf(struct supertype *st, char *homehost)
1438 {
1439 struct ddf_super *sb = st->sb;
1440
1441 printf(" Magic : %08x\n", __be32_to_cpu(sb->anchor.magic));
1442 printf(" Version : %.8s\n", sb->anchor.revision);
1443 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1444 printf("\n");
1445 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1446 printf("\n");
1447 printf(" Seq : %08x\n", __be32_to_cpu(sb->active->seq));
1448 printf(" Redundant hdr : %s\n", sb->secondary.magic == DDF_HEADER_MAGIC
1449 ?"yes" : "no");
1450 examine_vds(sb);
1451 examine_pds(sb);
1452 }
1453
1454 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1455
1456 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1457 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1458
1459 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1460 {
1461 /*
1462 * Figure out the VD number for this supertype.
1463 * Returns DDF_CONTAINER for the container itself,
1464 * and DDF_NOTFOUND on error.
1465 */
1466 struct ddf_super *ddf = st->sb;
1467 struct mdinfo *sra;
1468 char *sub, *end;
1469 unsigned int vcnum;
1470
1471 if (*st->container_devnm == '\0')
1472 return DDF_CONTAINER;
1473
1474 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1475 if (!sra || sra->array.major_version != -1 ||
1476 sra->array.minor_version != -2 ||
1477 !is_subarray(sra->text_version))
1478 return DDF_NOTFOUND;
1479
1480 sub = strchr(sra->text_version + 1, '/');
1481 if (sub != NULL)
1482 vcnum = strtoul(sub + 1, &end, 10);
1483 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1484 vcnum >= __be16_to_cpu(ddf->active->max_vd_entries))
1485 return DDF_NOTFOUND;
1486
1487 return vcnum;
1488 }
1489
1490 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1491 {
1492 /* We just write a generic DDF ARRAY entry
1493 */
1494 struct mdinfo info;
1495 char nbuf[64];
1496 getinfo_super_ddf(st, &info, NULL);
1497 fname_from_uuid(st, &info, nbuf, ':');
1498
1499 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1500 }
1501
1502 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1503 {
1504 /* We just write a generic DDF ARRAY entry
1505 */
1506 struct ddf_super *ddf = st->sb;
1507 struct mdinfo info;
1508 unsigned int i;
1509 char nbuf[64];
1510 getinfo_super_ddf(st, &info, NULL);
1511 fname_from_uuid(st, &info, nbuf, ':');
1512
1513 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
1514 struct virtual_entry *ve = &ddf->virt->entries[i];
1515 struct vcl vcl;
1516 char nbuf1[64];
1517 if (all_ff(ve->guid))
1518 continue;
1519 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1520 ddf->currentconf =&vcl;
1521 uuid_from_super_ddf(st, info.uuid);
1522 fname_from_uuid(st, &info, nbuf1, ':');
1523 printf("ARRAY container=%s member=%d UUID=%s\n",
1524 nbuf+5, i, nbuf1+5);
1525 }
1526 }
1527
1528 static void export_examine_super_ddf(struct supertype *st)
1529 {
1530 struct mdinfo info;
1531 char nbuf[64];
1532 getinfo_super_ddf(st, &info, NULL);
1533 fname_from_uuid(st, &info, nbuf, ':');
1534 printf("MD_METADATA=ddf\n");
1535 printf("MD_LEVEL=container\n");
1536 printf("MD_UUID=%s\n", nbuf+5);
1537 }
1538
1539 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1540 {
1541 void *buf;
1542 unsigned long long dsize, offset;
1543 int bytes;
1544 struct ddf_header *ddf;
1545 int written = 0;
1546
1547 /* The metadata consists of an anchor, a primary, and a secondary.
1548 * This all lives at the end of the device.
1549 * So it is easiest to find the earliest of primary and
1550 * secondary, and copy everything from there.
1551 *
1552 * The anchor is 512 bytes from the end. It contains primary_lba and
1553 * secondary_lba; we copy from the earlier of those.
1554 */
1555
1556 if (posix_memalign(&buf, 4096, 4096) != 0)
1557 return 1;
1558
1559 if (!get_dev_size(from, NULL, &dsize))
1560 goto err;
1561
1562 if (lseek64(from, dsize-512, 0) < 0)
1563 goto err;
1564 if (read(from, buf, 512) != 512)
1565 goto err;
1566 ddf = buf;
1567 if (ddf->magic != DDF_HEADER_MAGIC ||
1568 calc_crc(ddf, 512) != ddf->crc ||
1569 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1570 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1571 goto err;
1572
1573 offset = dsize - 512;
1574 if ((__be64_to_cpu(ddf->primary_lba) << 9) < offset)
1575 offset = __be64_to_cpu(ddf->primary_lba) << 9;
1576 if ((__be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1577 offset = __be64_to_cpu(ddf->secondary_lba) << 9;
1578
1579 bytes = dsize - offset;
1580
1581 if (lseek64(from, offset, 0) < 0 ||
1582 lseek64(to, offset, 0) < 0)
1583 goto err;
1584 while (written < bytes) {
1585 int n = bytes - written;
1586 if (n > 4096)
1587 n = 4096;
1588 if (read(from, buf, n) != n)
1589 goto err;
1590 if (write(to, buf, n) != n)
1591 goto err;
1592 written += n;
1593 }
1594 free(buf);
1595 return 0;
1596 err:
1597 free(buf);
1598 return 1;
1599 }
1600
1601 static void detail_super_ddf(struct supertype *st, char *homehost)
1602 {
1603 /* FIXME later
1604 * Could print DDF GUID
1605 * Need to find which array
1606 * If whole, briefly list all arrays
1607 * If one, give name
1608 */
1609 }
1610
1611 static void brief_detail_super_ddf(struct supertype *st)
1612 {
1613 struct mdinfo info;
1614 char nbuf[64];
1615 struct ddf_super *ddf = st->sb;
1616 unsigned int vcnum = get_vd_num_of_subarray(st);
1617 if (vcnum == DDF_CONTAINER)
1618 uuid_from_super_ddf(st, info.uuid);
1619 else if (vcnum == DDF_NOTFOUND)
1620 return;
1621 else
1622 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, info.uuid);
1623 fname_from_uuid(st, &info, nbuf,':');
1624 printf(" UUID=%s", nbuf + 5);
1625 }
1626 #endif
1627
1628 static int match_home_ddf(struct supertype *st, char *homehost)
1629 {
1630 /* It matches 'this' host if the controller is a
1631 * Linux-MD controller with vendor_data matching
1632 * the hostname
1633 */
1634 struct ddf_super *ddf = st->sb;
1635 unsigned int len;
1636
1637 if (!homehost)
1638 return 0;
1639 len = strlen(homehost);
1640
1641 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1642 len < sizeof(ddf->controller.vendor_data) &&
1643 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1644 ddf->controller.vendor_data[len] == 0);
1645 }
1646
1647 #ifndef MDASSEMBLE
1648 static int find_index_in_bvd(const struct ddf_super *ddf,
1649 const struct vd_config *conf, unsigned int n,
1650 unsigned int *n_bvd)
1651 {
1652 /*
1653 * Find the index of the n-th valid physical disk in this BVD
1654 */
1655 unsigned int i, j;
1656 for (i = 0, j = 0; i < ddf->mppe &&
1657 j < __be16_to_cpu(conf->prim_elmnt_count); i++) {
1658 if (conf->phys_refnum[i] != 0xffffffff) {
1659 if (n == j) {
1660 *n_bvd = i;
1661 return 1;
1662 }
1663 j++;
1664 }
1665 }
1666 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1667 __func__, n, __be16_to_cpu(conf->prim_elmnt_count));
1668 return 0;
1669 }
1670
1671 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1672 unsigned int n,
1673 unsigned int *n_bvd, struct vcl **vcl)
1674 {
1675 struct vcl *v;
1676
1677 for (v = ddf->conflist; v; v = v->next) {
1678 unsigned int nsec, ibvd;
1679 struct vd_config *conf;
1680 if (inst != v->vcnum)
1681 continue;
1682 conf = &v->conf;
1683 if (conf->sec_elmnt_count == 1) {
1684 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1685 *vcl = v;
1686 return conf;
1687 } else
1688 goto bad;
1689 }
1690 if (v->other_bvds == NULL) {
1691 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1692 __func__, conf->sec_elmnt_count);
1693 goto bad;
1694 }
1695 nsec = n / __be16_to_cpu(conf->prim_elmnt_count);
1696 if (conf->sec_elmnt_seq != nsec) {
1697 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1698 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1699 == nsec)
1700 break;
1701 }
1702 if (ibvd == conf->sec_elmnt_count)
1703 goto bad;
1704 conf = v->other_bvds[ibvd-1];
1705 }
1706 if (!find_index_in_bvd(ddf, conf,
1707 n - nsec*conf->sec_elmnt_count, n_bvd))
1708 goto bad;
1709 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1710 , __func__, n, *n_bvd, ibvd-1, inst);
1711 *vcl = v;
1712 return conf;
1713 }
1714 bad:
1715 pr_err("%s: Could't find disk %d in array %u\n", __func__, n, inst);
1716 return NULL;
1717 }
1718 #endif
1719
1720 static int find_phys(const struct ddf_super *ddf, __u32 phys_refnum)
1721 {
1722 /* Find the entry in phys_disk which has the given refnum
1723 * and return its index
1724 */
1725 unsigned int i;
1726 for (i = 0; i < __be16_to_cpu(ddf->phys->max_pdes); i++)
1727 if (ddf->phys->entries[i].refnum == phys_refnum)
1728 return i;
1729 return -1;
1730 }
1731
1732 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1733 {
1734 char buf[20];
1735 struct sha1_ctx ctx;
1736 sha1_init_ctx(&ctx);
1737 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1738 sha1_finish_ctx(&ctx, buf);
1739 memcpy(uuid, buf, 4*4);
1740 }
1741
1742 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1743 {
1744 /* The uuid returned here is used for:
1745 * uuid to put into bitmap file (Create, Grow)
1746 * uuid for backup header when saving critical section (Grow)
1747 * comparing uuids when re-adding a device into an array
1748 * In these cases the uuid required is that of the data-array,
1749 * not the device-set.
1750 * uuid to recognise same set when adding a missing device back
1751 * to an array. This is a uuid for the device-set.
1752 *
1753 * For each of these we can make do with a truncated
1754 * or hashed uuid rather than the original, as long as
1755 * everyone agrees.
1756 * In the case of SVD we assume the BVD is of interest,
1757 * though that might not be the case if a bitmap were made for
1758 * a mirrored SVD - worry about that later.
1759 * So we need to find the VD configuration record for the
1760 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1761 * The first 16 bytes of the sha1 of these is used.
1762 */
1763 struct ddf_super *ddf = st->sb;
1764 struct vcl *vcl = ddf->currentconf;
1765 char *guid;
1766
1767 if (vcl)
1768 guid = vcl->conf.guid;
1769 else
1770 guid = ddf->anchor.guid;
1771 uuid_from_ddf_guid(guid, uuid);
1772 }
1773
1774 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1775
1776 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1777 {
1778 struct ddf_super *ddf = st->sb;
1779 int map_disks = info->array.raid_disks;
1780 __u32 *cptr;
1781
1782 if (ddf->currentconf) {
1783 getinfo_super_ddf_bvd(st, info, map);
1784 return;
1785 }
1786 memset(info, 0, sizeof(*info));
1787
1788 info->array.raid_disks = __be16_to_cpu(ddf->phys->used_pdes);
1789 info->array.level = LEVEL_CONTAINER;
1790 info->array.layout = 0;
1791 info->array.md_minor = -1;
1792 cptr = (__u32 *)(ddf->anchor.guid + 16);
1793 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1794
1795 info->array.utime = 0;
1796 info->array.chunk_size = 0;
1797 info->container_enough = 1;
1798
1799 info->disk.major = 0;
1800 info->disk.minor = 0;
1801 if (ddf->dlist) {
1802 info->disk.number = __be32_to_cpu(ddf->dlist->disk.refnum);
1803 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1804
1805 info->data_offset = __be64_to_cpu(ddf->phys->
1806 entries[info->disk.raid_disk].
1807 config_size);
1808 info->component_size = ddf->dlist->size - info->data_offset;
1809 } else {
1810 info->disk.number = -1;
1811 info->disk.raid_disk = -1;
1812 // info->disk.raid_disk = find refnum in the table and use index;
1813 }
1814 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1815
1816 info->recovery_start = MaxSector;
1817 info->reshape_active = 0;
1818 info->recovery_blocked = 0;
1819 info->name[0] = 0;
1820
1821 info->array.major_version = -1;
1822 info->array.minor_version = -2;
1823 strcpy(info->text_version, "ddf");
1824 info->safe_mode_delay = 0;
1825
1826 uuid_from_super_ddf(st, info->uuid);
1827
1828 if (map) {
1829 int i;
1830 for (i = 0 ; i < map_disks; i++) {
1831 if (i < info->array.raid_disks &&
1832 (__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Online) &&
1833 !(__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Failed))
1834 map[i] = 1;
1835 else
1836 map[i] = 0;
1837 }
1838 }
1839 }
1840
1841 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1842 {
1843 struct ddf_super *ddf = st->sb;
1844 struct vcl *vc = ddf->currentconf;
1845 int cd = ddf->currentdev;
1846 int n_prim;
1847 int j;
1848 struct dl *dl;
1849 int map_disks = info->array.raid_disks;
1850 __u32 *cptr;
1851 struct vd_config *conf;
1852
1853 memset(info, 0, sizeof(*info));
1854 if (layout_ddf2md(&vc->conf, &info->array) == -1)
1855 return;
1856 info->array.md_minor = -1;
1857 cptr = (__u32 *)(vc->conf.guid + 16);
1858 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1859 info->array.utime = DECADE + __be32_to_cpu(vc->conf.timestamp);
1860 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1861 info->custom_array_size = 0;
1862
1863 conf = &vc->conf;
1864 n_prim = __be16_to_cpu(conf->prim_elmnt_count);
1865 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
1866 int ibvd = cd / n_prim - 1;
1867 cd %= n_prim;
1868 conf = vc->other_bvds[ibvd];
1869 }
1870
1871 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1872 info->data_offset =
1873 __be64_to_cpu(LBA_OFFSET(ddf, &vc->conf)[cd]);
1874 if (vc->block_sizes)
1875 info->component_size = vc->block_sizes[cd];
1876 else
1877 info->component_size = __be64_to_cpu(vc->conf.blocks);
1878 }
1879
1880 for (dl = ddf->dlist; dl ; dl = dl->next)
1881 if (dl->raiddisk == ddf->currentdev)
1882 break;
1883
1884 info->disk.major = 0;
1885 info->disk.minor = 0;
1886 info->disk.state = 0;
1887 if (dl) {
1888 info->disk.major = dl->major;
1889 info->disk.minor = dl->minor;
1890 info->disk.raid_disk = dl->raiddisk;
1891 info->disk.number = dl->pdnum;
1892 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
1893 }
1894
1895 info->container_member = ddf->currentconf->vcnum;
1896
1897 info->recovery_start = MaxSector;
1898 info->resync_start = 0;
1899 info->reshape_active = 0;
1900 info->recovery_blocked = 0;
1901 if (!(ddf->virt->entries[info->container_member].state
1902 & DDF_state_inconsistent) &&
1903 (ddf->virt->entries[info->container_member].init_state
1904 & DDF_initstate_mask)
1905 == DDF_init_full)
1906 info->resync_start = MaxSector;
1907
1908 uuid_from_super_ddf(st, info->uuid);
1909
1910 info->array.major_version = -1;
1911 info->array.minor_version = -2;
1912 sprintf(info->text_version, "/%s/%d",
1913 st->container_devnm,
1914 info->container_member);
1915 info->safe_mode_delay = 200;
1916
1917 memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
1918 info->name[16]=0;
1919 for(j=0; j<16; j++)
1920 if (info->name[j] == ' ')
1921 info->name[j] = 0;
1922
1923 if (map)
1924 for (j = 0; j < map_disks; j++) {
1925 map[j] = 0;
1926 if (j < info->array.raid_disks) {
1927 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
1928 if (i >= 0 &&
1929 (__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Online) &&
1930 !(__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Failed))
1931 map[i] = 1;
1932 }
1933 }
1934 }
1935
1936 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
1937 char *update,
1938 char *devname, int verbose,
1939 int uuid_set, char *homehost)
1940 {
1941 /* For 'assemble' and 'force' we need to return non-zero if any
1942 * change was made. For others, the return value is ignored.
1943 * Update options are:
1944 * force-one : This device looks a bit old but needs to be included,
1945 * update age info appropriately.
1946 * assemble: clear any 'faulty' flag to allow this device to
1947 * be assembled.
1948 * force-array: Array is degraded but being forced, mark it clean
1949 * if that will be needed to assemble it.
1950 *
1951 * newdev: not used ????
1952 * grow: Array has gained a new device - this is currently for
1953 * linear only
1954 * resync: mark as dirty so a resync will happen.
1955 * uuid: Change the uuid of the array to match what is given
1956 * homehost: update the recorded homehost
1957 * name: update the name - preserving the homehost
1958 * _reshape_progress: record new reshape_progress position.
1959 *
1960 * Following are not relevant for this version:
1961 * sparc2.2 : update from old dodgey metadata
1962 * super-minor: change the preferred_minor number
1963 * summaries: update redundant counters.
1964 */
1965 int rv = 0;
1966 // struct ddf_super *ddf = st->sb;
1967 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
1968 // struct virtual_entry *ve = find_ve(ddf);
1969
1970 /* We don't need to handle "force-*" or "assemble" as
1971 * there is no need to 'trick' the kernel. Once the metadata is
1972 * updated to activate the array, all the implied modifications
1973 * will just happen.
1974 */
1975
1976 if (strcmp(update, "grow") == 0) {
1977 /* FIXME */
1978 } else if (strcmp(update, "resync") == 0) {
1979 // info->resync_checkpoint = 0;
1980 } else if (strcmp(update, "homehost") == 0) {
1981 /* homehost is stored in controller->vendor_data,
1982 * or it is when we are the vendor
1983 */
1984 // if (info->vendor_is_local)
1985 // strcpy(ddf->controller.vendor_data, homehost);
1986 rv = -1;
1987 } else if (strcmp(update, "name") == 0) {
1988 /* name is stored in virtual_entry->name */
1989 // memset(ve->name, ' ', 16);
1990 // strncpy(ve->name, info->name, 16);
1991 rv = -1;
1992 } else if (strcmp(update, "_reshape_progress") == 0) {
1993 /* We don't support reshape yet */
1994 } else if (strcmp(update, "assemble") == 0 ) {
1995 /* Do nothing, just succeed */
1996 rv = 0;
1997 } else
1998 rv = -1;
1999
2000 // update_all_csum(ddf);
2001
2002 return rv;
2003 }
2004
2005 static void make_header_guid(char *guid)
2006 {
2007 __u32 stamp;
2008 /* Create a DDF Header of Virtual Disk GUID */
2009
2010 /* 24 bytes of fiction required.
2011 * first 8 are a 'vendor-id' - "Linux-MD"
2012 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2013 * Remaining 8 random number plus timestamp
2014 */
2015 memcpy(guid, T10, sizeof(T10));
2016 stamp = __cpu_to_be32(0xdeadbeef);
2017 memcpy(guid+8, &stamp, 4);
2018 stamp = __cpu_to_be32(0);
2019 memcpy(guid+12, &stamp, 4);
2020 stamp = __cpu_to_be32(time(0) - DECADE);
2021 memcpy(guid+16, &stamp, 4);
2022 stamp = random32();
2023 memcpy(guid+20, &stamp, 4);
2024 }
2025
2026 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2027 {
2028 unsigned int i;
2029 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
2030 if (all_ff(ddf->virt->entries[i].guid))
2031 return i;
2032 }
2033 return DDF_NOTFOUND;
2034 }
2035
2036 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2037 const char *name)
2038 {
2039 unsigned int i;
2040 if (name == NULL)
2041 return DDF_NOTFOUND;
2042 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
2043 if (all_ff(ddf->virt->entries[i].guid))
2044 continue;
2045 if (!strncmp(name, ddf->virt->entries[i].name,
2046 sizeof(ddf->virt->entries[i].name)))
2047 return i;
2048 }
2049 return DDF_NOTFOUND;
2050 }
2051
2052 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2053 const char *guid)
2054 {
2055 unsigned int i;
2056 if (guid == NULL || all_ff(guid))
2057 return DDF_NOTFOUND;
2058 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++)
2059 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2060 return i;
2061 return DDF_NOTFOUND;
2062 }
2063
2064 static int init_super_ddf_bvd(struct supertype *st,
2065 mdu_array_info_t *info,
2066 unsigned long long size,
2067 char *name, char *homehost,
2068 int *uuid, unsigned long long data_offset);
2069
2070 static int init_super_ddf(struct supertype *st,
2071 mdu_array_info_t *info,
2072 unsigned long long size, char *name, char *homehost,
2073 int *uuid, unsigned long long data_offset)
2074 {
2075 /* This is primarily called by Create when creating a new array.
2076 * We will then get add_to_super called for each component, and then
2077 * write_init_super called to write it out to each device.
2078 * For DDF, Create can create on fresh devices or on a pre-existing
2079 * array.
2080 * To create on a pre-existing array a different method will be called.
2081 * This one is just for fresh drives.
2082 *
2083 * We need to create the entire 'ddf' structure which includes:
2084 * DDF headers - these are easy.
2085 * Controller data - a Sector describing this controller .. not that
2086 * this is a controller exactly.
2087 * Physical Disk Record - one entry per device, so
2088 * leave plenty of space.
2089 * Virtual Disk Records - again, just leave plenty of space.
2090 * This just lists VDs, doesn't give details
2091 * Config records - describes the VDs that use this disk
2092 * DiskData - describes 'this' device.
2093 * BadBlockManagement - empty
2094 * Diag Space - empty
2095 * Vendor Logs - Could we put bitmaps here?
2096 *
2097 */
2098 struct ddf_super *ddf;
2099 char hostname[17];
2100 int hostlen;
2101 int max_phys_disks, max_virt_disks;
2102 unsigned long long sector;
2103 int clen;
2104 int i;
2105 int pdsize, vdsize;
2106 struct phys_disk *pd;
2107 struct virtual_disk *vd;
2108
2109 if (data_offset != INVALID_SECTORS) {
2110 pr_err("data-offset not supported by DDF\n");
2111 return 0;
2112 }
2113
2114 if (st->sb)
2115 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2116 data_offset);
2117
2118 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2119 pr_err("%s could not allocate superblock\n", __func__);
2120 return 0;
2121 }
2122 memset(ddf, 0, sizeof(*ddf));
2123 ddf->dlist = NULL; /* no physical disks yet */
2124 ddf->conflist = NULL; /* No virtual disks yet */
2125 st->sb = ddf;
2126
2127 if (info == NULL) {
2128 /* zeroing superblock */
2129 return 0;
2130 }
2131
2132 /* At least 32MB *must* be reserved for the ddf. So let's just
2133 * start 32MB from the end, and put the primary header there.
2134 * Don't do secondary for now.
2135 * We don't know exactly where that will be yet as it could be
2136 * different on each device. So just set up the lengths for now.
2137 *
2138 */
2139
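/* A rough sketch (just what the code below builds, not a normative layout)
 * of the metadata area, in sectors relative to the primary header:
 *   0                  DDF header
 *   1                  controller data
 *   2                  physical disk records (pdsize/512 sectors)
 *   2 + pdsize/512     virtual disk records (vdsize/512 sectors)
 *   ...                config records, conf_rec_len * (max_part+1) sectors
 *   ...                disk data (1 sector)
 * BBM, diag space and vendor logs are left empty.
 */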
2140 ddf->anchor.magic = DDF_HEADER_MAGIC;
2141 make_header_guid(ddf->anchor.guid);
2142
2143 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2144 ddf->anchor.seq = __cpu_to_be32(1);
2145 ddf->anchor.timestamp = __cpu_to_be32(time(0) - DECADE);
2146 ddf->anchor.openflag = 0xFF;
2147 ddf->anchor.foreignflag = 0;
2148 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2149 ddf->anchor.pad0 = 0xff;
2150 memset(ddf->anchor.pad1, 0xff, 12);
2151 memset(ddf->anchor.header_ext, 0xff, 32);
2152 ddf->anchor.primary_lba = ~(__u64)0;
2153 ddf->anchor.secondary_lba = ~(__u64)0;
2154 ddf->anchor.type = DDF_HEADER_ANCHOR;
2155 memset(ddf->anchor.pad2, 0xff, 3);
2156 ddf->anchor.workspace_len = __cpu_to_be32(32768); /* Must be reserved */
2157 ddf->anchor.workspace_lba = ~(__u64)0; /* Put this at bottom
2158 of 32M reserved.. */
2159 max_phys_disks = 1023; /* Should be enough */
2160 ddf->anchor.max_pd_entries = __cpu_to_be16(max_phys_disks);
2161 max_virt_disks = 255;
2162 ddf->anchor.max_vd_entries = __cpu_to_be16(max_virt_disks); /* ?? */
2163 ddf->anchor.max_partitions = __cpu_to_be16(64); /* ?? */
2164 ddf->max_part = 64;
2165 ddf->mppe = 256;
2166 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
2167 ddf->anchor.config_record_len = __cpu_to_be16(ddf->conf_rec_len);
2168 ddf->anchor.max_primary_element_entries = __cpu_to_be16(ddf->mppe);
2169 memset(ddf->anchor.pad3, 0xff, 54);
2170 /* controller sections is one sector long immediately
2171 * after the ddf header */
2172 sector = 1;
2173 ddf->anchor.controller_section_offset = __cpu_to_be32(sector);
2174 ddf->anchor.controller_section_length = __cpu_to_be32(1);
2175 sector += 1;
2176
2177 /* phys is 8 sectors after that */
2178 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2179 sizeof(struct phys_disk_entry)*max_phys_disks,
2180 512);
2181 switch(pdsize/512) {
2182 case 2: case 8: case 32: case 128: case 512: break;
2183 default: abort();
2184 }
2185 ddf->anchor.phys_section_offset = __cpu_to_be32(sector);
2186 ddf->anchor.phys_section_length =
2187 __cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2188 sector += pdsize/512;
2189
2190 /* virt is another 32 sectors */
2191 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2192 sizeof(struct virtual_entry) * max_virt_disks,
2193 512);
2194 switch(vdsize/512) {
2195 case 2: case 8: case 32: case 128: case 512: break;
2196 default: abort();
2197 }
2198 ddf->anchor.virt_section_offset = __cpu_to_be32(sector);
2199 ddf->anchor.virt_section_length =
2200 __cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2201 sector += vdsize/512;
2202
2203 clen = ddf->conf_rec_len * (ddf->max_part+1);
2204 ddf->anchor.config_section_offset = __cpu_to_be32(sector);
2205 ddf->anchor.config_section_length = __cpu_to_be32(clen);
2206 sector += clen;
2207
2208 ddf->anchor.data_section_offset = __cpu_to_be32(sector);
2209 ddf->anchor.data_section_length = __cpu_to_be32(1);
2210 sector += 1;
2211
2212 ddf->anchor.bbm_section_length = __cpu_to_be32(0);
2213 ddf->anchor.bbm_section_offset = __cpu_to_be32(0xFFFFFFFF);
2214 ddf->anchor.diag_space_length = __cpu_to_be32(0);
2215 ddf->anchor.diag_space_offset = __cpu_to_be32(0xFFFFFFFF);
2216 ddf->anchor.vendor_length = __cpu_to_be32(0);
2217 ddf->anchor.vendor_offset = __cpu_to_be32(0xFFFFFFFF);
2218
2219 memset(ddf->anchor.pad4, 0xff, 256);
2220
2221 memcpy(&ddf->primary, &ddf->anchor, 512);
2222 memcpy(&ddf->secondary, &ddf->anchor, 512);
2223
2224 ddf->primary.openflag = 1; /* I guess.. */
2225 ddf->primary.type = DDF_HEADER_PRIMARY;
2226
2227 ddf->secondary.openflag = 1; /* I guess.. */
2228 ddf->secondary.type = DDF_HEADER_SECONDARY;
2229
2230 ddf->active = &ddf->primary;
2231
2232 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2233
2234 /* 24 more bytes of fiction required.
2235 * first 8 are a 'vendor-id' - "Linux-MD"
2236 * Remaining 16 are serial number.... maybe a hostname would do?
2237 */
2238 memcpy(ddf->controller.guid, T10, sizeof(T10));
2239 gethostname(hostname, sizeof(hostname));
2240 hostname[sizeof(hostname) - 1] = 0;
2241 hostlen = strlen(hostname);
2242 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2243 for (i = strlen(T10) ; i+hostlen < 24; i++)
2244 ddf->controller.guid[i] = ' ';
2245
2246 ddf->controller.type.vendor_id = __cpu_to_be16(0xDEAD);
2247 ddf->controller.type.device_id = __cpu_to_be16(0xBEEF);
2248 ddf->controller.type.sub_vendor_id = 0;
2249 ddf->controller.type.sub_device_id = 0;
2250 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2251 memset(ddf->controller.pad, 0xff, 8);
2252 memset(ddf->controller.vendor_data, 0xff, 448);
2253 if (homehost && strlen(homehost) < 440)
2254 strcpy((char*)ddf->controller.vendor_data, homehost);
2255
2256 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2257 pr_err("%s could not allocate pd\n", __func__);
2258 return 0;
2259 }
2260 ddf->phys = pd;
2261 ddf->pdsize = pdsize;
2262
2263 memset(pd, 0xff, pdsize);
2264 memset(pd, 0, sizeof(*pd));
2265 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2266 pd->used_pdes = __cpu_to_be16(0);
2267 pd->max_pdes = __cpu_to_be16(max_phys_disks);
2268 memset(pd->pad, 0xff, 52);
2269 for (i = 0; i < max_phys_disks; i++)
2270 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2271
2272 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2273 pr_err("%s could not allocate vd\n", __func__);
2274 return 0;
2275 }
2276 ddf->virt = vd;
2277 ddf->vdsize = vdsize;
2278 memset(vd, 0, vdsize);
2279 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2280 vd->populated_vdes = __cpu_to_be16(0);
2281 vd->max_vdes = __cpu_to_be16(max_virt_disks);
2282 memset(vd->pad, 0xff, 52);
2283
2284 for (i=0; i<max_virt_disks; i++)
2285 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2286
2287 st->sb = ddf;
2288 ddf_set_updates_pending(ddf);
2289 return 1;
2290 }
2291
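/* DDF stores the stripe ("chunk") size as a power-of-two shift relative to
 * 512-byte sectors.  Worked example: a 64KiB chunk is 64*1024/512 == 128 ==
 * 1<<7 sectors, so chunk_to_shift(64*1024) == 7; getinfo_super_ddf_bvd()
 * reverses this with "512 << chunk_shift".
 */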
2292 static int chunk_to_shift(int chunksize)
2293 {
2294 return ffs(chunksize/512)-1;
2295 }
2296
2297 #ifndef MDASSEMBLE
2298 struct extent {
2299 unsigned long long start, size;
2300 };
2301 static int cmp_extent(const void *av, const void *bv)
2302 {
2303 const struct extent *a = av;
2304 const struct extent *b = bv;
2305 if (a->start < b->start)
2306 return -1;
2307 if (a->start > b->start)
2308 return 1;
2309 return 0;
2310 }
2311
2312 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2313 {
2314 /* find a list of used extents on the given physical device
2315 * (dnum) of the given ddf.
2316 * Return a malloced array of 'struct extent'
2317
2318 * FIXME ignore DDF_Legacy devices?
2319
2320 */
2321 struct extent *rv;
2322 int n = 0;
2323 unsigned int i;
2324
2325 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2326
2327 for (i = 0; i < ddf->max_part; i++) {
2328 const struct vd_config *bvd;
2329 unsigned int ibvd;
2330 struct vcl *v = dl->vlist[i];
2331 if (v == NULL ||
2332 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2333 &bvd, &ibvd) == DDF_NOTFOUND)
2334 continue;
2335 rv[n].start = __be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2336 rv[n].size = __be64_to_cpu(bvd->blocks);
2337 n++;
2338 }
2339 qsort(rv, n, sizeof(*rv), cmp_extent);
2340
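/* Terminate the sorted list with a sentinel entry starting at config_size,
 * i.e. where the reserved metadata area begins, so that callers can walk
 * the free gaps with "esize = e[i].start - pos" right up to the end of
 * usable space.
 */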
2341 rv[n].start = __be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2342 rv[n].size = 0;
2343 return rv;
2344 }
2345 #endif
2346
2347 static int init_super_ddf_bvd(struct supertype *st,
2348 mdu_array_info_t *info,
2349 unsigned long long size,
2350 char *name, char *homehost,
2351 int *uuid, unsigned long long data_offset)
2352 {
2353 /* We are creating a BVD inside a pre-existing container.
2354 * so st->sb is already set.
2355 * We need to create a new vd_config and a new virtual_entry
2356 */
2357 struct ddf_super *ddf = st->sb;
2358 unsigned int venum, i;
2359 struct virtual_entry *ve;
2360 struct vcl *vcl;
2361 struct vd_config *vc;
2362
2363 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2364 pr_err("This ddf already has an array called %s\n", name);
2365 return 0;
2366 }
2367 venum = find_unused_vde(ddf);
2368 if (venum == DDF_NOTFOUND) {
2369 pr_err("Cannot find spare slot for virtual disk\n");
2370 return 0;
2371 }
2372 ve = &ddf->virt->entries[venum];
2373
2374 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2375 * timestamp, random number
2376 */
2377 make_header_guid(ve->guid);
2378 ve->unit = __cpu_to_be16(info->md_minor);
2379 ve->pad0 = 0xFFFF;
2380 ve->guid_crc = crc32(0, (unsigned char*)ddf->anchor.guid, DDF_GUID_LEN);
2381 ve->type = 0;
2382 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2383 if (info->state & 1) /* clean */
2384 ve->init_state = DDF_init_full;
2385 else
2386 ve->init_state = DDF_init_not;
2387
2388 memset(ve->pad1, 0xff, 14);
2389 memset(ve->name, ' ', 16);
2390 if (name)
2391 strncpy(ve->name, name, 16);
2392 ddf->virt->populated_vdes =
2393 __cpu_to_be16(__be16_to_cpu(ddf->virt->populated_vdes)+1);
2394
2395 /* Now create a new vd_config */
2396 if (posix_memalign((void**)&vcl, 512,
2397 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2398 pr_err("%s could not allocate vd_config\n", __func__);
2399 return 0;
2400 }
2401 vcl->vcnum = venum;
2402 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2403 vc = &vcl->conf;
2404
2405 vc->magic = DDF_VD_CONF_MAGIC;
2406 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2407 vc->timestamp = __cpu_to_be32(time(0)-DECADE);
2408 vc->seqnum = __cpu_to_be32(1);
2409 memset(vc->pad0, 0xff, 24);
2410 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2411 if (layout_md2ddf(info, vc) == -1 ||
2412 __be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2413 pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
2414 __func__, info->level, info->layout, info->raid_disks);
2415 free(vcl);
2416 return 0;
2417 }
2418 vc->sec_elmnt_seq = 0;
2419 if (alloc_other_bvds(ddf, vcl) != 0) {
2420 pr_err("%s could not allocate other bvds\n",
2421 __func__);
2422 free(vcl);
2423 return 0;
2424 }
2425 vc->blocks = __cpu_to_be64(info->size * 2);
2426 vc->array_blocks = __cpu_to_be64(
2427 calc_array_size(info->level, info->raid_disks, info->layout,
2428 info->chunk_size, info->size*2));
2429 memset(vc->pad1, 0xff, 8);
2430 vc->spare_refs[0] = 0xffffffff;
2431 vc->spare_refs[1] = 0xffffffff;
2432 vc->spare_refs[2] = 0xffffffff;
2433 vc->spare_refs[3] = 0xffffffff;
2434 vc->spare_refs[4] = 0xffffffff;
2435 vc->spare_refs[5] = 0xffffffff;
2436 vc->spare_refs[6] = 0xffffffff;
2437 vc->spare_refs[7] = 0xffffffff;
2438 memset(vc->cache_pol, 0, 8);
2439 vc->bg_rate = 0x80;
2440 memset(vc->pad2, 0xff, 3);
2441 memset(vc->pad3, 0xff, 52);
2442 memset(vc->pad4, 0xff, 192);
2443 memset(vc->v0, 0xff, 32);
2444 memset(vc->v1, 0xff, 32);
2445 memset(vc->v2, 0xff, 16);
2446 memset(vc->v3, 0xff, 16);
2447 memset(vc->vendor, 0xff, 32);
2448
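/* The variable-length tail of the config record holds ddf->mppe 32-bit
 * phys_refnum slots (0xff meaning unused) followed by the per-disk 64-bit
 * starting LBAs that LBA_OFFSET() indexes; initialise both regions here.
 */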
2449 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2450 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2451
2452 for (i = 1; i < vc->sec_elmnt_count; i++) {
2453 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2454 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2455 }
2456
2457 vcl->next = ddf->conflist;
2458 ddf->conflist = vcl;
2459 ddf->currentconf = vcl;
2460 ddf_set_updates_pending(ddf);
2461 return 1;
2462 }
2463
2464 static int get_svd_state(const struct ddf_super *, const struct vcl *);
2465
2466 #ifndef MDASSEMBLE
2467 static void add_to_super_ddf_bvd(struct supertype *st,
2468 mdu_disk_info_t *dk, int fd, char *devname)
2469 {
2470 /* fd and devname identify a device with-in the ddf container (st).
2471 * dk identifies a location in the new BVD.
2472 * We need to find suitable free space in that device and update
2473 * the phys_refnum and lba_offset for the newly created vd_config.
2474 * We might also want to update the type in the phys_disk
2475 * section.
2476 *
2477 * Alternately: fd == -1 and we have already chosen which device to
2478 * use and recorded it in dl->raiddisk.
2479 */
2480 struct dl *dl;
2481 struct ddf_super *ddf = st->sb;
2482 struct vd_config *vc;
2483 unsigned int i;
2484 unsigned long long blocks, pos, esize;
2485 struct extent *ex;
2486 unsigned int raid_disk = dk->raid_disk;
2487
2488 if (fd == -1) {
2489 for (dl = ddf->dlist; dl ; dl = dl->next)
2490 if (dl->raiddisk == dk->raid_disk)
2491 break;
2492 } else {
2493 for (dl = ddf->dlist; dl ; dl = dl->next)
2494 if (dl->major == dk->major &&
2495 dl->minor == dk->minor)
2496 break;
2497 }
2498 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
2499 return;
2500
2501 vc = &ddf->currentconf->conf;
2502 if (vc->sec_elmnt_count > 1) {
2503 unsigned int n = __be16_to_cpu(vc->prim_elmnt_count);
2504 if (raid_disk >= n)
2505 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2506 raid_disk %= n;
2507 }
2508
2509 ex = get_extents(ddf, dl);
2510 if (!ex)
2511 return;
2512
2513 i = 0; pos = 0;
2514 blocks = __be64_to_cpu(vc->blocks);
2515 if (ddf->currentconf->block_sizes)
2516 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2517
2518 do {
2519 esize = ex[i].start - pos;
2520 if (esize >= blocks)
2521 break;
2522 pos = ex[i].start + ex[i].size;
2523 i++;
2524 } while (ex[i-1].size);
2525
2526 free(ex);
2527 if (esize < blocks)
2528 return;
2529
2530 ddf->currentdev = dk->raid_disk;
2531 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2532 LBA_OFFSET(ddf, vc)[raid_disk] = __cpu_to_be64(pos);
2533
2534 for (i = 0; i < ddf->max_part ; i++)
2535 if (dl->vlist[i] == NULL)
2536 break;
2537 if (i == ddf->max_part)
2538 return;
2539 dl->vlist[i] = ddf->currentconf;
2540
2541 if (fd >= 0)
2542 dl->fd = fd;
2543 if (devname)
2544 dl->devname = devname;
2545
2546 /* Check if we can mark array as optimal yet */
2547 i = ddf->currentconf->vcnum;
2548 ddf->virt->entries[i].state =
2549 (ddf->virt->entries[i].state & ~DDF_state_mask)
2550 | get_svd_state(ddf, ddf->currentconf);
2551 ddf->phys->entries[dl->pdnum].type &= ~__cpu_to_be16(DDF_Global_Spare);
2552 ddf->phys->entries[dl->pdnum].type |= __cpu_to_be16(DDF_Active_in_VD);
2553 ddf_set_updates_pending(ddf);
2554 }
2555
2556 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2557 {
2558 unsigned int i;
2559 for (i = 0; i < __be16_to_cpu(ddf->phys->max_pdes); i++) {
2560 if (all_ff(ddf->phys->entries[i].guid))
2561 return i;
2562 }
2563 return DDF_NOTFOUND;
2564 }
2565
2566 /* add a device to a container, either while creating it or while
2567 * expanding a pre-existing container
2568 */
2569 static int add_to_super_ddf(struct supertype *st,
2570 mdu_disk_info_t *dk, int fd, char *devname,
2571 unsigned long long data_offset)
2572 {
2573 struct ddf_super *ddf = st->sb;
2574 struct dl *dd;
2575 time_t now;
2576 struct tm *tm;
2577 unsigned long long size;
2578 struct phys_disk_entry *pde;
2579 unsigned int n, i;
2580 struct stat stb;
2581 __u32 *tptr;
2582
2583 if (ddf->currentconf) {
2584 add_to_super_ddf_bvd(st, dk, fd, devname);
2585 return 0;
2586 }
2587
2588 /* This is device numbered dk->number. We need to create
2589 * a phys_disk entry and a more detailed disk_data entry.
2590 */
2591 fstat(fd, &stb);
2592 n = find_unused_pde(ddf);
2593 if (n == DDF_NOTFOUND) {
2594 pr_err("%s: No free slot in array, cannot add disk\n",
2595 __func__);
2596 return 1;
2597 }
2598 pde = &ddf->phys->entries[n];
2599 get_dev_size(fd, NULL, &size);
2600 if (size <= 32*1024*1024) {
2601 pr_err("%s: device size must be at least 32MB\n",
2602 __func__);
2603 return 1;
2604 }
2605 size >>= 9;
2606
2607 if (posix_memalign((void**)&dd, 512,
2608 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2609 pr_err("%s could allocate buffer for new disk, aborting\n",
2610 __func__);
2611 return 1;
2612 }
2613 dd->major = major(stb.st_rdev);
2614 dd->minor = minor(stb.st_rdev);
2615 dd->devname = devname;
2616 dd->fd = fd;
2617 dd->spare = NULL;
2618
2619 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2620 now = time(0);
2621 tm = localtime(&now);
2622 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2623 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2624 tptr = (__u32 *)(dd->disk.guid + 16);
2625 *tptr++ = random32();
2626 *tptr = random32();
2627
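/* Pick a refnum that no existing phys entry uses: the scan below leaves
 * i == 0 only when no collision was found, otherwise retry with another
 * random value.
 */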
2628 do {
2629 /* Cannot be bothered finding a CRC of some irrelevant details*/
2630 dd->disk.refnum = random32();
2631 for (i = __be16_to_cpu(ddf->active->max_pd_entries);
2632 i > 0; i--)
2633 if (ddf->phys->entries[i-1].refnum == dd->disk.refnum)
2634 break;
2635 } while (i > 0);
2636
2637 dd->disk.forced_ref = 1;
2638 dd->disk.forced_guid = 1;
2639 memset(dd->disk.vendor, ' ', 32);
2640 memcpy(dd->disk.vendor, "Linux", 5);
2641 memset(dd->disk.pad, 0xff, 442);
2642 for (i = 0; i < ddf->max_part ; i++)
2643 dd->vlist[i] = NULL;
2644
2645 dd->pdnum = n;
2646
2647 if (st->update_tail) {
2648 int len = (sizeof(struct phys_disk) +
2649 sizeof(struct phys_disk_entry));
2650 struct phys_disk *pd;
2651
2652 pd = xmalloc(len);
2653 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2654 pd->used_pdes = __cpu_to_be16(n);
2655 pde = &pd->entries[0];
2656 dd->mdupdate = pd;
2657 } else
2658 ddf->phys->used_pdes = __cpu_to_be16(
2659 1 + __be16_to_cpu(ddf->phys->used_pdes));
2660
2661 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2662 pde->refnum = dd->disk.refnum;
2663 pde->type = __cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2664 pde->state = __cpu_to_be16(DDF_Online);
2665 dd->size = size;
2666 /*
2667 * If there is already a device in dlist, try to reserve the same
2668 * amount of workspace. Otherwise, use 32MB.
2669 * We checked disk size above already.
2670 */
2671 #define __calc_lba(new, old, lba, mb) do { \
2672 unsigned long long dif; \
2673 if ((old) != NULL) \
2674 dif = (old)->size - __be64_to_cpu((old)->lba); \
2675 else \
2676 dif = (new)->size; \
2677 if ((new)->size > dif) \
2678 (new)->lba = __cpu_to_be64((new)->size - dif); \
2679 else \
2680 (new)->lba = __cpu_to_be64((new)->size - (mb*1024*2)); \
2681 } while (0)
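/* With an empty dlist (old == NULL) this reduces to
 *     new->lba = new->size - mb*1024*2
 * i.e. 'mb' megabytes below the end of the device; when a previous disk
 * exists, the new disk reserves the same amount of tail space as the old
 * one so all members keep a consistent layout.
 */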
2682 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2683 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2684 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2685 pde->config_size = dd->workspace_lba;
2686
2687 sprintf(pde->path, "%17.17s", "Information: nil");
2688 memset(pde->pad, 0xff, 6);
2689
2690 if (st->update_tail) {
2691 dd->next = ddf->add_list;
2692 ddf->add_list = dd;
2693 } else {
2694 dd->next = ddf->dlist;
2695 ddf->dlist = dd;
2696 ddf_set_updates_pending(ddf);
2697 }
2698
2699 return 0;
2700 }
2701
2702 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2703 {
2704 struct ddf_super *ddf = st->sb;
2705 struct dl *dl;
2706
2707 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2708 * disappeared from the container.
2709 * We need to arrange that it disappears from the metadata and
2710 * internal data structures too.
2711 * Most of the work is done by ddf_process_update which edits
2712 * the metadata and closes the file handle and attaches the memory
2713 * where free_updates will free it.
2714 */
2715 for (dl = ddf->dlist; dl ; dl = dl->next)
2716 if (dl->major == dk->major &&
2717 dl->minor == dk->minor)
2718 break;
2719 if (!dl)
2720 return -1;
2721
2722 if (st->update_tail) {
2723 int len = (sizeof(struct phys_disk) +
2724 sizeof(struct phys_disk_entry));
2725 struct phys_disk *pd;
2726
2727 pd = xmalloc(len);
2728 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2729 pd->used_pdes = __cpu_to_be16(dl->pdnum);
2730 pd->entries[0].state = __cpu_to_be16(DDF_Missing);
2731 append_metadata_update(st, pd, len);
2732 }
2733 return 0;
2734 }
2735
2736 /*
2737 * This is the write_init_super method for a ddf container. It is
2738 * called when creating a container or adding another device to a
2739 * container.
2740 */
2741 #define NULL_CONF_SZ 4096
2742
2743 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type,
2744 char *null_aligned)
2745 {
2746 unsigned long long sector;
2747 struct ddf_header *header;
2748 int fd, i, n_config, conf_size;
2749 int ret = 0;
2750
2751 fd = d->fd;
2752
2753 switch (type) {
2754 case DDF_HEADER_PRIMARY:
2755 header = &ddf->primary;
2756 sector = __be64_to_cpu(header->primary_lba);
2757 break;
2758 case DDF_HEADER_SECONDARY:
2759 header = &ddf->secondary;
2760 sector = __be64_to_cpu(header->secondary_lba);
2761 break;
2762 default:
2763 return 0;
2764 }
2765
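/* The header is written first with openflag set, and rewritten with
 * openflag cleared once all sections are out (see the 'out:' label below).
 * A header still marked open therefore hints at an interrupted update,
 * which load_super_ddf_all() penalises when picking the freshest copy.
 */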
2766 header->type = type;
2767 header->openflag = 1;
2768 header->crc = calc_crc(header, 512);
2769
2770 lseek64(fd, sector<<9, 0);
2771 if (write(fd, header, 512) < 0)
2772 goto out;
2773
2774 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2775 if (write(fd, &ddf->controller, 512) < 0)
2776 goto out;
2777
2778 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2779 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2780 goto out;
2781 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2782 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2783 goto out;
2784
2785 /* Now write lots of config records. */
2786 n_config = ddf->max_part;
2787 conf_size = ddf->conf_rec_len * 512;
2788 for (i = 0 ; i <= n_config ; i++) {
2789 struct vcl *c;
2790 struct vd_config *vdc = NULL;
2791 if (i == n_config) {
2792 c = (struct vcl *)d->spare;
2793 if (c)
2794 vdc = &c->conf;
2795 } else {
2796 unsigned int dummy;
2797 c = d->vlist[i];
2798 if (c)
2799 get_pd_index_from_refnum(
2800 c, d->disk.refnum,
2801 ddf->mppe,
2802 (const struct vd_config **)&vdc,
2803 &dummy);
2804 }
2805 if (c) {
2806 dprintf("writing conf record %i on disk %08x for %s/%u\n",
2807 i, d->disk.refnum, guid_str(vdc->guid),
2808 vdc->sec_elmnt_seq);
2809 vdc->seqnum = header->seq;
2810 vdc->crc = calc_crc(vdc, conf_size);
2811 if (write(fd, vdc, conf_size) < 0)
2812 break;
2813 } else {
2814 unsigned int togo = conf_size;
2815 while (togo > NULL_CONF_SZ) {
2816 if (write(fd, null_aligned, NULL_CONF_SZ) < 0)
2817 break;
2818 togo -= NULL_CONF_SZ;
2819 }
2820 if (write(fd, null_aligned, togo) < 0)
2821 break;
2822 }
2823 }
2824 if (i <= n_config)
2825 goto out;
2826
2827 d->disk.crc = calc_crc(&d->disk, 512);
2828 if (write(fd, &d->disk, 512) < 0)
2829 goto out;
2830
2831 ret = 1;
2832 out:
2833 header->openflag = 0;
2834 header->crc = calc_crc(header, 512);
2835
2836 lseek64(fd, sector<<9, 0);
2837 if (write(fd, header, 512) < 0)
2838 ret = 0;
2839
2840 return ret;
2841 }
2842
2843 static int __write_init_super_ddf(struct supertype *st)
2844 {
2845 struct ddf_super *ddf = st->sb;
2846 struct dl *d;
2847 int attempts = 0;
2848 int successes = 0;
2849 unsigned long long size;
2850 char *null_aligned;
2851 __u32 seq;
2852
2853 pr_state(ddf, __func__);
2854 if (posix_memalign((void**)&null_aligned, 4096, NULL_CONF_SZ) != 0) {
2855 return -ENOMEM;
2856 }
2857 memset(null_aligned, 0xff, NULL_CONF_SZ);
2858
2859 seq = ddf->active->seq + 1;
2860
2861 /* try to write updated metadata,
2862 * if we catch a failure move on to the next disk
2863 */
2864 for (d = ddf->dlist; d; d=d->next) {
2865 int fd = d->fd;
2866
2867 if (fd < 0)
2868 continue;
2869
2870 attempts++;
2871 /* We need to fill in the primary, (secondary) and workspace
2872 * lba's in the headers, set their checksums,
2873 * Also checksum phys, virt....
2874 *
2875 * Then write everything out, finally the anchor is written.
2876 */
2877 get_dev_size(fd, NULL, &size);
2878 size /= 512;
2879 if (d->workspace_lba != 0)
2880 ddf->anchor.workspace_lba = d->workspace_lba;
2881 else
2882 ddf->anchor.workspace_lba =
2883 __cpu_to_be64(size - 32*1024*2);
2884 if (d->primary_lba != 0)
2885 ddf->anchor.primary_lba = d->primary_lba;
2886 else
2887 ddf->anchor.primary_lba =
2888 __cpu_to_be64(size - 16*1024*2);
2889 if (d->secondary_lba != 0)
2890 ddf->anchor.secondary_lba = d->secondary_lba;
2891 else
2892 ddf->anchor.secondary_lba =
2893 __cpu_to_be64(size - 32*1024*2);
2894 ddf->anchor.seq = seq;
2895 memcpy(&ddf->primary, &ddf->anchor, 512);
2896 memcpy(&ddf->secondary, &ddf->anchor, 512);
2897
2898 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
2899 ddf->anchor.seq = 0xFFFFFFFF; /* no sequencing in anchor */
2900 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
2901
2902 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY,
2903 null_aligned))
2904 continue;
2905
2906 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY,
2907 null_aligned))
2908 continue;
2909
2910 lseek64(fd, (size-1)*512, SEEK_SET);
2911 if (write(fd, &ddf->anchor, 512) < 0)
2912 continue;
2913 successes++;
2914 }
2915 free(null_aligned);
2916
2917 return attempts != successes;
2918 }
2919
2920 static int write_init_super_ddf(struct supertype *st)
2921 {
2922 struct ddf_super *ddf = st->sb;
2923 struct vcl *currentconf = ddf->currentconf;
2924
2925 /* We are done with currentconf; reset it so st points at the container */
2926 ddf->currentconf = NULL;
2927
2928 if (st->update_tail) {
2929 /* queue the virtual_disk and vd_config as metadata updates */
2930 struct virtual_disk *vd;
2931 struct vd_config *vc;
2932 int len;
2933
2934 if (!currentconf) {
2935 int len = (sizeof(struct phys_disk) +
2936 sizeof(struct phys_disk_entry));
2937
2938 /* adding a disk to the container. */
2939 if (!ddf->add_list)
2940 return 0;
2941
2942 append_metadata_update(st, ddf->add_list->mdupdate, len);
2943 ddf->add_list->mdupdate = NULL;
2944 return 0;
2945 }
2946
2947 /* Newly created VD */
2948
2949 /* First the virtual disk. We have a slightly fake header */
2950 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
2951 vd = xmalloc(len);
2952 *vd = *ddf->virt;
2953 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
2954 vd->populated_vdes = __cpu_to_be16(currentconf->vcnum);
2955 append_metadata_update(st, vd, len);
2956
2957 /* Then the vd_config */
2958 len = ddf->conf_rec_len * 512;
2959 vc = xmalloc(len);
2960 memcpy(vc, &currentconf->conf, len);
2961 append_metadata_update(st, vc, len);
2962
2963 /* FIXME I need to close the fds! */
2964 return 0;
2965 } else {
2966 struct dl *d;
2967 for (d = ddf->dlist; d; d=d->next)
2968 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
2969 return __write_init_super_ddf(st);
2970 }
2971 }
2972
2973 #endif
2974
2975 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
2976 unsigned long long data_offset)
2977 {
2978 /* We must reserve the last 32Meg */
2979 if (devsize <= 32*1024*2)
2980 return 0;
2981 return devsize - 32*1024*2;
2982 }
2983
2984 #ifndef MDASSEMBLE
2985
2986 static int reserve_space(struct supertype *st, int raiddisks,
2987 unsigned long long size, int chunk,
2988 unsigned long long *freesize)
2989 {
2990 /* Find 'raiddisks' spare extents at least 'size' big (but
2991 * only caring about multiples of 'chunk') and remember
2992 * them.
2993 * If they cannot be found, fail.
2994 */
2995 struct dl *dl;
2996 struct ddf_super *ddf = st->sb;
2997 int cnt = 0;
2998
2999 for (dl = ddf->dlist; dl ; dl=dl->next) {
3000 dl->raiddisk = -1;
3001 dl->esize = 0;
3002 }
3003 /* Now find largest extent on each device */
3004 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3005 struct extent *e = get_extents(ddf, dl);
3006 unsigned long long pos = 0;
3007 int i = 0;
3008 int found = 0;
3009 unsigned long long minsize = size;
3010
3011 if (size == 0)
3012 minsize = chunk;
3013
3014 if (!e)
3015 continue;
3016 do {
3017 unsigned long long esize;
3018 esize = e[i].start - pos;
3019 if (esize >= minsize) {
3020 found = 1;
3021 minsize = esize;
3022 }
3023 pos = e[i].start + e[i].size;
3024 i++;
3025 } while (e[i-1].size);
3026 if (found) {
3027 cnt++;
3028 dl->esize = minsize;
3029 }
3030 free(e);
3031 }
3032 if (cnt < raiddisks) {
3033 pr_err("not enough devices with space to create array.\n");
3034 return 0; /* Not enough free extents were large enough */
3035 }
3036 if (size == 0) {
3037 /* choose the largest size of which there are at least 'raiddisks' devices */
3038 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3039 struct dl *dl2;
3040 if (dl->esize <= size)
3041 continue;
3042 /* This is bigger than 'size', see if there are enough */
3043 cnt = 0;
3044 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3045 if (dl2->esize >= dl->esize)
3046 cnt++;
3047 if (cnt >= raiddisks)
3048 size = dl->esize;
3049 }
3050 if (chunk) {
3051 size = size / chunk;
3052 size *= chunk;
3053 }
3054 *freesize = size;
3055 if (size < 32) {
3056 pr_err("not enough spare devices to create array.\n");
3057 return 0;
3058 }
3059 }
3060 /* We have a 'size' of which there are enough spaces.
3061 * We simply do a first-fit */
3062 cnt = 0;
3063 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3064 if (dl->esize < size)
3065 continue;
3066
3067 dl->raiddisk = cnt;
3068 cnt++;
3069 }
3070 return 1;
3071 }
3072
3073 static int
3074 validate_geometry_ddf_container(struct supertype *st,
3075 int level, int layout, int raiddisks,
3076 int chunk, unsigned long long size,
3077 unsigned long long data_offset,
3078 char *dev, unsigned long long *freesize,
3079 int verbose);
3080
3081 static int validate_geometry_ddf_bvd(struct supertype *st,
3082 int level, int layout, int raiddisks,
3083 int *chunk, unsigned long long size,
3084 unsigned long long data_offset,
3085 char *dev, unsigned long long *freesize,
3086 int verbose);
3087
3088 static int validate_geometry_ddf(struct supertype *st,
3089 int level, int layout, int raiddisks,
3090 int *chunk, unsigned long long size,
3091 unsigned long long data_offset,
3092 char *dev, unsigned long long *freesize,
3093 int verbose)
3094 {
3095 int fd;
3096 struct mdinfo *sra;
3097 int cfd;
3098
3099 /* ddf potentially supports lots of things, but it depends on
3100 * what devices are offered (and maybe kernel version?)
3101 * If given unused devices, we will make a container.
3102 * If given devices in a container, we will make a BVD.
3103 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3104 */
3105
3106 if (chunk && *chunk == UnSet)
3107 *chunk = DEFAULT_CHUNK;
3108
3109 if (level == -1000000) level = LEVEL_CONTAINER;
3110 if (level == LEVEL_CONTAINER) {
3111 /* Must be a fresh device to add to a container */
3112 return validate_geometry_ddf_container(st, level, layout,
3113 raiddisks, chunk?*chunk:0,
3114 size, data_offset, dev,
3115 freesize,
3116 verbose);
3117 }
3118
3119 if (!dev) {
3120 mdu_array_info_t array = {
3121 .level = level, .layout = layout,
3122 .raid_disks = raiddisks
3123 };
3124 struct vd_config conf;
3125 if (layout_md2ddf(&array, &conf) == -1) {
3126 if (verbose)
3127 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3128 level, layout, raiddisks);
3129 return 0;
3130 }
3131 /* Should check layout? etc */
3132
3133 if (st->sb && freesize) {
3134 /* --create was given a container to create in.
3135 * So we need to check that there are enough
3136 * free spaces and return the amount of space.
3137 * We may as well remember which drives were
3138 * chosen so that add_to_super/getinfo_super
3139 * can return them.
3140 */
3141 return reserve_space(st, raiddisks, size, chunk?*chunk:0, freesize);
3142 }
3143 return 1;
3144 }
3145
3146 if (st->sb) {
3147 /* A container has already been opened, so we are
3148 * creating in there. Maybe a BVD, maybe an SVD.
3149 * Should make a distinction one day.
3150 */
3151 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3152 chunk, size, data_offset, dev,
3153 freesize,
3154 verbose);
3155 }
3156 /* This is the first device for the array.
3157 * If it is a container, we read it in and do automagic allocations,
3158 * no other devices should be given.
3159 * Otherwise it must be a member device of a container, and we
3160 * do manual allocation.
3161 * Later we should check for a BVD and make an SVD.
3162 */
3163 fd = open(dev, O_RDONLY|O_EXCL, 0);
3164 if (fd >= 0) {
3165 sra = sysfs_read(fd, NULL, GET_VERSION);
3166 close(fd);
3167 if (sra && sra->array.major_version == -1 &&
3168 strcmp(sra->text_version, "ddf") == 0) {
3169
3170 /* load super */
3171 /* find space for 'n' devices. */
3172 /* remember the devices */
3173 /* Somehow return the fact that we have enough */
3174 }
3175
3176 if (verbose)
3177 pr_err("ddf: Cannot create this array "
3178 "on device %s - a container is required.\n",
3179 dev);
3180 return 0;
3181 }
3182 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3183 if (verbose)
3184 pr_err("ddf: Cannot open %s: %s\n",
3185 dev, strerror(errno));
3186 return 0;
3187 }
3188 /* Well, it is in use by someone, maybe a 'ddf' container. */
3189 cfd = open_container(fd);
3190 if (cfd < 0) {
3191 close(fd);
3192 if (verbose)
3193 pr_err("ddf: Cannot use %s: %s\n",
3194 dev, strerror(EBUSY));
3195 return 0;
3196 }
3197 sra = sysfs_read(cfd, NULL, GET_VERSION);
3198 close(fd);
3199 if (sra && sra->array.major_version == -1 &&
3200 strcmp(sra->text_version, "ddf") == 0) {
3201 /* This is a member of a ddf container. Load the container
3202 * and try to create a bvd
3203 */
3204 struct ddf_super *ddf;
3205 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3206 st->sb = ddf;
3207 strcpy(st->container_devnm, fd2devnm(cfd));
3208 close(cfd);
3209 return validate_geometry_ddf_bvd(st, level, layout,
3210 raiddisks, chunk, size,
3211 data_offset,
3212 dev, freesize,
3213 verbose);
3214 }
3215 close(cfd);
3216 } else /* device may belong to a different container */
3217 return 0;
3218
3219 return 1;
3220 }
3221
3222 static int
3223 validate_geometry_ddf_container(struct supertype *st,
3224 int level, int layout, int raiddisks,
3225 int chunk, unsigned long long size,
3226 unsigned long long data_offset,
3227 char *dev, unsigned long long *freesize,
3228 int verbose)
3229 {
3230 int fd;
3231 unsigned long long ldsize;
3232
3233 if (level != LEVEL_CONTAINER)
3234 return 0;
3235 if (!dev)
3236 return 1;
3237
3238 fd = open(dev, O_RDONLY|O_EXCL, 0);
3239 if (fd < 0) {
3240 if (verbose)
3241 pr_err("ddf: Cannot open %s: %s\n",
3242 dev, strerror(errno));
3243 return 0;
3244 }
3245 if (!get_dev_size(fd, dev, &ldsize)) {
3246 close(fd);
3247 return 0;
3248 }
3249 close(fd);
3250
3251 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3252 if (*freesize == 0)
3253 return 0;
3254
3255 return 1;
3256 }
3257
3258 static int validate_geometry_ddf_bvd(struct supertype *st,
3259 int level, int layout, int raiddisks,
3260 int *chunk, unsigned long long size,
3261 unsigned long long data_offset,
3262 char *dev, unsigned long long *freesize,
3263 int verbose)
3264 {
3265 struct stat stb;
3266 struct ddf_super *ddf = st->sb;
3267 struct dl *dl;
3268 unsigned long long pos = 0;
3269 unsigned long long maxsize;
3270 struct extent *e;
3271 int i;
3272 /* ddf/bvd supports lots of things, but not containers */
3273 if (level == LEVEL_CONTAINER) {
3274 if (verbose)
3275 pr_err("DDF cannot create a container within an container\n");
3276 return 0;
3277 }
3278 /* We must have the container info already read in. */
3279 if (!ddf)
3280 return 0;
3281
3282 if (!dev) {
3283 /* General test: make sure there is space for
3284 * 'raiddisks' device extents of size 'size'.
3285 */
3286 unsigned long long minsize = size;
3287 int dcnt = 0;
3288 if (minsize == 0)
3289 minsize = 8;
3290 for (dl = ddf->dlist; dl ; dl = dl->next)
3291 {
3292 int found = 0;
3293 pos = 0;
3294
3295 i = 0;
3296 e = get_extents(ddf, dl);
3297 if (!e) continue;
3298 do {
3299 unsigned long long esize;
3300 esize = e[i].start - pos;
3301 if (esize >= minsize)
3302 found = 1;
3303 pos = e[i].start + e[i].size;
3304 i++;
3305 } while (e[i-1].size);
3306 if (found)
3307 dcnt++;
3308 free(e);
3309 }
3310 if (dcnt < raiddisks) {
3311 if (verbose)
3312 pr_err("ddf: Not enough devices with "
3313 "space for this array (%d < %d)\n",
3314 dcnt, raiddisks);
3315 return 0;
3316 }
3317 return 1;
3318 }
3319 /* This device must be a member of the set */
3320 if (stat(dev, &stb) < 0)
3321 return 0;
3322 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3323 return 0;
3324 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3325 if (dl->major == (int)major(stb.st_rdev) &&
3326 dl->minor == (int)minor(stb.st_rdev))
3327 break;
3328 }
3329 if (!dl) {
3330 if (verbose)
3331 pr_err("ddf: %s is not in the "
3332 "same DDF set\n",
3333 dev);
3334 return 0;
3335 }
3336 e = get_extents(ddf, dl);
3337 maxsize = 0;
3338 i = 0;
3339 if (e) do {
3340 unsigned long long esize;
3341 esize = e[i].start - pos;
3342 if (esize >= maxsize)
3343 maxsize = esize;
3344 pos = e[i].start + e[i].size;
3345 i++;
3346 } while (e[i-1].size);
3347 *freesize = maxsize;
3348 // FIXME here I am
3349
3350 return 1;
3351 }
3352
3353 static int load_super_ddf_all(struct supertype *st, int fd,
3354 void **sbp, char *devname)
3355 {
3356 struct mdinfo *sra;
3357 struct ddf_super *super;
3358 struct mdinfo *sd, *best = NULL;
3359 int bestseq = 0;
3360 int seq;
3361 char nm[20];
3362 int dfd;
3363
3364 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3365 if (!sra)
3366 return 1;
3367 if (sra->array.major_version != -1 ||
3368 sra->array.minor_version != -2 ||
3369 strcmp(sra->text_version, "ddf") != 0)
3370 return 1;
3371
3372 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3373 return 1;
3374 memset(super, 0, sizeof(*super));
3375
3376 /* first, try each device, and choose the best ddf */
3377 for (sd = sra->devs ; sd ; sd = sd->next) {
3378 int rv;
3379 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3380 dfd = dev_open(nm, O_RDONLY);
3381 if (dfd < 0)
3382 return 2;
3383 rv = load_ddf_headers(dfd, super, NULL);
3384 close(dfd);
3385 if (rv == 0) {
3386 seq = __be32_to_cpu(super->active->seq);
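/* A copy whose header was left 'open' may have been caught mid-update,
 * so rank it one generation older than its sequence number suggests.
 */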
3387 if (super->active->openflag)
3388 seq--;
3389 if (!best || seq > bestseq) {
3390 bestseq = seq;
3391 best = sd;
3392 }
3393 }
3394 }
3395 if (!best)
3396 return 1;
3397 /* OK, load this ddf */
3398 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3399 dfd = dev_open(nm, O_RDONLY);
3400 if (dfd < 0)
3401 return 1;
3402 load_ddf_headers(dfd, super, NULL);
3403 load_ddf_global(dfd, super, NULL);
3404 close(dfd);
3405 /* Now we need the device-local bits */
3406 for (sd = sra->devs ; sd ; sd = sd->next) {
3407 int rv;
3408
3409 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3410 dfd = dev_open(nm, O_RDWR);
3411 if (dfd < 0)
3412 return 2;
3413 rv = load_ddf_headers(dfd, super, NULL);
3414 if (rv == 0)
3415 rv = load_ddf_local(dfd, super, NULL, 1);
3416 if (rv)
3417 return 1;
3418 }
3419
3420 *sbp = super;
3421 if (st->ss == NULL) {
3422 st->ss = &super_ddf;
3423 st->minor_version = 0;
3424 st->max_devs = 512;
3425 }
3426 strcpy(st->container_devnm, fd2devnm(fd));
3427 return 0;
3428 }
3429
3430 static int load_container_ddf(struct supertype *st, int fd,
3431 char *devname)
3432 {
3433 return load_super_ddf_all(st, fd, &st->sb, devname);
3434 }
3435
3436 #endif /* MDASSEMBLE */
3437
3438 static int check_secondary(const struct vcl *vc)
3439 {
3440 const struct vd_config *conf = &vc->conf;
3441 int i;
3442
3443 /* The only DDF secondary RAID level md can support is
3444 * RAID 10, if the stripe sizes and Basic volume sizes
3445 * are all equal.
3446 * Other configurations could in theory be supported by exposing
3447 * the BVDs to user space and using device mapper for the secondary
3448 * mapping. So far we don't support that.
3449 */
3450
3451 __u64 sec_elements[4] = {0, 0, 0, 0};
3452 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= ((__u64)1 << ((n)&63)))
3453 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & ((__u64)1 << ((n)&63))) != 0)
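/* sec_elements is a 256-bit bitmap (4 x 64) recording which BVD sequence
 * numbers have been seen, so duplicates and gaps in sec_elmnt_seq can be
 * detected below.
 */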
3454
3455 if (vc->other_bvds == NULL) {
3456 pr_err("No BVDs for secondary RAID found\n");
3457 return -1;
3458 }
3459 if (conf->prl != DDF_RAID1) {
3460 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3461 return -1;
3462 }
3463 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3464 pr_err("Secondary RAID level %d is unsupported\n",
3465 conf->srl);
3466 return -1;
3467 }
3468 __set_sec_seen(conf->sec_elmnt_seq);
3469 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3470 const struct vd_config *bvd = vc->other_bvds[i];
3471 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3472 continue;
3473 if (bvd->srl != conf->srl) {
3474 pr_err("Inconsistent secondary RAID level across BVDs\n");
3475 return -1;
3476 }
3477 if (bvd->prl != conf->prl) {
3478 pr_err("Different RAID levels for BVDs are unsupported\n");
3479 return -1;
3480 }
3481 if (bvd->prim_elmnt_count != conf->prim_elmnt_count) {
3482 pr_err("All BVDs must have the same number of primary elements\n");
3483 return -1;
3484 }
3485 if (bvd->chunk_shift != conf->chunk_shift) {
3486 pr_err("Different strip sizes for BVDs are unsupported\n");
3487 return -1;
3488 }
3489 if (bvd->array_blocks != conf->array_blocks) {
3490 pr_err("Different BVD sizes are unsupported\n");
3491 return -1;
3492 }
3493 __set_sec_seen(bvd->sec_elmnt_seq);
3494 }
3495 for (i = 0; i < conf->sec_elmnt_count; i++) {
3496 if (!__was_sec_seen(i)) {
3497 pr_err("BVD %d is missing\n", i);
3498 return -1;
3499 }
3500 }
3501 return 0;
3502 }
3503
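/* Map a physical disk reference number to its position within a virtual
 * disk.  On success *bvd points at the BVD configuration containing
 * 'refnum', *idx is its slot in that BVD's phys_refnum[] table, and the
 * return value is the global raid-disk position: sec_elmnt_seq *
 * prim_elmnt_count plus the zero-based position of 'refnum' among the
 * valid entries of that BVD.  Returns DDF_NOTFOUND (*bvd == NULL) if the
 * refnum is not in use anywhere in this virtual disk.
 */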
3504 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3505 __u32 refnum, unsigned int nmax,
3506 const struct vd_config **bvd,
3507 unsigned int *idx)
3508 {
3509 unsigned int i, j, n, sec, cnt;
3510
3511 cnt = __be16_to_cpu(vc->conf.prim_elmnt_count);
3512 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3513
3514 for (i = 0, j = 0 ; i < nmax ; i++) {
3515 /* j counts valid entries for this BVD */
3516 if (vc->conf.phys_refnum[i] != 0xffffffff)
3517 j++;
3518 if (vc->conf.phys_refnum[i] == refnum) {
3519 *bvd = &vc->conf;
3520 *idx = i;
3521 return sec * cnt + j - 1;
3522 }
3523 }
3524 if (vc->other_bvds == NULL)
3525 goto bad;
3526
3527 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3528 struct vd_config *vd = vc->other_bvds[n-1];
3529 sec = vd->sec_elmnt_seq;
3530 if (sec == DDF_UNUSED_BVD)
3531 continue;
3532 for (i = 0, j = 0 ; i < nmax ; i++) {
3533 if (vd->phys_refnum[i] != 0xffffffff)
3534 j++;
3535 if (vd->phys_refnum[i] == refnum) {
3536 *bvd = vd;
3537 *idx = i;
3538 return sec * cnt + j - 1;
3539 }
3540 }
3541 }
3542 bad:
3543 *bvd = NULL;
3544 return DDF_NOTFOUND;
3545 }
3546
3547 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3548 {
3549 /* Given a container loaded by load_super_ddf_all,
3550 * extract information about all the arrays into
3551 * an mdinfo tree.
3552 *
3553 * For each vcl in conflist: create an mdinfo, fill it in,
3554 * then look for matching devices (phys_refnum) in dlist
3555 * and create appropriate device mdinfo.
3556 */
3557 struct ddf_super *ddf = st->sb;
3558 struct mdinfo *rest = NULL;
3559 struct vcl *vc;
3560
3561 for (vc = ddf->conflist ; vc ; vc=vc->next)
3562 {
3563 unsigned int i;
3564 unsigned int j;
3565 struct mdinfo *this;
3566 char *ep;
3567 __u32 *cptr;
3568 unsigned int pd;
3569
3570 if (subarray &&
3571 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3572 *ep != '\0'))
3573 continue;
3574
3575 if (vc->conf.sec_elmnt_count > 1) {
3576 if (check_secondary(vc) != 0)
3577 continue;
3578 }
3579
3580 this = xcalloc(1, sizeof(*this));
3581 this->next = rest;
3582 rest = this;
3583
3584 if (layout_ddf2md(&vc->conf, &this->array))
3585 continue;
3586 this->array.md_minor = -1;
3587 this->array.major_version = -1;
3588 this->array.minor_version = -2;
3589 cptr = (__u32 *)(vc->conf.guid + 16);
3590 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3591 this->array.utime = DECADE +
3592 __be32_to_cpu(vc->conf.timestamp);
3593 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3594
3595 i = vc->vcnum;
3596 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3597 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3598 DDF_init_full) {
3599 this->array.state = 0;
3600 this->resync_start = 0;
3601 } else {
3602 this->array.state = 1;
3603 this->resync_start = MaxSector;
3604 }
3605 memcpy(this->name, ddf->virt->entries[i].name, 16);
3606 this->name[16]=0;
3607 for(j=0; j<16; j++)
3608 if (this->name[j] == ' ')
3609 this->name[j] = 0;
3610
3611 memset(this->uuid, 0, sizeof(this->uuid));
3612 this->component_size = __be64_to_cpu(vc->conf.blocks);
3613 this->array.size = this->component_size / 2;
3614 this->container_member = i;
3615
3616 ddf->currentconf = vc;
3617 uuid_from_super_ddf(st, this->uuid);
3618 ddf->currentconf = NULL;
3619
3620 sprintf(this->text_version, "/%s/%d",
3621 st->container_devnm, this->container_member);
3622
3623 for (pd = 0; pd < __be16_to_cpu(ddf->phys->used_pdes); pd++) {
3624 struct mdinfo *dev;
3625 struct dl *d;
3626 const struct vd_config *bvd;
3627 unsigned int iphys;
3628 int stt;
3629
3630 if (ddf->phys->entries[pd].refnum == 0xFFFFFFFF)
3631 continue;
3632
3633 stt = __be16_to_cpu(ddf->phys->entries[pd].state);
3634 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3635 != DDF_Online)
3636 continue;
3637
3638 i = get_pd_index_from_refnum(
3639 vc, ddf->phys->entries[pd].refnum,
3640 ddf->mppe, &bvd, &iphys);
3641 if (i == DDF_NOTFOUND)
3642 continue;
3643
3644 this->array.working_disks++;
3645
3646 for (d = ddf->dlist; d ; d=d->next)
3647 if (d->disk.refnum ==
3648 ddf->phys->entries[pd].refnum)
3649 break;
3650 if (d == NULL)
3651 /* Haven't found that one yet, maybe there are others */
3652 continue;
3653
3654 dev = xcalloc(1, sizeof(*dev));
3655 dev->next = this->devs;
3656 this->devs = dev;
3657
3658 dev->disk.number = __be32_to_cpu(d->disk.refnum);
3659 dev->disk.major = d->major;
3660 dev->disk.minor = d->minor;
3661 dev->disk.raid_disk = i;
3662 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3663 dev->recovery_start = MaxSector;
3664
3665 dev->events = __be32_to_cpu(ddf->primary.seq);
3666 dev->data_offset =
3667 __be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3668 dev->component_size = __be64_to_cpu(bvd->blocks);
3669 if (d->devname)
3670 strcpy(dev->name, d->devname);
3671 }
3672 }
3673 return rest;
3674 }
3675
3676 static int store_super_ddf(struct supertype *st, int fd)
3677 {
3678 struct ddf_super *ddf = st->sb;
3679 unsigned long long dsize;
3680 void *buf;
3681 int rc;
3682
3683 if (!ddf)
3684 return 1;
3685
3686 if (!get_dev_size(fd, NULL, &dsize))
3687 return 1;
3688
3689 if (ddf->dlist || ddf->conflist) {
3690 struct stat sta;
3691 struct dl *dl;
3692 int ofd, ret;
3693
3694 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3695 pr_err("%s: file descriptor for invalid device\n",
3696 __func__);
3697 return 1;
3698 }
3699 for (dl = ddf->dlist; dl; dl = dl->next)
3700 if (dl->major == (int)major(sta.st_rdev) &&
3701 dl->minor == (int)minor(sta.st_rdev))
3702 break;
3703 if (!dl) {
3704 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3705 (int)major(sta.st_rdev),
3706 (int)minor(sta.st_rdev));
3707 return 1;
3708 }
3709 /*
3710 For DDF, writing to just one disk makes no sense.
3711 We would run the risk of writing inconsistent meta data
3712 to the devices. So just call __write_init_super_ddf and
3713 write to all devices, including this one.
3714 Use the fd passed to this function, just in case dl->fd
3715 is invalid.
3716 */
3717 ofd = dl->fd;
3718 dl->fd = fd;
3719 ret = __write_init_super_ddf(st);
3720 dl->fd = ofd;
3721 return ret;
3722 }
3723
3724 if (posix_memalign(&buf, 512, 512) != 0)
3725 return 1;
3726 memset(buf, 0, 512);
3727
3728 lseek64(fd, dsize-512, 0);
3729 rc = write(fd, buf, 512);
3730 free(buf);
3731 if (rc < 0)
3732 return 1;
3733 return 0;
3734 }
3735
3736 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3737 {
3738 /*
3739 * return:
3740 * 0 same, or first was empty, and second was copied
3741 * 1 second had wrong number
3742 * 2 wrong uuid
3743 * 3 wrong other info
3744 */
3745 struct ddf_super *first = st->sb;
3746 struct ddf_super *second = tst->sb;
3747 struct dl *dl1, *dl2;
3748 struct vcl *vl1, *vl2;
3749 unsigned int max_vds, max_pds, pd, vd;
3750
3751 if (!first) {
3752 st->sb = tst->sb;
3753 tst->sb = NULL;
3754 return 0;
3755 }
3756
3757 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3758 return 2;
3759
3760 if (first->anchor.seq != second->anchor.seq) {
3761 dprintf("%s: sequence number mismatch %u/%u\n", __func__,
3762 __be32_to_cpu(first->anchor.seq),
3763 __be32_to_cpu(second->anchor.seq));
3764 return 3;
3765 }
3766 if (first->max_part != second->max_part ||
3767 first->phys->used_pdes != second->phys->used_pdes ||
3768 first->virt->populated_vdes != second->virt->populated_vdes) {
3769 dprintf("%s: PD/VD number mismatch\n", __func__);
3770 return 3;
3771 }
3772
3773 max_pds = __be16_to_cpu(first->phys->used_pdes);
3774 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3775 for (pd = 0; pd < max_pds; pd++)
3776 if (first->phys->entries[pd].refnum == dl2->disk.refnum)
3777 break;
3778 if (pd == max_pds) {
3779 dprintf("%s: no match for disk %08x\n", __func__,
3780 __be32_to_cpu(dl2->disk.refnum));
3781 return 3;
3782 }
3783 }
3784
3785 max_vds = __be16_to_cpu(first->active->max_vd_entries);
3786 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3787 if (vl2->conf.magic != DDF_VD_CONF_MAGIC)
3788 continue;
3789 for (vd = 0; vd < max_vds; vd++)
3790 if (!memcmp(first->virt->entries[vd].guid,
3791 vl2->conf.guid, DDF_GUID_LEN))
3792 break;
3793 if (vd == max_vds) {
3794 dprintf("%s: no match for VD config\n", __func__);
3795 return 3;
3796 }
3797 }
3798 /* FIXME should I look at anything else? */
3799
3800 /*
3801 At this point we are fairly sure that the meta data matches.
3802 But the new disk may contain additional local data.
3803 Add it to the super block.
3804 */
3805 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3806 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3807 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3808 DDF_GUID_LEN))
3809 break;
3810 if (vl1) {
3811 if (vl1->other_bvds != NULL &&
3812 vl1->conf.sec_elmnt_seq !=
3813 vl2->conf.sec_elmnt_seq) {
3814 dprintf("%s: adding BVD %u\n", __func__,
3815 vl2->conf.sec_elmnt_seq);
3816 add_other_bvd(vl1, &vl2->conf,
3817 first->conf_rec_len*512);
3818 }
3819 continue;
3820 }
3821
3822 if (posix_memalign((void **)&vl1, 512,
3823 (first->conf_rec_len*512 +
3824 offsetof(struct vcl, conf))) != 0) {
3825 pr_err("%s could not allocate vcl buf\n",
3826 __func__);
3827 return 3;
3828 }
3829
3830 vl1->next = first->conflist;
3831 vl1->block_sizes = NULL;
3832 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3833 if (alloc_other_bvds(first, vl1) != 0) {
3834 pr_err("%s could not allocate other bvds\n",
3835 __func__);
3836 free(vl1);
3837 return 3;
3838 }
3839 for (vd = 0; vd < max_vds; vd++)
3840 if (!memcmp(first->virt->entries[vd].guid,
3841 vl1->conf.guid, DDF_GUID_LEN))
3842 break;
3843 vl1->vcnum = vd;
3844 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
3845 first->conflist = vl1;
3846 }
3847
3848 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3849 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3850 if (dl1->disk.refnum == dl2->disk.refnum)
3851 break;
3852 if (dl1)
3853 continue;
3854
3855 if (posix_memalign((void **)&dl1, 512,
3856 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
3857 != 0) {
3858 pr_err("%s could not allocate disk info buffer\n",
3859 __func__);
3860 return 3;
3861 }
3862 memcpy(dl1, dl2, sizeof(*dl1));
3863 dl1->mdupdate = NULL;
3864 dl1->next = first->dlist;
3865 dl1->fd = -1;
3866 for (pd = 0; pd < max_pds; pd++)
3867 if (first->phys->entries[pd].refnum == dl1->disk.refnum)
3868 break;
3869 dl1->pdnum = pd;
3870 if (dl2->spare) {
3871 if (posix_memalign((void **)&dl1->spare, 512,
3872 first->conf_rec_len*512) != 0) {
3873 pr_err("%s could not allocate spare info buf\n",
3874 __func__);
3875 return 3;
3876 }
3877 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
3878 }
3879 for (vd = 0 ; vd < first->max_part ; vd++) {
3880 if (!dl2->vlist[vd]) {
3881 dl1->vlist[vd] = NULL;
3882 continue;
3883 }
3884 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
3885 if (!memcmp(vl1->conf.guid,
3886 dl2->vlist[vd]->conf.guid,
3887 DDF_GUID_LEN))
3888 break;
3889 }
3890 dl1->vlist[vd] = vl1;
3891 }
3892 first->dlist = dl1;
3893 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
3894 dl1->disk.refnum);
3895 }
3896
3897 return 0;
3898 }
3899
3900 #ifndef MDASSEMBLE
3901 /*
3902 * A new array 'a' has been started which claims to be instance 'inst'
3903 * within container 'c'.
3904 * We need to confirm that the array matches the metadata in 'c' so
3905 * that we don't corrupt any metadata.
3906 */
3907 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
3908 {
3909 struct ddf_super *ddf = c->sb;
3910 int n = atoi(inst);
3911 if (all_ff(ddf->virt->entries[n].guid)) {
3912 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
3913 return -ENODEV;
3914 }
3915 dprintf("ddf: open_new %d\n", n);
3916 a->info.container_member = n;
3917 return 0;
3918 }
3919
3920 /*
3921 * The array 'a' is to be marked clean in the metadata.
3922 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
3923 * clean up to that point (in sectors). If that cannot be recorded in the
3924 * metadata, then leave it as dirty.
3925 *
3926 * For DDF, we need to clear the DDF_state_inconsistent bit in the
3927 * !global! virtual_disk.virtual_entry structure.
3928 */
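/*
 * The init_state handling below uses this mapping: a completed
 * resync is recorded as DDF_init_full, resync_start == 0 as
 * DDF_init_not, and anything in between as DDF_init_quick.
 */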
3929 static int ddf_set_array_state(struct active_array *a, int consistent)
3930 {
3931 struct ddf_super *ddf = a->container->sb;
3932 int inst = a->info.container_member;
3933 int old = ddf->virt->entries[inst].state;
3934 if (consistent == 2) {
3935 /* Should check if a recovery should be started FIXME */
3936 consistent = 1;
3937 if (!is_resync_complete(&a->info))
3938 consistent = 0;
3939 }
3940 if (consistent)
3941 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
3942 else
3943 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
3944 if (old != ddf->virt->entries[inst].state)
3945 ddf_set_updates_pending(ddf);
3946
3947 old = ddf->virt->entries[inst].init_state;
3948 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
3949 if (is_resync_complete(&a->info))
3950 ddf->virt->entries[inst].init_state |= DDF_init_full;
3951 else if (a->info.resync_start == 0)
3952 ddf->virt->entries[inst].init_state |= DDF_init_not;
3953 else
3954 ddf->virt->entries[inst].init_state |= DDF_init_quick;
3955 if (old != ddf->virt->entries[inst].init_state)
3956 ddf_set_updates_pending(ddf);
3957
3958 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
3959 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
3960 consistent?"clean":"dirty",
3961 a->info.resync_start);
3962 return consistent;
3963 }
3964
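/*
 * Compute the DDF state of a single BVD from the states of the
 * physical disks backing its primary elements.  Only disks that are
 * Online and neither Failed nor Rebuilding count as working.
 * For example (a sketch, RAID5 with n_prim == 4): 4 working disks
 * give DDF_state_optimal, 3 give DDF_state_degraded, fewer give
 * DDF_state_failed.
 */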
3965 static int get_bvd_state(const struct ddf_super *ddf,
3966 const struct vd_config *vc)
3967 {
3968 unsigned int i, n_bvd, working = 0;
3969 unsigned int n_prim = __be16_to_cpu(vc->prim_elmnt_count);
3970 int pd, st, state;
3971 for (i = 0; i < n_prim; i++) {
3972 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
3973 continue;
3974 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
3975 if (pd < 0)
3976 continue;
3977 st = __be16_to_cpu(ddf->phys->entries[pd].state);
3978 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3979 == DDF_Online)
3980 working++;
3981 }
3982
3983 state = DDF_state_degraded;
3984 if (working == n_prim)
3985 state = DDF_state_optimal;
3986 else
3987 switch (vc->prl) {
3988 case DDF_RAID0:
3989 case DDF_CONCAT:
3990 case DDF_JBOD:
3991 state = DDF_state_failed;
3992 break;
3993 case DDF_RAID1:
3994 if (working == 0)
3995 state = DDF_state_failed;
3996 else if (working >= 2)
3997 state = DDF_state_part_optimal;
3998 break;
3999 case DDF_RAID4:
4000 case DDF_RAID5:
4001 if (working < n_prim - 1)
4002 state = DDF_state_failed;
4003 break;
4004 case DDF_RAID6:
4005 if (working < n_prim - 2)
4006 state = DDF_state_failed;
4007 else if (working == n_prim - 1)
4008 state = DDF_state_part_optimal;
4009 break;
4010 }
4011 return state;
4012 }
4013
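/*
 * Combine the states of two BVDs into the state of the containing
 * SVD.  For a mirrored SRL one optimal copy keeps the SVD at least
 * part-optimal; for the striped/concatenated SRLs a single failed
 * BVD fails the whole SVD.
 */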
4014 static int secondary_state(int state, int other, int seclevel)
4015 {
4016 if (state == DDF_state_optimal && other == DDF_state_optimal)
4017 return DDF_state_optimal;
4018 if (seclevel == DDF_2MIRRORED) {
4019 if (state == DDF_state_optimal || other == DDF_state_optimal)
4020 return DDF_state_part_optimal;
4021 if (state == DDF_state_failed && other == DDF_state_failed)
4022 return DDF_state_failed;
4023 return DDF_state_degraded;
4024 } else {
4025 if (state == DDF_state_failed || other == DDF_state_failed)
4026 return DDF_state_failed;
4027 if (state == DDF_state_degraded || other == DDF_state_degraded)
4028 return DDF_state_degraded;
4029 return DDF_state_part_optimal;
4030 }
4031 }
4032
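/*
 * Fold the state of the first BVD with each entry of other_bvds[]
 * using secondary_state() to obtain the overall SVD state.
 */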
4033 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4034 {
4035 int state = get_bvd_state(ddf, &vcl->conf);
4036 unsigned int i;
4037 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4038 state = secondary_state(
4039 state,
4040 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4041 vcl->conf.srl);
4042 }
4043 return state;
4044 }
4045
4046 /*
4047 * The state of each disk is stored in the global phys_disk structure
4048 * in phys_disk.entries[n].state.
4049 * This makes various combinations awkward.
4050 * - When a device fails in any array, it must be failed in all arrays
4051 * that include a part of this device.
4052 * - When a component is rebuilding, we cannot include it officially in the
4053 * array unless this is the only array that uses the device.
4054 *
4055 * So: when transitioning:
4056 * Online -> failed, just set failed flag. monitor will propagate
4057 * spare -> online, the device might need to be added to the array.
4058 * spare -> failed, just set failed. Don't worry if in array or not.
4059 */
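/*
 * For example, when a spare becomes in_sync below, the slot's
 * phys_refnum and LBA offset are filled in and the disk's type is
 * switched from Global_Spare to Active_in_VD.
 */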
4060 static void ddf_set_disk(struct active_array *a, int n, int state)
4061 {
4062 struct ddf_super *ddf = a->container->sb;
4063 unsigned int inst = a->info.container_member, n_bvd;
4064 struct vcl *vcl;
4065 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4066 &n_bvd, &vcl);
4067 int pd;
4068 struct mdinfo *mdi;
4069 struct dl *dl;
4070
4071 if (vc == NULL) {
4072 dprintf("ddf: cannot find instance %d!!\n", inst);
4073 return;
4074 }
4075 /* Find the matching slot in 'info'. */
4076 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4077 if (mdi->disk.raid_disk == n)
4078 break;
4079 if (!mdi)
4080 return;
4081
4082 /* and find the 'dl' entry corresponding to that. */
4083 for (dl = ddf->dlist; dl; dl = dl->next)
4084 if (mdi->state_fd >= 0 &&
4085 mdi->disk.major == dl->major &&
4086 mdi->disk.minor == dl->minor)
4087 break;
4088 if (!dl)
4089 return;
4090
4091 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4092 if (pd < 0 || pd != dl->pdnum) {
4093 /* disk doesn't currently exist or has changed.
4094 * If it is now in_sync, insert it. */
4095 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4096 __func__, dl->pdnum, dl->major, dl->minor,
4097 dl->disk.refnum);
4098 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4099 __func__, inst, n_bvd, vc->phys_refnum[n_bvd], pd);
4100 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4101 pd = dl->pdnum; /* FIXME: is this really correct ? */
4102 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4103 LBA_OFFSET(ddf, vc)[n_bvd] =
4104 __cpu_to_be64(mdi->data_offset);
4105 ddf->phys->entries[pd].type &=
4106 ~__cpu_to_be16(DDF_Global_Spare);
4107 ddf->phys->entries[pd].type |=
4108 __cpu_to_be16(DDF_Active_in_VD);
4109 ddf_set_updates_pending(ddf);
4110 }
4111 } else {
4112 int old = ddf->phys->entries[pd].state;
4113 if (state & DS_FAULTY)
4114 ddf->phys->entries[pd].state |= __cpu_to_be16(DDF_Failed);
4115 if (state & DS_INSYNC) {
4116 ddf->phys->entries[pd].state |= __cpu_to_be16(DDF_Online);
4117 ddf->phys->entries[pd].state &= __cpu_to_be16(~DDF_Rebuilding);
4118 }
4119 if (old != ddf->phys->entries[pd].state)
4120 ddf_set_updates_pending(ddf);
4121 }
4122
4123 dprintf("ddf: set_disk %d to %x\n", n, state);
4124
4125 /* Now we need to check the state of the array and update
4126 * virtual_disk.entries[n].state.
4127 * It needs to be one of "optimal", "degraded", "failed".
4128 * I don't understand 'deleted' or 'missing'.
4129 */
4130 state = get_svd_state(ddf, vcl);
4131
4132 if (ddf->virt->entries[inst].state !=
4133 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4134 | state)) {
4135
4136 ddf->virt->entries[inst].state =
4137 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4138 | state;
4139 ddf_set_updates_pending(ddf);
4140 }
4141
4142 }
4143
4144 static void ddf_sync_metadata(struct supertype *st)
4145 {
4146
4147 /*
4148 * Write all data to all devices.
4149 * Later, we might be able to track whether only local changes
4150 * have been made, or whether any global data has been changed,
4151 * but ddf is sufficiently weird that it probably always
4152 * changes global data ....
4153 */
4154 struct ddf_super *ddf = st->sb;
4155 if (!ddf->updates_pending)
4156 return;
4157 ddf->updates_pending = 0;
4158 __write_init_super_ddf(st);
4159 dprintf("ddf: sync_metadata\n");
4160 }
4161
4162 static void ddf_process_update(struct supertype *st,
4163 struct metadata_update *update)
4164 {
4165 /* Apply this update to the metadata.
4166 * The first 4 bytes are a DDF_*_MAGIC which guides
4167 * our actions.
4168 * Possible updates are:
4169 * DDF_PHYS_RECORDS_MAGIC
4170 * Add a new physical device or remove an old one.
4171 * Changes to this record only happen implicitly.
4172 * used_pdes is the device number.
4173 * DDF_VIRT_RECORDS_MAGIC
4174 * Add a new VD. Possibly also change the 'access' bits.
4175 * populated_vdes is the entry number.
4176 * DDF_VD_CONF_MAGIC
4177 * New or updated VD. The VIRT_RECORD must already
4178 * exist. For an update, phys_refnum and lba_offset
4179 * (at least) are updated, and the VD_CONF must
4180 * be written to precisely those devices listed with
4181 * a phys_refnum.
4182 * DDF_SPARE_ASSIGN_MAGIC
4183 * replacement Spare Assignment Record... but for which device?
4184 *
4185 * So, e.g.:
4186 * - to create a new array, we send a VIRT_RECORD and
4187 * a VD_CONF. Then assemble and start the array.
4188 * - to activate a spare we send a VD_CONF to add the phys_refnum
4189 * and offset. This will also mark the spare as active with
4190 * a spare-assignment record.
4191 */
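/*
 * A rough sketch of the "create a new array" case described above
 * (hypothetical buffer handling, relying on the existing
 * append_metadata_update() helper used elsewhere in mdadm):
 *
 *	struct virtual_disk *vd = xmalloc(sizeof(*vd) +
 *				sizeof(struct virtual_entry));
 *	vd->magic = DDF_VIRT_RECORDS_MAGIC;
 *	// fill in vd->entries[0] (guid, name, init/access state)
 *	append_metadata_update(st, vd,
 *			sizeof(*vd) + sizeof(struct virtual_entry));
 *
 *	struct vd_config *vc = xcalloc(1, ddf->conf_rec_len * 512);
 *	vc->magic = DDF_VD_CONF_MAGIC;
 *	// fill in guid, prl/rlq, phys_refnum[] and LBA offsets
 *	append_metadata_update(st, vc, ddf->conf_rec_len * 512);
 */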
4192 struct ddf_super *ddf = st->sb;
4193 __u32 *magic = (__u32*)update->buf;
4194 struct phys_disk *pd;
4195 struct virtual_disk *vd;
4196 struct vd_config *vc;
4197 struct vcl *vcl;
4198 struct dl *dl;
4199 unsigned int mppe;
4200 unsigned int ent;
4201 unsigned int pdnum, pd2;
4202
4203 dprintf("Process update %x\n", *magic);
4204
4205 switch (*magic) {
4206 case DDF_PHYS_RECORDS_MAGIC:
4207
4208 if (update->len != (sizeof(struct phys_disk) +
4209 sizeof(struct phys_disk_entry)))
4210 return;
4211 pd = (struct phys_disk*)update->buf;
4212
4213 ent = __be16_to_cpu(pd->used_pdes);
4214 if (ent >= __be16_to_cpu(ddf->phys->max_pdes))
4215 return;
4216 if (pd->entries[0].state & __cpu_to_be16(DDF_Missing)) {
4217 struct dl **dlp;
4218 /* removing this disk. */
4219 ddf->phys->entries[ent].state |= __cpu_to_be16(DDF_Missing);
4220 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4221 struct dl *dl = *dlp;
4222 if (dl->pdnum == (signed)ent) {
4223 close(dl->fd);
4224 dl->fd = -1;
4225 /* FIXME this doesn't free
4226 * dl->devname */
4227 update->space = dl;
4228 *dlp = dl->next;
4229 break;
4230 }
4231 }
4232 ddf_set_updates_pending(ddf);
4233 return;
4234 }
4235 if (!all_ff(ddf->phys->entries[ent].guid))
4236 return;
4237 ddf->phys->entries[ent] = pd->entries[0];
4238 ddf->phys->used_pdes = __cpu_to_be16(1 +
4239 __be16_to_cpu(ddf->phys->used_pdes));
4240 ddf_set_updates_pending(ddf);
4241 if (ddf->add_list) {
4242 struct active_array *a;
4243 struct dl *al = ddf->add_list;
4244 ddf->add_list = al->next;
4245
4246 al->next = ddf->dlist;
4247 ddf->dlist = al;
4248
4249 /* As a device has been added, we should check
4250 * for any degraded devices that might make
4251 * use of this spare */
4252 for (a = st->arrays ; a; a=a->next)
4253 a->check_degraded = 1;
4254 }
4255 break;
4256
4257 case DDF_VIRT_RECORDS_MAGIC:
4258
4259 if (update->len != (sizeof(struct virtual_disk) +
4260 sizeof(struct virtual_entry)))
4261 return;
4262 vd = (struct virtual_disk*)update->buf;
4263
4264 ent = find_unused_vde(ddf);
4265 if (ent == DDF_NOTFOUND)
4266 return;
4267 ddf->virt->entries[ent] = vd->entries[0];
4268 ddf->virt->populated_vdes = __cpu_to_be16(1 +
4269 __be16_to_cpu(ddf->virt->populated_vdes));
4270 ddf_set_updates_pending(ddf);
4271 break;
4272
4273 case DDF_VD_CONF_MAGIC:
4274 dprintf("len %d %d\n", update->len, ddf->conf_rec_len);
4275
4276 mppe = __be16_to_cpu(ddf->anchor.max_primary_element_entries);
4277 if ((unsigned)update->len != ddf->conf_rec_len * 512)
4278 return;
4279 vc = (struct vd_config*)update->buf;
4280 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4281 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4282 break;
4283 dprintf("vcl = %p\n", vcl);
4284 if (vcl) {
4285 /* An update, just copy the phys_refnum and lba_offset
4286 * fields
4287 */
4288 struct vd_config *conf = &vcl->conf;
4289 if (vcl->other_bvds != NULL &&
4290 conf->sec_elmnt_seq != vc->sec_elmnt_seq) {
4291 unsigned int i;
4292 for (i = 1; i < conf->sec_elmnt_count; i++)
4293 if (vcl->other_bvds[i-1]->sec_elmnt_seq
4294 == vc->sec_elmnt_seq)
4295 break;
4296 if (i == conf->sec_elmnt_count) {
4297 pr_err("%s/DDF_VD_CONF_MAGIC: BVD %u not found\n",
4298 __func__, vc->sec_elmnt_seq);
4299 return;
4300 }
4301 conf = vcl->other_bvds[i-1];
4302 }
4303 memcpy(conf->phys_refnum, vc->phys_refnum,
4304 mppe * (sizeof(__u32) + sizeof(__u64)));
4305 } else {
4306 /* A new VD_CONF */
4307 if (!update->space)
4308 return;
4309 vcl = update->space;
4310 update->space = NULL;
4311 vcl->next = ddf->conflist;
4312 memcpy(&vcl->conf, vc, update->len);
4313 ent = find_vde_by_guid(ddf, vc->guid);
4314 if (ent == DDF_NOTFOUND)
4315 return;
4316 vcl->vcnum = ent;
4317 ddf->conflist = vcl;
4318 }
4319 /* Set DDF_Transition on all Failed devices - to help
4320 * us detect those that are no longer in use
4321 */
4322 for (pdnum = 0; pdnum < __be16_to_cpu(ddf->phys->used_pdes); pdnum++)
4323 if (ddf->phys->entries[pdnum].state
4324 & __be16_to_cpu(DDF_Failed))
4325 ddf->phys->entries[pdnum].state
4326 |= __be16_to_cpu(DDF_Transition);
4327 /* Now make sure vlist is correct for each dl. */
4328 for (dl = ddf->dlist; dl; dl = dl->next) {
4329 unsigned int vn = 0;
4330 int in_degraded = 0;
4331 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4332 unsigned int dn, ibvd;
4333 const struct vd_config *conf;
4334 int vstate;
4335 dn = get_pd_index_from_refnum(vcl,
4336 dl->disk.refnum,
4337 ddf->mppe,
4338 &conf, &ibvd);
4339 if (dn == DDF_NOTFOUND)
4340 continue;
4341 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4342 dl->pdnum, dl->disk.refnum,
4343 guid_str(conf->guid),
4344 conf->sec_elmnt_seq, vn);
4345 /* Clear the Transition flag */
4346 if (ddf->phys->entries[dl->pdnum].state
4347 & __be16_to_cpu(DDF_Failed))
4348 ddf->phys->entries[dl->pdnum].state &=
4349 ~__be16_to_cpu(DDF_Transition);
4350 dl->vlist[vn++] = vcl;
4351 vstate = ddf->virt->entries[vcl->vcnum].state
4352 & DDF_state_mask;
4353 if (vstate == DDF_state_degraded ||
4354 vstate == DDF_state_part_optimal)
4355 in_degraded = 1;
4356 }
4357 while (vn < ddf->max_part)
4358 dl->vlist[vn++] = NULL;
4359 if (dl->vlist[0]) {
4360 ddf->phys->entries[dl->pdnum].type &=
4361 ~__cpu_to_be16(DDF_Global_Spare);
4362 if (!(ddf->phys->entries[dl->pdnum].type &
4363 __cpu_to_be16(DDF_Active_in_VD))) {
4364 ddf->phys->entries[dl->pdnum].type |=
4365 __cpu_to_be16(DDF_Active_in_VD);
4366 if (in_degraded)
4367 ddf->phys->entries[dl->pdnum].state |=
4368 __cpu_to_be16(DDF_Rebuilding);
4369 }
4370 }
4371 if (dl->spare) {
4372 ddf->phys->entries[dl->pdnum].type &=
4373 ~__cpu_to_be16(DDF_Global_Spare);
4374 ddf->phys->entries[dl->pdnum].type |=
4375 __cpu_to_be16(DDF_Spare);
4376 }
4377 if (!dl->vlist[0] && !dl->spare) {
4378 ddf->phys->entries[dl->pdnum].type |=
4379 __cpu_to_be16(DDF_Global_Spare);
4380 ddf->phys->entries[dl->pdnum].type &=
4381 ~__cpu_to_be16(DDF_Spare |
4382 DDF_Active_in_VD);
4383 }
4384 }
4385
4386 /* Now remove any 'Failed' devices that are not part
4387 * of any VD. They will have the Transition flag set.
4388 * Once done, we need to update all dl->pdnum numbers.
4389 */
4390 pd2 = 0;
4391 for (pdnum = 0; pdnum < __be16_to_cpu(ddf->phys->used_pdes); pdnum++)
4392 if ((ddf->phys->entries[pdnum].state
4393 & __be16_to_cpu(DDF_Failed))
4394 && (ddf->phys->entries[pdnum].state
4395 & __be16_to_cpu(DDF_Transition)))
4396 /* skip this one */;
4397 else if (pdnum == pd2)
4398 pd2++;
4399 else {
4400 ddf->phys->entries[pd2] = ddf->phys->entries[pdnum];
4401 for (dl = ddf->dlist; dl; dl = dl->next)
4402 if (dl->pdnum == (int)pdnum)
4403 dl->pdnum = pd2;
4404 pd2++;
4405 }
4406 ddf->phys->used_pdes = __cpu_to_be16(pd2);
4407 while (pd2 < pdnum) {
4408 memset(ddf->phys->entries[pd2].guid, 0xff, DDF_GUID_LEN);
4409 pd2++;
4410 }
4411
4412 ddf_set_updates_pending(ddf);
4413 break;
4414 case DDF_SPARE_ASSIGN_MAGIC:
4415 default: break;
4416 }
4417 }
4418
4419 static void ddf_prepare_update(struct supertype *st,
4420 struct metadata_update *update)
4421 {
4422 /* This update arrived at managemon.
4423 * We are about to pass it to monitor.
4424 * If a malloc is needed, do it here.
4425 */
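/* The space buffer is sized for a struct vcl holding one full
 * configuration record, which is what the DDF_VD_CONF_MAGIC branch
 * of ddf_process_update installs when the VD is new.
 */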
4426 struct ddf_super *ddf = st->sb;
4427 __u32 *magic = (__u32*)update->buf;
4428 if (*magic == DDF_VD_CONF_MAGIC)
4429 if (posix_memalign(&update->space, 512,
4430 offsetof(struct vcl, conf)
4431 + ddf->conf_rec_len * 512) != 0)
4432 update->space = NULL;
4433 }
4434
4435 /*
4436 * Check if the array 'a' is degraded but not failed.
4437 * If it is, find as many spares as are available and needed and
4438 * arrange for their inclusion.
4439 * We only choose devices which are not already in the array,
4440 * and prefer those with a spare-assignment to this array;
4441 * otherwise we choose global spares - assuming always that
4442 * there is enough room.
4443 * For each spare that we assign, we return an 'mdinfo' which
4444 * describes the position for the device in the array.
4445 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4446 * the new phys_refnum and lba_offset values.
4447 *
4448 * Only worry about BVDs at the moment.
4449 */
4450 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4451 struct metadata_update **updates)
4452 {
4453 int working = 0;
4454 struct mdinfo *d;
4455 struct ddf_super *ddf = a->container->sb;
4456 int global_ok = 0;
4457 struct mdinfo *rv = NULL;
4458 struct mdinfo *di;
4459 struct metadata_update *mu;
4460 struct dl *dl;
4461 int i;
4462 struct vcl *vcl;
4463 struct vd_config *vc;
4464 unsigned int n_bvd;
4465
4466 for (d = a->info.devs ; d ; d = d->next) {
4467 if ((d->curr_state & DS_FAULTY) &&
4468 d->state_fd >= 0)
4469 /* wait for Removal to happen */
4470 return NULL;
4471 if (d->state_fd >= 0)
4472 working ++;
4473 }
4474
4475 dprintf("ddf_activate: working=%d (%d) level=%d\n", working, a->info.array.raid_disks,
4476 a->info.array.level);
4477 if (working == a->info.array.raid_disks)
4478 return NULL; /* array not degraded */
4479 switch (a->info.array.level) {
4480 case 1:
4481 if (working == 0)
4482 return NULL; /* failed */
4483 break;
4484 case 4:
4485 case 5:
4486 if (working < a->info.array.raid_disks - 1)
4487 return NULL; /* failed */
4488 break;
4489 case 6:
4490 if (working < a->info.array.raid_disks - 2)
4491 return NULL; /* failed */
4492 break;
4493 default: /* concat or stripe */
4494 return NULL; /* failed */
4495 }
4496
4497 /* For each slot, if it is not working, find a spare */
4498 dl = ddf->dlist;
4499 for (i = 0; i < a->info.array.raid_disks; i++) {
4500 for (d = a->info.devs ; d ; d = d->next)
4501 if (d->disk.raid_disk == i)
4502 break;
4503 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4504 if (d && (d->state_fd >= 0))
4505 continue;
4506
4507 /* OK, this device needs recovery. Find a spare */
4508 again:
4509 for ( ; dl ; dl = dl->next) {
4510 unsigned long long esize;
4511 unsigned long long pos;
4512 struct mdinfo *d2;
4513 int is_global = 0;
4514 int is_dedicated = 0;
4515 struct extent *ex;
4516 unsigned int j;
4517 /* If in this array, skip */
4518 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4519 if (d2->state_fd >= 0 &&
4520 d2->disk.major == dl->major &&
4521 d2->disk.minor == dl->minor) {
4522 dprintf("%x:%x already in array\n", dl->major, dl->minor);
4523 break;
4524 }
4525 if (d2)
4526 continue;
4527 if (ddf->phys->entries[dl->pdnum].type &
4528 __cpu_to_be16(DDF_Spare)) {
4529 /* Check spare assign record */
4530 if (dl->spare) {
4531 if (dl->spare->type & DDF_spare_dedicated) {
4532 /* check spare_ents for guid */
4533 for (j = 0 ;
4534 j < __be16_to_cpu(dl->spare->populated);
4535 j++) {
4536 if (memcmp(dl->spare->spare_ents[j].guid,
4537 ddf->virt->entries[a->info.container_member].guid,
4538 DDF_GUID_LEN) == 0)
4539 is_dedicated = 1;
4540 }
4541 } else
4542 is_global = 1;
4543 }
4544 } else if (ddf->phys->entries[dl->pdnum].type &
4545 __cpu_to_be16(DDF_Global_Spare)) {
4546 is_global = 1;
4547 } else if (!(ddf->phys->entries[dl->pdnum].state &
4548 __cpu_to_be16(DDF_Failed))) {
4549 /* we can possibly use some of this */
4550 is_global = 1;
4551 }
4552 if ( ! (is_dedicated ||
4553 (is_global && global_ok))) {
4554 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4555 is_dedicated, is_global);
4556 continue;
4557 }
4558
4559 /* We are allowed to use this device - is there space?
4560 * We need a->info.component_size sectors */
4561 ex = get_extents(ddf, dl);
4562 if (!ex) {
4563 dprintf("cannot get extents\n");
4564 continue;
4565 }
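/* Walk the extent list looking for a gap of at least
 * component_size sectors: before the first extent, between two
 * extents, or after the last one.
 */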
4566 j = 0; pos = 0;
4567 esize = 0;
4568
4569 do {
4570 esize = ex[j].start - pos;
4571 if (esize >= a->info.component_size)
4572 break;
4573 pos = ex[j].start + ex[j].size;
4574 j++;
4575 } while (ex[j-1].size);
4576
4577 free(ex);
4578 if (esize < a->info.component_size) {
4579 dprintf("%x:%x has no room: %llu %llu\n",
4580 dl->major, dl->minor,
4581 esize, a->info.component_size);
4582 /* No room */
4583 continue;
4584 }
4585
4586 /* Cool, we have a device with some space at pos */
4587 di = xcalloc(1, sizeof(*di));
4588 di->disk.number = i;
4589 di->disk.raid_disk = i;
4590 di->disk.major = dl->major;
4591 di->disk.minor = dl->minor;
4592 di->disk.state = 0;
4593 di->recovery_start = 0;
4594 di->data_offset = pos;
4595 di->component_size = a->info.component_size;
4596 di->container_member = dl->pdnum;
4597 di->next = rv;
4598 rv = di;
4599 dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
4600 i, pos);
4601
4602 break;
4603 }
4604 if (!dl && ! global_ok) {
4605 /* not enough dedicated spares, try global */
4606 global_ok = 1;
4607 dl = ddf->dlist;
4608 goto again;
4609 }
4610 }
4611
4612 if (!rv)
4613 /* No spares found */
4614 return rv;
4615 /* Now 'rv' has a list of devices to return.
4616 * Create a metadata_update record to update the
4617 * phys_refnum and lba_offset values
4618 */
4619 mu = xmalloc(sizeof(*mu));
4624 mu->buf = xmalloc(ddf->conf_rec_len * 512);
4625 mu->len = ddf->conf_rec_len * 512;
4626 mu->space = NULL;
4627 mu->space_list = NULL;
4628 mu->next = *updates;
4629 vc = find_vdcr(ddf, a->info.container_member, di->disk.raid_disk,
4630 &n_bvd, &vcl);
4631 memcpy(mu->buf, vc, ddf->conf_rec_len * 512);
4632
4633 vc = (struct vd_config*)mu->buf;
4634 for (di = rv ; di ; di = di->next) {
4635 vc->phys_refnum[di->disk.raid_disk] =
4636 ddf->phys->entries[di->container_member].refnum;
4637 LBA_OFFSET(ddf, vc)[di->disk.raid_disk]
4638 = __cpu_to_be64(di->data_offset);
4639 }
4640 *updates = mu;
4641 return rv;
4642 }
4643 #endif /* MDASSEMBLE */
4644
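/*
 * Default md layout for each RAID level when the user did not
 * specify one: left-symmetric for RAID5, rotating-N-continue for
 * RAID6, and near=2 copies (0x102) for RAID10.
 */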
4645 static int ddf_level_to_layout(int level)
4646 {
4647 switch(level) {
4648 case 0:
4649 case 1:
4650 return 0;
4651 case 5:
4652 return ALGORITHM_LEFT_SYMMETRIC;
4653 case 6:
4654 return ALGORITHM_ROTATING_N_CONTINUE;
4655 case 10:
4656 return 0x102;
4657 default:
4658 return UnSet;
4659 }
4660 }
4661
4662 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
4663 {
4664 if (level && *level == UnSet)
4665 *level = LEVEL_CONTAINER;
4666
4667 if (level && layout && *layout == UnSet)
4668 *layout = ddf_level_to_layout(*level);
4669 }
4670
4671 struct superswitch super_ddf = {
4672 #ifndef MDASSEMBLE
4673 .examine_super = examine_super_ddf,
4674 .brief_examine_super = brief_examine_super_ddf,
4675 .brief_examine_subarrays = brief_examine_subarrays_ddf,
4676 .export_examine_super = export_examine_super_ddf,
4677 .detail_super = detail_super_ddf,
4678 .brief_detail_super = brief_detail_super_ddf,
4679 .validate_geometry = validate_geometry_ddf,
4680 .write_init_super = write_init_super_ddf,
4681 .add_to_super = add_to_super_ddf,
4682 .remove_from_super = remove_from_super_ddf,
4683 .load_container = load_container_ddf,
4684 .copy_metadata = copy_metadata_ddf,
4685 #endif
4686 .match_home = match_home_ddf,
4687 .uuid_from_super= uuid_from_super_ddf,
4688 .getinfo_super = getinfo_super_ddf,
4689 .update_super = update_super_ddf,
4690
4691 .avail_size = avail_size_ddf,
4692
4693 .compare_super = compare_super_ddf,
4694
4695 .load_super = load_super_ddf,
4696 .init_super = init_super_ddf,
4697 .store_super = store_super_ddf,
4698 .free_super = free_super_ddf,
4699 .match_metadata_desc = match_metadata_desc_ddf,
4700 .container_content = container_content_ddf,
4701 .default_geometry = default_geometry_ddf,
4702
4703 .external = 1,
4704
4705 #ifndef MDASSEMBLE
4706 /* for mdmon */
4707 .open_new = ddf_open_new,
4708 .set_array_state= ddf_set_array_state,
4709 .set_disk = ddf_set_disk,
4710 .sync_metadata = ddf_sync_metadata,
4711 .process_update = ddf_process_update,
4712 .prepare_update = ddf_prepare_update,
4713 .activate_spare = ddf_activate_spare,
4714 #endif
4715 .name = "ddf",
4716 };