]> git.ipfire.org Git - thirdparty/mdadm.git/blob - super-ddf.c
DDF: add_to_super_ddf: RAID10 changes
[thirdparty/mdadm.git] / super-ddf.c
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF takes from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add
38 * second-in-decade-of-seventies to convert to linux timestamps.
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* The DDF metadata handling.
51 * DDF metadata lives at the end of the device.
52 * The last 512 byte block provides an 'anchor' which is used to locate
53 * the rest of the metadata which usually lives immediately behind the anchor.
54 *
55 * Note:
56 * - all multibyte numeric fields are bigendian.
57 * - all strings are space padded.
58 *
59 */
60
61 /* Primary Raid Level (PRL) */
62 #define DDF_RAID0 0x00
63 #define DDF_RAID1 0x01
64 #define DDF_RAID3 0x03
65 #define DDF_RAID4 0x04
66 #define DDF_RAID5 0x05
67 #define DDF_RAID1E 0x11
68 #define DDF_JBOD 0x0f
69 #define DDF_CONCAT 0x1f
70 #define DDF_RAID5E 0x15
71 #define DDF_RAID5EE 0x25
72 #define DDF_RAID6 0x06
73
74 /* Raid Level Qualifier (RLQ) */
75 #define DDF_RAID0_SIMPLE 0x00
76 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
77 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
78 #define DDF_RAID3_0 0x00 /* parity in first extent */
79 #define DDF_RAID3_N 0x01 /* parity in last extent */
80 #define DDF_RAID4_0 0x00 /* parity in first extent */
81 #define DDF_RAID4_N 0x01 /* parity in last extent */
82 /* these apply to raid5e and raid5ee as well */
83 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
84 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
85 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
86 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
87
88 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
89 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
90
91 /* Secondary RAID Level (SRL) */
92 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
93 #define DDF_2MIRRORED 0x01
94 #define DDF_2CONCAT 0x02
95 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
96
97 /* Magic numbers */
98 #define DDF_HEADER_MAGIC __cpu_to_be32(0xDE11DE11)
99 #define DDF_CONTROLLER_MAGIC __cpu_to_be32(0xAD111111)
100 #define DDF_PHYS_RECORDS_MAGIC __cpu_to_be32(0x22222222)
101 #define DDF_PHYS_DATA_MAGIC __cpu_to_be32(0x33333333)
102 #define DDF_VIRT_RECORDS_MAGIC __cpu_to_be32(0xDDDDDDDD)
103 #define DDF_VD_CONF_MAGIC __cpu_to_be32(0xEEEEEEEE)
104 #define DDF_SPARE_ASSIGN_MAGIC __cpu_to_be32(0x55555555)
105 #define DDF_VU_CONF_MAGIC __cpu_to_be32(0x88888888)
106 #define DDF_VENDOR_LOG_MAGIC __cpu_to_be32(0x01dBEEF0)
107 #define DDF_BBM_LOG_MAGIC __cpu_to_be32(0xABADB10C)
108
109 #define DDF_GUID_LEN 24
110 #define DDF_REVISION_0 "01.00.00"
111 #define DDF_REVISION_2 "01.02.00"
112
/* The 512-byte DDF header.  Stored as the anchor in the last block of
 * the device and as primary/secondary copies at primary_lba and
 * secondary_lba; the copies differ from the anchor only after 'type'
 * is allowed to differ (see load_ddf_header()).
 * All multi-byte fields are bigendian.
 */
struct ddf_header {
	__u32	magic;		/* DDF_HEADER_MAGIC */
	__u32	crc;		/* crc of the whole block, see calc_crc() */
	char	guid[DDF_GUID_LEN];
	char	revision[8];	/* 01.02.00 */
	__u32	seq;		/* starts at '1' */
	__u32	timestamp;
	__u8	openflag;
	__u8	foreignflag;
	__u8	enforcegroups;
	__u8	pad0;		/* 0xff */
	__u8	pad1[12];	/* 12 * 0xff */
	/* 64 bytes so far */
	__u8	header_ext[32];	/* reserved: fill with 0xff */
	__u64	primary_lba;
	__u64	secondary_lba;
	__u8	type;		/* DDF_HEADER_{ANCHOR,PRIMARY,SECONDARY} */
	__u8	pad2[3];	/* 0xff */
	__u32	workspace_len;	/* sectors for vendor space -
				 * at least 32768(sectors) */
	__u64	workspace_lba;
	__u16	max_pd_entries;	/* one of 15, 63, 255, 1023, 4095 */
	__u16	max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
	__u16	max_partitions; /* i.e. max num of configuration
				   record entries per disk */
	__u16	config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
				      *12/512) */
	__u16	max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
	__u8	pad3[54];	/* 0xff */
	/* 192 bytes so far */
	/* Section offsets/lengths below are in 512-byte sectors,
	 * relative to the primary or secondary header location
	 * (see load_section()). */
	__u32	controller_section_offset;
	__u32	controller_section_length;
	__u32	phys_section_offset;
	__u32	phys_section_length;
	__u32	virt_section_offset;
	__u32	virt_section_length;
	__u32	config_section_offset;
	__u32	config_section_length;
	__u32	data_section_offset;
	__u32	data_section_length;
	__u32	bbm_section_offset;
	__u32	bbm_section_length;
	__u32	diag_space_offset;
	__u32	diag_space_length;
	__u32	vendor_offset;
	__u32	vendor_length;
	/* 256 bytes so far */
	__u8	pad4[256];	/* 0xff */
};
162
163 /* type field */
164 #define DDF_HEADER_ANCHOR 0x00
165 #define DDF_HEADER_PRIMARY 0x01
166 #define DDF_HEADER_SECONDARY 0x02
167
168 /* The content of the 'controller section' - global scope */
/* Global controller section: identifies a controller by GUID,
 * vendor/device ids and product id.  All multi-byte fields bigendian.
 */
struct ddf_controller_data {
	__u32	magic;			/* DDF_CONTROLLER_MAGIC */
	__u32	crc;
	char	guid[DDF_GUID_LEN];
	struct controller_type {
		__u16 vendor_id;
		__u16 device_id;
		__u16 sub_vendor_id;
		__u16 sub_device_id;
	} type;
	char	product_id[16];
	__u8	pad[8];			/* 0xff */
	__u8	vendor_data[448];
};
183
184 /* The content of phys_section - global scope */
/* Global physical disk table (phys_section): one entry per physical
 * disk, matched against struct disk_data by guid (see load_ddf_local()).
 * All multi-byte fields bigendian.
 */
struct phys_disk {
	__u32	magic;		/* DDF_PHYS_RECORDS_MAGIC */
	__u32	crc;
	__u16	used_pdes;	/* number of entries in use */
	__u16	max_pdes;
	__u8	pad[52];
	struct phys_disk_entry {
		char	guid[DDF_GUID_LEN];
		__u32	refnum;
		__u16	type;		/* bitmap: DDF_Active_in_VD etc */
		__u16	state;		/* bitmap: DDF_Online etc */
		__u64	config_size;	/* DDF structures must be after here */
		char	path[18];	/* another horrible structure really */
		__u8	pad[6];
	} entries[0];
};
201
202 /* phys_disk_entry.type is a bitmap - bigendian remember */
203 #define DDF_Forced_PD_GUID 1
204 #define DDF_Active_in_VD 2
205 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
206 #define DDF_Spare 8 /* overrides Global_spare */
207 #define DDF_Foreign 16
208 #define DDF_Legacy 32 /* no DDF on this device */
209
210 #define DDF_Interface_mask 0xf00
211 #define DDF_Interface_SCSI 0x100
212 #define DDF_Interface_SAS 0x200
213 #define DDF_Interface_SATA 0x300
214 #define DDF_Interface_FC 0x400
215
216 /* phys_disk_entry.state is a bigendian bitmap */
217 #define DDF_Online 1
218 #define DDF_Failed 2 /* overrides 1,4,8 */
219 #define DDF_Rebuilding 4
220 #define DDF_Transition 8
221 #define DDF_SMART 16
222 #define DDF_ReadErrors 32
223 #define DDF_Missing 64
224
225 /* The content of the virt_section global scope */
/* Global virtual disk table (virt_section).  Unused entries have an
 * all-0xff guid (see pr_state()).  All multi-byte fields bigendian.
 */
struct virtual_disk {
	__u32	magic;		/* DDF_VIRT_RECORDS_MAGIC */
	__u32	crc;
	__u16	populated_vdes;	/* number of entries in use */
	__u16	max_vdes;
	__u8	pad[52];
	struct virtual_entry {
		char	guid[DDF_GUID_LEN];
		__u16	unit;
		__u16	pad0;		/* 0xffff */
		__u16	guid_crc;
		__u16	type;		/* bitmap: DDF_Shared etc */
		__u8	state;		/* DDF_state_* plus flag bits */
		__u8	init_state;	/* DDF_initstate_* / DDF_access_* */
		__u8	pad1[14];
		char	name[16];
	} entries[0];
};
244
245 /* virtual_entry.type is a bitmap - bigendian */
246 #define DDF_Shared 1
247 #define DDF_Enforce_Groups 2
248 #define DDF_Unicode 4
249 #define DDF_Owner_Valid 8
250
251 /* virtual_entry.state is a bigendian bitmap */
252 #define DDF_state_mask 0x7
253 #define DDF_state_optimal 0x0
254 #define DDF_state_degraded 0x1
255 #define DDF_state_deleted 0x2
256 #define DDF_state_missing 0x3
257 #define DDF_state_failed 0x4
258 #define DDF_state_part_optimal 0x5
259
260 #define DDF_state_morphing 0x8
261 #define DDF_state_inconsistent 0x10
262
263 /* virtual_entry.init_state is a bigendian bitmap */
264 #define DDF_initstate_mask 0x03
265 #define DDF_init_not 0x00
266 #define DDF_init_quick 0x01 /* initialisation is progress.
267 * i.e. 'state_inconsistent' */
268 #define DDF_init_full 0x02
269
270 #define DDF_access_mask 0xc0
271 #define DDF_access_rw 0x00
272 #define DDF_access_ro 0x80
273 #define DDF_access_blocked 0xc0
274
275 /* The content of the config_section - local scope
276 * It has multiple records each config_record_len sectors
277 * They can be vd_config or spare_assign
278 */
279
/* One configuration record (config_section, local scope) describing a
 * Basic Virtual Disk: RAID geometry plus the member-disk reference
 * numbers.  The record is conf_rec_len sectors long; after the fixed
 * part come 'mppe' phys_refnum entries followed by the per-disk LBA
 * offsets (see the LBA_OFFSET() macro below).
 * All multi-byte fields bigendian.
 */
struct vd_config {
	__u32	magic;		/* DDF_VD_CONF_MAGIC */
	__u32	crc;
	char	guid[DDF_GUID_LEN];
	__u32	timestamp;
	__u32	seqnum;		/* bumped on each update; highest wins */
	__u8	pad0[24];
	__u16	prim_elmnt_count;
	__u8	chunk_shift;	/* 0 == 512, 1==1024 etc */
	__u8	prl;		/* Primary RAID Level - DDF_RAID* */
	__u8	rlq;		/* RAID Level Qualifier - DDF_RAID*_* */
	__u8	sec_elmnt_count;
	__u8	sec_elmnt_seq;
	__u8	srl;		/* Secondary RAID Level - DDF_2* */
	__u64	blocks;		/* blocks per component could be different
				 * on different component devices...(only
				 * for concat I hope) */
	__u64	array_blocks;	/* blocks in array */
	__u8	pad1[8];
	__u32	spare_refs[8];
	__u8	cache_pol[8];
	__u8	bg_rate;
	__u8	pad2[3];
	__u8	pad3[52];
	__u8	pad4[192];
	__u8	v0[32];	/* reserved- 0xff */
	__u8	v1[32];	/* reserved- 0xff */
	__u8	v2[16];	/* reserved- 0xff */
	__u8	v3[16];	/* reserved- 0xff */
	__u8	vendor[32];
	__u32	phys_refnum[0];	/* refnum of each disk in sequence */
	/*__u64	lba_offset[0];  LBA offset in each phys.  Note extents in a
				bvd are always the same size */
};
314 #define LBA_OFFSET(ddf, vd) ((__u64 *) &(vd)->phys_refnum[(ddf)->mppe])
315
316 /* vd_config.cache_pol[7] is a bitmap */
317 #define DDF_cache_writeback 1 /* else writethrough */
318 #define DDF_cache_wadaptive 2 /* only applies if writeback */
319 #define DDF_cache_readahead 4
320 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
321 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
322 #define DDF_cache_wallowed 32 /* enable write caching */
323 #define DDF_cache_rallowed 64 /* enable read caching */
324
/* Spare assignment record (config_section, local scope): lists the
 * virtual disks this spare is dedicated to.
 * All multi-byte fields bigendian.
 */
struct spare_assign {
	__u32	magic;		/* DDF_SPARE_ASSIGN_MAGIC */
	__u32	crc;
	__u32	timestamp;
	__u8	reserved[7];
	__u8	type;		/* bitmap: DDF_spare_* below */
	__u16	populated;	/* SAEs used */
	__u16	max;		/* max SAEs */
	__u8	pad[8];
	struct spare_assign_entry {
		char	guid[DDF_GUID_LEN];
		__u16	secondary_element;
		__u8	pad[6];
	} spare_ents[0];
};
340 /* spare_assign.type is a bitmap */
341 #define DDF_spare_dedicated 0x1 /* else global */
342 #define DDF_spare_revertible 0x2 /* else committable */
343 #define DDF_spare_active 0x4 /* else not active */
344 #define DDF_spare_affinity 0x8 /* enclosure affinity */
345
346 /* The data_section contents - local scope */
/* Per-disk data section: identifies this particular physical disk;
 * the guid links it to an entry in struct phys_disk (see the pdnum
 * search in load_ddf_local()).
 */
struct disk_data {
	__u32	magic;		/* DDF_PHYS_DATA_MAGIC */
	__u32	crc;
	char	guid[DDF_GUID_LEN];
	__u32	refnum;		/* crc of some magic drive data ... */
	__u8	forced_ref;	/* set when above was not result of magic */
	__u8	forced_guid;	/* set if guid was forced rather than magic */
	__u8	vendor[32];
	__u8	pad[442];
};
357
358 /* bbm_section content */
/* Bad block management log (bbm_section): maps defective regions to
 * replacement blocks.  All multi-byte fields bigendian.
 */
struct bad_block_log {
	__u32	magic;		/* DDF_BBM_LOG_MAGIC */
	__u32	crc;
	__u16	entry_count;
	__u32	spare_count;
	__u8	pad[10];
	__u64	first_spare;
	struct mapped_block {
		__u64	defective_start;
		__u32	replacement_start;
		__u16	remap_count;
		__u8	pad[2];
	} entries[0];
};
373
374 /* Struct for internally holding ddf structures */
375 /* The DDF structure stored on each device is potentially
376 * quite different, as some data is global and some is local.
377 * The global data is:
378 * - ddf header
379 * - controller_data
380 * - Physical disk records
381 * - Virtual disk records
382 * The local data is:
383 * - Configuration records
384 * - Physical Disk data section
385 * ( and Bad block and vendor which I don't care about yet).
386 *
387 * The local data is parsed into separate lists as it is read
388 * and reconstructed for writing. This means that we only need
389 * to make config changes once and they are automatically
390 * propagated to all devices.
391 * Note that the ddf_super has space of the conf and disk data
392 * for this disk and also for a list of all such data.
393 * The list is only used for the superblock that is being
394 * built in Create or Assemble to describe the whole array.
395 */
struct ddf_super {
	/* global metadata, read straight from the device */
	struct ddf_header	anchor, primary, secondary;
	struct ddf_controller_data controller;
	struct ddf_header	*active;	/* &primary or &secondary -
						 * whichever was chosen by
						 * load_ddf_headers() */
	struct phys_disk	*phys;
	struct virtual_disk	*virt;
	int			pdsize, vdsize;	/* byte sizes of the phys/virt
						 * sections */
	unsigned int		max_part, mppe, conf_rec_len; /* cached from
						 * the active header */
	int			currentdev;
	int			updates_pending;
	/* one vcl per virtual disk configuration seen on any member;
	 * the union pads the control fields to a full 512-byte block so
	 * 'conf' stays sector aligned */
	struct vcl {
		union {
			char space[512];
			struct {
				struct vcl	*next;
				unsigned int	vcnum; /* index into ->virt */
				struct vd_config **other_bvds;
				__u64		*block_sizes; /* NULL if all the same */
			};
		};
		struct vd_config conf;
	} *conflist, *currentconf;
	/* one dl per member device; same 512-byte padding trick */
	struct dl {
		union {
			char space[512];
			struct {
				struct dl	*next;
				int major, minor;
				char		*devname;
				int fd;
				unsigned long long size; /* sectors */
				unsigned long long primary_lba; /* sectors */
				unsigned long long secondary_lba; /* sectors */
				unsigned long long workspace_lba; /* sectors */
				int pdnum;	/* index in ->phys */
				struct spare_assign *spare;
				void *mdupdate; /* hold metadata update */

				/* These fields used by auto-layout */
				int raiddisk;	/* slot to fill in autolayout */
				__u64 esize;
			};
		};
		struct disk_data disk;
		struct vcl *vlist[0]; /* max_part in size */
	} *dlist, *add_list;
};
443
444 #ifndef offsetof
445 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
446 #endif
447
#if DEBUG
static int all_ff(const char *guid);
/* Debug helper: dump state/init_state of every populated virtual disk
 * entry, prefixed with the caller's name passed via 'msg'.
 * Compiled to a no-op unless DEBUG is set.
 */
static void pr_state(struct ddf_super *ddf, const char *msg)
{
	unsigned int i;
	dprintf("%s/%s: ", __func__, msg);
	for (i = 0; i < __be16_to_cpu(ddf->active->max_vd_entries); i++) {
		/* an all-0xff guid marks an unused entry - skip it */
		if (all_ff(ddf->virt->entries[i].guid))
			continue;
		dprintf("%u(s=%02x i=%02x) ", i,
			ddf->virt->entries[i].state,
			ddf->virt->entries[i].init_state);
	}
	dprintf("\n");
}
#else
static void pr_state(const struct ddf_super *ddf, const char *msg) {}
#endif
466
467 #define ddf_set_updates_pending(x) \
468 do { (x)->updates_pending = 1; pr_state(x, __func__); } while (0)
469
470 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
471 __u32 refnum, unsigned int nmax,
472 const struct vd_config **bvd,
473 unsigned int *idx);
474
475 static unsigned int calc_crc(void *buf, int len)
476 {
477 /* crcs are always at the same place as in the ddf_header */
478 struct ddf_header *ddf = buf;
479 __u32 oldcrc = ddf->crc;
480 __u32 newcrc;
481 ddf->crc = 0xffffffff;
482
483 newcrc = crc32(0, buf, len);
484 ddf->crc = oldcrc;
485 /* The crc is store (like everything) bigendian, so convert
486 * here for simplicity
487 */
488 return __cpu_to_be32(newcrc);
489 }
490
491 #define DDF_INVALID_LEVEL 0xff
492 #define DDF_NO_SECONDARY 0xff
/* Report an md geometry that has no DDF representation and return
 * DDF_INVALID_LEVEL so layout_md2ddf() can propagate the failure.
 */
static int err_bad_md_layout(const mdu_array_info_t *array)
{
	pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
	       array->level, array->layout, array->raid_disks);
	return DDF_INVALID_LEVEL;
}
499
/* Translate an md level/layout/raid_disks triple into the DDF
 * prl/rlq/srl and element counts, storing them in 'conf'.
 * Returns 0 on success or DDF_INVALID_LEVEL (via err_bad_md_layout())
 * when the md geometry has no DDF representation.
 */
static int layout_md2ddf(const mdu_array_info_t *array,
			 struct vd_config *conf)
{
	__u16 prim_elmnt_count = __cpu_to_be16(array->raid_disks);
	__u8 prl = DDF_INVALID_LEVEL, rlq = 0;
	__u8 sec_elmnt_count = 1;
	__u8 srl = DDF_NO_SECONDARY;

	switch (array->level) {
	case LEVEL_LINEAR:
		prl = DDF_CONCAT;
		break;
	case 0:
		rlq = DDF_RAID0_SIMPLE;
		prl = DDF_RAID0;
		break;
	case 1:
		/* DDF only defines RAID1 plexes of 2 or 3 devices */
		switch (array->raid_disks) {
		case 2:
			rlq = DDF_RAID1_SIMPLE;
			break;
		case 3:
			rlq = DDF_RAID1_MULTI;
			break;
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID1;
		break;
	case 4:
		if (array->layout != 0)
			return err_bad_md_layout(array);
		rlq = DDF_RAID4_N;
		prl = DDF_RAID4;
		break;
	case 5:
		switch (array->layout) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			rlq = DDF_RAID5_N_RESTART;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			rlq = DDF_RAID5_0_RESTART;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			rlq = DDF_RAID5_N_CONTINUE;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			/* not mentioned in standard */
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID5;
		break;
	case 6:
		switch (array->layout) {
		case ALGORITHM_ROTATING_N_RESTART:
			rlq = DDF_RAID5_N_RESTART;
			break;
		case ALGORITHM_ROTATING_ZERO_RESTART:
			rlq = DDF_RAID6_0_RESTART;
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			rlq = DDF_RAID5_N_CONTINUE;
			break;
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID6;
		break;
	case 10:
		/* RAID10 is expressed as a secondary level: RAID1 BVDs
		 * of 2 or 3 mirrors each, spanned at the second level.
		 * Only the matching near-copy layouts (0x102/0x103) can
		 * be represented. */
		if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
			rlq = DDF_RAID1_SIMPLE;
			prim_elmnt_count =  __cpu_to_be16(2);
			sec_elmnt_count = array->raid_disks / 2;
		} else if (array->raid_disks % 3 == 0
			   && array->layout == 0x103) {
			rlq = DDF_RAID1_MULTI;
			prim_elmnt_count =  __cpu_to_be16(3);
			sec_elmnt_count = array->raid_disks / 3;
		} else
			return err_bad_md_layout(array);
		srl = DDF_2SPANNED;
		prl = DDF_RAID1;
		break;
	default:
		return err_bad_md_layout(array);
	}
	conf->prl = prl;
	conf->prim_elmnt_count = prim_elmnt_count;
	conf->rlq = rlq;
	conf->srl = srl;
	conf->sec_elmnt_count = sec_elmnt_count;
	return 0;
}
594
/* Report a DDF configuration that has no md equivalent and return -1
 * so layout_ddf2md() can propagate the failure.
 */
static int err_bad_ddf_layout(const struct vd_config *conf)
{
	pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
	       conf->prl, conf->rlq, __be16_to_cpu(conf->prim_elmnt_count));
	return -1;
}
601
602 static int layout_ddf2md(const struct vd_config *conf,
603 mdu_array_info_t *array)
604 {
605 int level = LEVEL_UNSUPPORTED;
606 int layout = 0;
607 int raiddisks = __be16_to_cpu(conf->prim_elmnt_count);
608
609 if (conf->sec_elmnt_count > 1) {
610 /* see also check_secondary() */
611 if (conf->prl != DDF_RAID1 ||
612 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
613 pr_err("Unsupported secondary RAID level %u/%u\n",
614 conf->prl, conf->srl);
615 return -1;
616 }
617 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
618 layout = 0x102;
619 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
620 layout = 0x103;
621 else
622 return err_bad_ddf_layout(conf);
623 raiddisks *= conf->sec_elmnt_count;
624 level = 10;
625 goto good;
626 }
627
628 switch (conf->prl) {
629 case DDF_CONCAT:
630 level = LEVEL_LINEAR;
631 break;
632 case DDF_RAID0:
633 if (conf->rlq != DDF_RAID0_SIMPLE)
634 return err_bad_ddf_layout(conf);
635 level = 0;
636 break;
637 case DDF_RAID1:
638 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
639 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
640 return err_bad_ddf_layout(conf);
641 level = 1;
642 break;
643 case DDF_RAID4:
644 if (conf->rlq != DDF_RAID4_N)
645 return err_bad_ddf_layout(conf);
646 level = 4;
647 break;
648 case DDF_RAID5:
649 switch (conf->rlq) {
650 case DDF_RAID5_N_RESTART:
651 layout = ALGORITHM_LEFT_ASYMMETRIC;
652 break;
653 case DDF_RAID5_0_RESTART:
654 layout = ALGORITHM_RIGHT_ASYMMETRIC;
655 break;
656 case DDF_RAID5_N_CONTINUE:
657 layout = ALGORITHM_LEFT_SYMMETRIC;
658 break;
659 default:
660 return err_bad_ddf_layout(conf);
661 }
662 level = 5;
663 break;
664 case DDF_RAID6:
665 switch (conf->rlq) {
666 case DDF_RAID5_N_RESTART:
667 layout = ALGORITHM_ROTATING_N_RESTART;
668 break;
669 case DDF_RAID6_0_RESTART:
670 layout = ALGORITHM_ROTATING_ZERO_RESTART;
671 break;
672 case DDF_RAID5_N_CONTINUE:
673 layout = ALGORITHM_ROTATING_N_CONTINUE;
674 break;
675 default:
676 return err_bad_ddf_layout(conf);
677 }
678 level = 6;
679 break;
680 default:
681 return err_bad_ddf_layout(conf);
682 };
683
684 good:
685 array->level = level;
686 array->layout = layout;
687 array->raid_disks = raiddisks;
688 return 0;
689 }
690
static int load_ddf_header(int fd, unsigned long long lba,
			   unsigned long long size,
			   int type,
			   struct ddf_header *hdr, struct ddf_header *anchor)
{
	/* read a ddf header (primary or secondary) from fd/lba
	 * and check that it is consistent with anchor
	 * Need to check:
	 *   magic, crc, guid, rev, and LBA's, header_type, and
	 *   everything after header_type must be the same
	 * 'lba' and 'size' are in 512-byte sectors.
	 * Returns 1 if the header is valid and consistent, 0 otherwise.
	 */
	if (lba >= size-1)
		return 0;

	if (lseek64(fd, lba<<9, 0) < 0)
		return 0;

	if (read(fd, hdr, 512) != 512)
		return 0;

	if (hdr->magic != DDF_HEADER_MAGIC)
		return 0;
	if (calc_crc(hdr, 512) != hdr->crc)
		return 0;
	/* everything from pad2 to the end of the block must match the
	 * anchor byte-for-byte */
	if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
	    memcmp(anchor->revision, hdr->revision, 8) != 0 ||
	    anchor->primary_lba != hdr->primary_lba ||
	    anchor->secondary_lba != hdr->secondary_lba ||
	    hdr->type != type ||
	    memcmp(anchor->pad2, hdr->pad2, 512 -
		   offsetof(struct ddf_header, pad2)) != 0)
		return 0;

	/* Looks good enough to me... */
	return 1;
}
727
/* Read one metadata section from the device.  'offset_be'/'len_be'
 * are in sectors (bigendian), relative to the location of the active
 * header.  If 'buf' is NULL a sector-aligned buffer is allocated
 * (and freed again on failure); otherwise the caller's single-block
 * buffer is filled.  'check' restricts the length to the values a
 * well-formed section may have.
 * Returns the buffer on success, NULL on failure.
 */
static void *load_section(int fd, struct ddf_super *super, void *buf,
			  __u32 offset_be, __u32 len_be, int check)
{
	unsigned long long offset = __be32_to_cpu(offset_be);
	unsigned long long len = __be32_to_cpu(len_be);
	int dofree = (buf == NULL);

	if (check)
		if (len != 2 && len != 8 && len != 32
		    && len != 128 && len != 512)
			return NULL;

	/* refuse implausibly large sections */
	if (len > 1024)
		return NULL;
	if (buf) {
		/* All pre-allocated sections are a single block */
		if (len != 1)
			return NULL;
	} else if (posix_memalign(&buf, 512, len<<9) != 0)
		buf = NULL;

	if (!buf)
		return NULL;

	/* section offsets are relative to the header being used */
	if (super->active->type == 1)
		offset += __be64_to_cpu(super->active->primary_lba);
	else
		offset += __be64_to_cpu(super->active->secondary_lba);

	if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
		if (dofree)
			free(buf);
		return NULL;
	}
	if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
		if (dofree)
			free(buf);
		return NULL;
	}
	return buf;
}
769
/* Read the anchor from the last sector of the device, validate it,
 * then load the primary and secondary headers and pick one as
 * super->active.  The secondary wins when it is newer (higher seq)
 * and not open, when it is equally new but cleanly closed while the
 * primary is open, or when the primary failed to load at all.
 * Returns 0 on success, 1 on I/O error, 2 when no usable DDF
 * metadata is present.
 */
static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
{
	unsigned long long dsize;

	get_dev_size(fd, NULL, &dsize);

	if (lseek64(fd, dsize-512, 0) < 0) {
		if (devname)
			pr_err("Cannot seek to anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}
	if (read(fd, &super->anchor, 512) != 512) {
		if (devname)
			pr_err("Cannot read anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}
	if (super->anchor.magic != DDF_HEADER_MAGIC) {
		if (devname)
			pr_err("no DDF anchor found on %s\n",
			       devname);
		return 2;
	}
	if (calc_crc(&super->anchor, 512) != super->anchor.crc) {
		if (devname)
			pr_err("bad CRC on anchor on %s\n",
			       devname);
		return 2;
	}
	if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
	    memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
		if (devname)
			pr_err("can only support super revision"
			       " %.8s and earlier, not %.8s on %s\n",
			       DDF_REVISION_2, super->anchor.revision,devname);
		return 2;
	}
	super->active = NULL;
	if (load_ddf_header(fd, __be64_to_cpu(super->anchor.primary_lba),
			    dsize >> 9,  1,
			    &super->primary, &super->anchor) == 0) {
		if (devname)
			pr_err("Failed to load primary DDF header "
			       "on %s\n", devname);
	} else
		super->active = &super->primary;
	if (load_ddf_header(fd, __be64_to_cpu(super->anchor.secondary_lba),
			    dsize >> 9,  2,
			    &super->secondary, &super->anchor)) {
		/* prefer the secondary under the conditions described
		 * in the function comment above */
		if ((__be32_to_cpu(super->primary.seq)
		     < __be32_to_cpu(super->secondary.seq) &&
		     !super->secondary.openflag)
		    || (__be32_to_cpu(super->primary.seq)
			== __be32_to_cpu(super->secondary.seq) &&
			super->primary.openflag && !super->secondary.openflag)
		    || super->active == NULL
			)
			super->active = &super->secondary;
	} else if (devname)
		pr_err("Failed to load secondary DDF header on %s\n",
		       devname);
	if (super->active == NULL)
		return 2;
	return 0;
}
836
/* Load the globally-scoped sections (controller, physical disk table,
 * virtual disk table) referenced by the active header, and cache the
 * frequently used limits from that header.  The controller section is
 * read into the pre-allocated super->controller; phys and virt are
 * allocated by load_section().
 * Returns 0 on success, 2 on failure (with phys/virt freed).
 */
static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
{
	void *ok;
	ok = load_section(fd, super, &super->controller,
			  super->active->controller_section_offset,
			  super->active->controller_section_length,
			  0);
	super->phys = load_section(fd, super, NULL,
				   super->active->phys_section_offset,
				   super->active->phys_section_length,
				   1);
	super->pdsize = __be32_to_cpu(super->active->phys_section_length) * 512;

	super->virt = load_section(fd, super, NULL,
				   super->active->virt_section_offset,
				   super->active->virt_section_length,
				   1);
	super->vdsize = __be32_to_cpu(super->active->virt_section_length) * 512;
	if (!ok ||
	    !super->phys ||
	    !super->virt) {
		free(super->phys);
		free(super->virt);
		super->phys = NULL;
		super->virt = NULL;
		return 2;
	}
	super->conflist = NULL;
	super->dlist = NULL;

	/* cache limits from the active header for convenient access */
	super->max_part = __be16_to_cpu(super->active->max_partitions);
	super->mppe = __be16_to_cpu(super->active->max_primary_element_entries);
	super->conf_rec_len = __be16_to_cpu(super->active->config_record_len);
	return 0;
}
872
873 #define DDF_UNUSED_BVD 0xff
874 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
875 {
876 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
877 unsigned int i, vdsize;
878 void *p;
879 if (n_vds == 0) {
880 vcl->other_bvds = NULL;
881 return 0;
882 }
883 vdsize = ddf->conf_rec_len * 512;
884 if (posix_memalign(&p, 512, n_vds *
885 (vdsize + sizeof(struct vd_config *))) != 0)
886 return -1;
887 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
888 for (i = 0; i < n_vds; i++) {
889 vcl->other_bvds[i] = p + i * vdsize;
890 memset(vcl->other_bvds[i], 0, vdsize);
891 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
892 }
893 return 0;
894 }
895
896 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
897 unsigned int len)
898 {
899 int i;
900 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
901 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
902 break;
903
904 if (i < vcl->conf.sec_elmnt_count-1) {
905 if (vd->seqnum <= vcl->other_bvds[i]->seqnum)
906 return;
907 } else {
908 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
909 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
910 break;
911 if (i == vcl->conf.sec_elmnt_count-1) {
912 pr_err("no space for sec level config %u, count is %u\n",
913 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
914 return;
915 }
916 }
917 memcpy(vcl->other_bvds[i], vd, len);
918 }
919
920 static int load_ddf_local(int fd, struct ddf_super *super,
921 char *devname, int keep)
922 {
923 struct dl *dl;
924 struct stat stb;
925 char *conf;
926 unsigned int i;
927 unsigned int confsec;
928 int vnum;
929 unsigned int max_virt_disks = __be16_to_cpu(super->active->max_vd_entries);
930 unsigned long long dsize;
931
932 /* First the local disk info */
933 if (posix_memalign((void**)&dl, 512,
934 sizeof(*dl) +
935 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
936 pr_err("%s could not allocate disk info buffer\n",
937 __func__);
938 return 1;
939 }
940
941 load_section(fd, super, &dl->disk,
942 super->active->data_section_offset,
943 super->active->data_section_length,
944 0);
945 dl->devname = devname ? xstrdup(devname) : NULL;
946
947 fstat(fd, &stb);
948 dl->major = major(stb.st_rdev);
949 dl->minor = minor(stb.st_rdev);
950 dl->next = super->dlist;
951 dl->fd = keep ? fd : -1;
952
953 dl->size = 0;
954 if (get_dev_size(fd, devname, &dsize))
955 dl->size = dsize >> 9;
956 /* If the disks have different sizes, the LBAs will differ
957 * between phys disks.
958 * At this point here, the values in super->active must be valid
959 * for this phys disk. */
960 dl->primary_lba = super->active->primary_lba;
961 dl->secondary_lba = super->active->secondary_lba;
962 dl->workspace_lba = super->active->workspace_lba;
963 dl->spare = NULL;
964 for (i = 0 ; i < super->max_part ; i++)
965 dl->vlist[i] = NULL;
966 super->dlist = dl;
967 dl->pdnum = -1;
968 for (i = 0; i < __be16_to_cpu(super->active->max_pd_entries); i++)
969 if (memcmp(super->phys->entries[i].guid,
970 dl->disk.guid, DDF_GUID_LEN) == 0)
971 dl->pdnum = i;
972
973 /* Now the config list. */
974 /* 'conf' is an array of config entries, some of which are
975 * probably invalid. Those which are good need to be copied into
976 * the conflist
977 */
978
979 conf = load_section(fd, super, NULL,
980 super->active->config_section_offset,
981 super->active->config_section_length,
982 0);
983
984 vnum = 0;
985 for (confsec = 0;
986 confsec < __be32_to_cpu(super->active->config_section_length);
987 confsec += super->conf_rec_len) {
988 struct vd_config *vd =
989 (struct vd_config *)((char*)conf + confsec*512);
990 struct vcl *vcl;
991
992 if (vd->magic == DDF_SPARE_ASSIGN_MAGIC) {
993 if (dl->spare)
994 continue;
995 if (posix_memalign((void**)&dl->spare, 512,
996 super->conf_rec_len*512) != 0) {
997 pr_err("%s could not allocate spare info buf\n",
998 __func__);
999 return 1;
1000 }
1001
1002 memcpy(dl->spare, vd, super->conf_rec_len*512);
1003 continue;
1004 }
1005 if (vd->magic != DDF_VD_CONF_MAGIC)
1006 continue;
1007 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1008 if (memcmp(vcl->conf.guid,
1009 vd->guid, DDF_GUID_LEN) == 0)
1010 break;
1011 }
1012
1013 if (vcl) {
1014 dl->vlist[vnum++] = vcl;
1015 if (vcl->other_bvds != NULL &&
1016 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1017 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1018 continue;
1019 }
1020 if (__be32_to_cpu(vd->seqnum) <=
1021 __be32_to_cpu(vcl->conf.seqnum))
1022 continue;
1023 } else {
1024 if (posix_memalign((void**)&vcl, 512,
1025 (super->conf_rec_len*512 +
1026 offsetof(struct vcl, conf))) != 0) {
1027 pr_err("%s could not allocate vcl buf\n",
1028 __func__);
1029 return 1;
1030 }
1031 vcl->next = super->conflist;
1032 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1033 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1034 if (alloc_other_bvds(super, vcl) != 0) {
1035 pr_err("%s could not allocate other bvds\n",
1036 __func__);
1037 free(vcl);
1038 return 1;
1039 };
1040 super->conflist = vcl;
1041 dl->vlist[vnum++] = vcl;
1042 }
1043 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1044 for (i=0; i < max_virt_disks ; i++)
1045 if (memcmp(super->virt->entries[i].guid,
1046 vcl->conf.guid, DDF_GUID_LEN)==0)
1047 break;
1048 if (i < max_virt_disks)
1049 vcl->vcnum = i;
1050 }
1051 free(conf);
1052
1053 return 0;
1054 }
1055
1056 #ifndef MDASSEMBLE
1057 static int load_super_ddf_all(struct supertype *st, int fd,
1058 void **sbp, char *devname);
1059 #endif
1060
1061 static void free_super_ddf(struct supertype *st);
1062
1063 static int load_super_ddf(struct supertype *st, int fd,
1064 char *devname)
1065 {
1066 unsigned long long dsize;
1067 struct ddf_super *super;
1068 int rv;
1069
1070 if (get_dev_size(fd, devname, &dsize) == 0)
1071 return 1;
1072
1073 if (!st->ignore_hw_compat && test_partition(fd))
1074 /* DDF is not allowed on partitions */
1075 return 1;
1076
1077 /* 32M is a lower bound */
1078 if (dsize <= 32*1024*1024) {
1079 if (devname)
1080 pr_err("%s is too small for ddf: "
1081 "size is %llu sectors.\n",
1082 devname, dsize>>9);
1083 return 1;
1084 }
1085 if (dsize & 511) {
1086 if (devname)
1087 pr_err("%s is an odd size for ddf: "
1088 "size is %llu bytes.\n",
1089 devname, dsize);
1090 return 1;
1091 }
1092
1093 free_super_ddf(st);
1094
1095 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1096 pr_err("malloc of %zu failed.\n",
1097 sizeof(*super));
1098 return 1;
1099 }
1100 memset(super, 0, sizeof(*super));
1101
1102 rv = load_ddf_headers(fd, super, devname);
1103 if (rv) {
1104 free(super);
1105 return rv;
1106 }
1107
1108 /* Have valid headers and have chosen the best. Let's read in the rest*/
1109
1110 rv = load_ddf_global(fd, super, devname);
1111
1112 if (rv) {
1113 if (devname)
1114 pr_err("Failed to load all information "
1115 "sections on %s\n", devname);
1116 free(super);
1117 return rv;
1118 }
1119
1120 rv = load_ddf_local(fd, super, devname, 0);
1121
1122 if (rv) {
1123 if (devname)
1124 pr_err("Failed to load all information "
1125 "sections on %s\n", devname);
1126 free(super);
1127 return rv;
1128 }
1129
1130 /* Should possibly check the sections .... */
1131
1132 st->sb = super;
1133 if (st->ss == NULL) {
1134 st->ss = &super_ddf;
1135 st->minor_version = 0;
1136 st->max_devs = 512;
1137 }
1138 return 0;
1139
1140 }
1141
1142 static void free_super_ddf(struct supertype *st)
1143 {
1144 struct ddf_super *ddf = st->sb;
1145 if (ddf == NULL)
1146 return;
1147 free(ddf->phys);
1148 free(ddf->virt);
1149 while (ddf->conflist) {
1150 struct vcl *v = ddf->conflist;
1151 ddf->conflist = v->next;
1152 if (v->block_sizes)
1153 free(v->block_sizes);
1154 if (v->other_bvds)
1155 /*
1156 v->other_bvds[0] points to beginning of buffer,
1157 see alloc_other_bvds()
1158 */
1159 free(v->other_bvds[0]);
1160 free(v);
1161 }
1162 while (ddf->dlist) {
1163 struct dl *d = ddf->dlist;
1164 ddf->dlist = d->next;
1165 if (d->fd >= 0)
1166 close(d->fd);
1167 if (d->spare)
1168 free(d->spare);
1169 free(d);
1170 }
1171 while (ddf->add_list) {
1172 struct dl *d = ddf->add_list;
1173 ddf->add_list = d->next;
1174 if (d->fd >= 0)
1175 close(d->fd);
1176 if (d->spare)
1177 free(d->spare);
1178 free(d);
1179 }
1180 free(ddf);
1181 st->sb = NULL;
1182 }
1183
1184 static struct supertype *match_metadata_desc_ddf(char *arg)
1185 {
1186 /* 'ddf' only support containers */
1187 struct supertype *st;
1188 if (strcmp(arg, "ddf") != 0 &&
1189 strcmp(arg, "default") != 0
1190 )
1191 return NULL;
1192
1193 st = xcalloc(1, sizeof(*st));
1194 st->ss = &super_ddf;
1195 st->max_devs = 512;
1196 st->minor_version = 0;
1197 st->sb = NULL;
1198 return st;
1199 }
1200
1201 #ifndef MDASSEMBLE
1202
/* Virtual disk state (low 3 bits of virtual_entry->state),
 * per the DDF spec's VD state field. */
static mapping_t ddf_state[] = {
	{ "Optimal", 0},
	{ "Degraded", 1},
	{ "Deleted", 2},
	{ "Missing", 3},
	{ "Failed", 4},
	{ "Partially Optimal", 5},
	{ "-reserved-", 6},
	{ "-reserved-", 7},
	{ NULL, 0}
};

/* VD initialisation state (low 2 bits of virtual_entry->init_state). */
static mapping_t ddf_init_state[] = {
	{ "Not Initialised", 0},
	{ "QuickInit in Progress", 1},
	{ "Fully Initialised", 2},
	{ "*UNKNOWN*", 3},
	{ NULL, 0}
};
/* VD access policy (bits 6-7 of virtual_entry->init_state). */
static mapping_t ddf_access[] = {
	{ "Read/Write", 0},
	{ "Reserved", 1},
	{ "Read Only", 2},
	{ "Blocked (no access)", 3},
	{ NULL ,0}
};

/* Primary RAID level (vd_config->prl) names. */
static mapping_t ddf_level[] = {
	{ "RAID0", DDF_RAID0},
	{ "RAID1", DDF_RAID1},
	{ "RAID3", DDF_RAID3},
	{ "RAID4", DDF_RAID4},
	{ "RAID5", DDF_RAID5},
	{ "RAID1E",DDF_RAID1E},
	{ "JBOD", DDF_JBOD},
	{ "CONCAT",DDF_CONCAT},
	{ "RAID5E",DDF_RAID5E},
	{ "RAID5EE",DDF_RAID5EE},
	{ "RAID6", DDF_RAID6},
	{ NULL, 0}
};
/* Secondary RAID level (vd_config->srl) names, used for SVDs. */
static mapping_t ddf_sec_level[] = {
	{ "Striped", DDF_2STRIPED},
	{ "Mirrored", DDF_2MIRRORED},
	{ "Concat", DDF_2CONCAT},
	{ "Spanned", DDF_2SPANNED},
	{ NULL, 0}
};
1251 #endif
1252
1253 static int all_ff(const char *guid)
1254 {
1255 int i;
1256 for (i = 0; i < DDF_GUID_LEN; i++)
1257 if (guid[i] != (char)0xff)
1258 return 0;
1259 return 1;
1260 }
1261
1262 #ifndef MDASSEMBLE
static void print_guid(char *guid, int tstamp)
{
	/* A GUIDs are part (or all) ASCII and part binary.
	 * They tend to be space padded.
	 * We print the GUID in HEX, then in parentheses add
	 * any initial ASCII sequence, and a possible
	 * time stamp from bytes 16-19
	 */
	int l = DDF_GUID_LEN;
	int i;

	/* hex dump, ':' between each 4-byte group */
	for (i=0 ; i<DDF_GUID_LEN ; i++) {
		if ((i&3)==0 && i != 0) printf(":");
		printf("%02X", guid[i]&255);
	}

	printf("\n (");
	/* drop trailing space padding before printing the ASCII part */
	while (l && guid[l-1] == ' ')
		l--;
	/* print only the leading run of printable ASCII */
	for (i=0 ; i<l ; i++) {
		if (guid[i] >= 0x20 && guid[i] < 0x7f)
			fputc(guid[i], stdout);
		else
			break;
	}
	if (tstamp) {
		/* NOTE(review): reads bytes 16-19 through a __u32* cast;
		 * this assumes the guid buffer is suitably aligned —
		 * a memcpy would be safer.  Confirm callers' buffers. */
		time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
		char tbuf[100];
		struct tm *tm;
		tm = localtime(&then);
		strftime(tbuf, 100, " %D %T",tm);
		fputs(tbuf, stdout);
	}
	printf(")");
}
1298
/* Print --examine detail lines (member list, chunk size, level,
 * sizes) for every valid config record matching 'guid'; 'n' only
 * tags the output lines with the VD slot number. */
static void examine_vd(int n, struct ddf_super *sb, char *guid)
{
	int crl = sb->conf_rec_len;
	struct vcl *vcl;

	for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
		unsigned int i;
		struct vd_config *vc = &vcl->conf;

		/* skip records with a bad CRC or a different GUID */
		if (calc_crc(vc, crl*512) != vc->crc)
			continue;
		if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
			continue;

		/* Ok, we know about this VD, let's give more details */
		printf(" Raid Devices[%d] : %d (", n,
		       __be16_to_cpu(vc->prim_elmnt_count));
		for (i = 0; i < __be16_to_cpu(vc->prim_elmnt_count); i++) {
			int j;
			int cnt = __be16_to_cpu(sb->phys->used_pdes);
			/* map this member's refnum to a phys table slot */
			for (j=0; j<cnt; j++)
				if (vc->phys_refnum[i] == sb->phys->entries[j].refnum)
					break;
			if (i) printf(" ");
			if (j < cnt)
				printf("%d", j);
			else
				printf("--");	/* refnum not in phys table */
		}
		printf(")\n");
		/* chunk_shift 255 means "not striped" (no chunk size) */
		if (vc->chunk_shift != 255)
			printf(" Chunk Size[%d] : %d sectors\n", n,
			       1 << vc->chunk_shift);
		printf(" Raid Level[%d] : %s\n", n,
		       map_num(ddf_level, vc->prl)?:"-unknown-");
		if (vc->sec_elmnt_count != 1) {
			/* this BVD is part of an SVD (e.g. RAID10) */
			printf(" Secondary Position[%d] : %d of %d\n", n,
			       vc->sec_elmnt_seq, vc->sec_elmnt_count);
			printf(" Secondary Level[%d] : %s\n", n,
			       map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
		}
		/* sizes are stored in 512-byte sectors; print KiB */
		printf(" Device Size[%d] : %llu\n", n,
		       (unsigned long long)__be64_to_cpu(vc->blocks)/2);
		printf(" Array Size[%d] : %llu\n", n,
		       (unsigned long long)__be64_to_cpu(vc->array_blocks)/2);
	}
}
1346
/* Print a summary line for every in-use virtual disk entry,
 * followed by per-VD details via examine_vd(). */
static void examine_vds(struct ddf_super *sb)
{
	int cnt = __be16_to_cpu(sb->virt->populated_vdes);
	unsigned int i;
	printf(" Virtual Disks : %d\n", cnt);

	/* walk the whole table; unused slots have an all-0xff GUID */
	for (i = 0; i < __be16_to_cpu(sb->virt->max_vdes); i++) {
		struct virtual_entry *ve = &sb->virt->entries[i];
		if (all_ff(ve->guid))
			continue;
		printf("\n");
		printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
		printf("\n");
		printf(" unit[%d] : %d\n", i, __be16_to_cpu(ve->unit));
		/* state: bits 0-2 state, bit 3 morphing, bit 4 dirty */
		printf(" state[%d] : %s, %s%s\n", i,
		       map_num(ddf_state, ve->state & 7),
		       (ve->state & 8) ? "Morphing, ": "",
		       (ve->state & 16)? "Not Consistent" : "Consistent");
		printf(" init state[%d] : %s\n", i,
		       map_num(ddf_init_state, ve->init_state&3));
		printf(" access[%d] : %s\n", i,
		       map_num(ddf_access, (ve->init_state>>6) & 3));
		printf(" Name[%d] : %.16s\n", i, ve->name);
		examine_vd(i, sb, ve->guid);
	}
	if (cnt) printf("\n");
}
1374
/* Print one line per used physical disk entry: slot, refnum, size,
 * device node (if one of our dlist entries matches) and the decoded
 * type/state flags. */
static void examine_pds(struct ddf_super *sb)
{
	int cnt = __be16_to_cpu(sb->phys->used_pdes);
	int i;
	struct dl *dl;
	printf(" Physical Disks : %d\n", cnt);
	printf(" Number RefNo Size Device Type/State\n");

	for (i=0 ; i<cnt ; i++) {
		struct phys_disk_entry *pd = &sb->phys->entries[i];
		int type = __be16_to_cpu(pd->type);
		int state = __be16_to_cpu(pd->state);

		//printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
		//printf("\n");
		printf(" %3d %08x ", i,
		       __be32_to_cpu(pd->refnum));
		/* config_size is in sectors; >>1 converts to KiB */
		printf("%8lluK ",
		       (unsigned long long)__be64_to_cpu(pd->config_size)>>1);
		/* find a local device with this refnum to name it */
		for (dl = sb->dlist; dl ; dl = dl->next) {
			if (dl->disk.refnum == pd->refnum) {
				char *dv = map_dev(dl->major, dl->minor, 0);
				if (dv) {
					printf("%-15s", dv);
					break;
				}
			}
		}
		if (!dl)
			printf("%15s","");
		printf(" %s%s%s%s%s",
		       (type&2) ? "active":"",
		       (type&4) ? "Global-Spare":"",
		       (type&8) ? "spare" : "",
		       (type&16)? ", foreign" : "",
		       (type&32)? "pass-through" : "");
		if (state & DDF_Failed)
			/* This over-rides these three */
			state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
		printf("/%s%s%s%s%s%s%s",
		       (state&1)? "Online": "Offline",
		       (state&2)? ", Failed": "",
		       (state&4)? ", Rebuilding": "",
		       (state&8)? ", in-transition": "",
		       (state&16)? ", SMART-errors": "",
		       (state&32)? ", Unrecovered-Read-Errors": "",
		       (state&64)? ", Missing" : "");
		printf("\n");
	}
}
1425
/* --examine output for a DDF container: anchor/header fields, then
 * the virtual and physical disk tables. */
static void examine_super_ddf(struct supertype *st, char *homehost)
{
	struct ddf_super *sb = st->sb;

	printf(" Magic : %08x\n", __be32_to_cpu(sb->anchor.magic));
	printf(" Version : %.8s\n", sb->anchor.revision);
	printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
	printf("\n");
	printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
	printf("\n");
	printf(" Seq : %08x\n", __be32_to_cpu(sb->active->seq));
	/* "Redundant hdr" = a valid secondary DDF header is present */
	printf(" Redundant hdr : %s\n", sb->secondary.magic == DDF_HEADER_MAGIC
	       ?"yes" : "no");
	examine_vds(sb);
	examine_pds(sb);
}
1442
1443 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1444
1445 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1446 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1447
1448 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1449 {
1450 /*
1451 * Figure out the VD number for this supertype.
1452 * Returns DDF_CONTAINER for the container itself,
1453 * and DDF_NOTFOUND on error.
1454 */
1455 struct ddf_super *ddf = st->sb;
1456 struct mdinfo *sra;
1457 char *sub, *end;
1458 unsigned int vcnum;
1459
1460 if (*st->container_devnm == '\0')
1461 return DDF_CONTAINER;
1462
1463 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1464 if (!sra || sra->array.major_version != -1 ||
1465 sra->array.minor_version != -2 ||
1466 !is_subarray(sra->text_version))
1467 return DDF_NOTFOUND;
1468
1469 sub = strchr(sra->text_version + 1, '/');
1470 if (sub != NULL)
1471 vcnum = strtoul(sub + 1, &end, 10);
1472 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1473 vcnum >= __be16_to_cpu(ddf->active->max_vd_entries))
1474 return DDF_NOTFOUND;
1475
1476 return vcnum;
1477 }
1478
1479 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1480 {
1481 /* We just write a generic DDF ARRAY entry
1482 */
1483 struct mdinfo info;
1484 char nbuf[64];
1485 getinfo_super_ddf(st, &info, NULL);
1486 fname_from_uuid(st, &info, nbuf, ':');
1487
1488 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1489 }
1490
1491 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1492 {
1493 /* We just write a generic DDF ARRAY entry
1494 */
1495 struct ddf_super *ddf = st->sb;
1496 struct mdinfo info;
1497 unsigned int i;
1498 char nbuf[64];
1499 getinfo_super_ddf(st, &info, NULL);
1500 fname_from_uuid(st, &info, nbuf, ':');
1501
1502 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
1503 struct virtual_entry *ve = &ddf->virt->entries[i];
1504 struct vcl vcl;
1505 char nbuf1[64];
1506 if (all_ff(ve->guid))
1507 continue;
1508 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1509 ddf->currentconf =&vcl;
1510 uuid_from_super_ddf(st, info.uuid);
1511 fname_from_uuid(st, &info, nbuf1, ':');
1512 printf("ARRAY container=%s member=%d UUID=%s\n",
1513 nbuf+5, i, nbuf1+5);
1514 }
1515 }
1516
1517 static void export_examine_super_ddf(struct supertype *st)
1518 {
1519 struct mdinfo info;
1520 char nbuf[64];
1521 getinfo_super_ddf(st, &info, NULL);
1522 fname_from_uuid(st, &info, nbuf, ':');
1523 printf("MD_METADATA=ddf\n");
1524 printf("MD_LEVEL=container\n");
1525 printf("MD_UUID=%s\n", nbuf+5);
1526 }
1527
/* Copy the DDF metadata area verbatim from fd 'from' to fd 'to',
 * writing it at the same device offset.  Returns 0 on success,
 * 1 on any error (caller's devices are left as-is). */
static int copy_metadata_ddf(struct supertype *st, int from, int to)
{
	void *buf;
	unsigned long long dsize, offset;
	/* NOTE(review): 'bytes' is an int, so this assumes the
	 * metadata area is < 2GB — confirm for huge config areas. */
	int bytes;
	struct ddf_header *ddf;
	int written = 0;

	/* The meta consists of an anchor, a primary, and a secondary.
	 * This all lives at the end of the device.
	 * So it is easiest to find the earliest of primary and
	 * secondary, and copy everything from there.
	 *
	 * Anchor is 512 from end It contains primary_lba and secondary_lba
	 * we choose one of those
	 */

	if (posix_memalign(&buf, 4096, 4096) != 0)
		return 1;

	if (!get_dev_size(from, NULL, &dsize))
		goto err;

	/* read and validate the anchor header (last sector) */
	if (lseek64(from, dsize-512, 0) < 0)
		goto err;
	if (read(from, buf, 512) != 512)
		goto err;
	ddf = buf;
	if (ddf->magic != DDF_HEADER_MAGIC ||
	    calc_crc(ddf, 512) != ddf->crc ||
	    (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
	     memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
		goto err;

	/* start the copy at whichever of anchor/primary/secondary
	 * begins earliest on the device */
	offset = dsize - 512;
	if ((__be64_to_cpu(ddf->primary_lba) << 9) < offset)
		offset = __be64_to_cpu(ddf->primary_lba) << 9;
	if ((__be64_to_cpu(ddf->secondary_lba) << 9) < offset)
		offset = __be64_to_cpu(ddf->secondary_lba) << 9;

	bytes = dsize - offset;

	if (lseek64(from, offset, 0) < 0 ||
	    lseek64(to, offset, 0) < 0)
		goto err;
	/* 4K at a time; a short read or write is treated as an error */
	while (written < bytes) {
		int n = bytes - written;
		if (n > 4096)
			n = 4096;
		if (read(from, buf, n) != n)
			goto err;
		if (write(to, buf, n) != n)
			goto err;
		written += n;
	}
	free(buf);
	return 0;
err:
	free(buf);
	return 1;
}
1589
/* Intentionally a no-op: --detail output for DDF is not implemented. */
static void detail_super_ddf(struct supertype *st, char *homehost)
{
	/* FIXME later
	 * Could print DDF GUID
	 * Need to find which array
	 * If whole, briefly list all arrays
	 * If one, give name
	 */
}
1599
1600 static void brief_detail_super_ddf(struct supertype *st)
1601 {
1602 struct mdinfo info;
1603 char nbuf[64];
1604 struct ddf_super *ddf = st->sb;
1605 unsigned int vcnum = get_vd_num_of_subarray(st);
1606 if (vcnum == DDF_CONTAINER)
1607 uuid_from_super_ddf(st, info.uuid);
1608 else if (vcnum == DDF_NOTFOUND)
1609 return;
1610 else
1611 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, info.uuid);
1612 fname_from_uuid(st, &info, nbuf,':');
1613 printf(" UUID=%s", nbuf + 5);
1614 }
1615 #endif
1616
1617 static int match_home_ddf(struct supertype *st, char *homehost)
1618 {
1619 /* It matches 'this' host if the controller is a
1620 * Linux-MD controller with vendor_data matching
1621 * the hostname
1622 */
1623 struct ddf_super *ddf = st->sb;
1624 unsigned int len;
1625
1626 if (!homehost)
1627 return 0;
1628 len = strlen(homehost);
1629
1630 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1631 len < sizeof(ddf->controller.vendor_data) &&
1632 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1633 ddf->controller.vendor_data[len] == 0);
1634 }
1635
1636 #ifndef MDASSEMBLE
1637 static int find_index_in_bvd(const struct ddf_super *ddf,
1638 const struct vd_config *conf, unsigned int n,
1639 unsigned int *n_bvd)
1640 {
1641 /*
1642 * Find the index of the n-th valid physical disk in this BVD
1643 */
1644 unsigned int i, j;
1645 for (i = 0, j = 0; i < ddf->mppe &&
1646 j < __be16_to_cpu(conf->prim_elmnt_count); i++) {
1647 if (conf->phys_refnum[i] != 0xffffffff) {
1648 if (n == j) {
1649 *n_bvd = i;
1650 return 1;
1651 }
1652 j++;
1653 }
1654 }
1655 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1656 __func__, n, __be16_to_cpu(conf->prim_elmnt_count));
1657 return 0;
1658 }
1659
1660 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1661 unsigned int n,
1662 unsigned int *n_bvd, struct vcl **vcl)
1663 {
1664 struct vcl *v;
1665
1666 for (v = ddf->conflist; v; v = v->next) {
1667 unsigned int nsec, ibvd;
1668 struct vd_config *conf;
1669 if (inst != v->vcnum)
1670 continue;
1671 conf = &v->conf;
1672 if (conf->sec_elmnt_count == 1) {
1673 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1674 *vcl = v;
1675 return conf;
1676 } else
1677 goto bad;
1678 }
1679 if (v->other_bvds == NULL) {
1680 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1681 __func__, conf->sec_elmnt_count);
1682 goto bad;
1683 }
1684 nsec = n / __be16_to_cpu(conf->prim_elmnt_count);
1685 if (conf->sec_elmnt_seq != nsec) {
1686 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1687 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1688 == nsec)
1689 break;
1690 }
1691 if (ibvd == conf->sec_elmnt_count)
1692 goto bad;
1693 conf = v->other_bvds[ibvd-1];
1694 }
1695 if (!find_index_in_bvd(ddf, conf,
1696 n - nsec*conf->sec_elmnt_count, n_bvd))
1697 goto bad;
1698 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1699 , __func__, n, *n_bvd, ibvd-1, inst);
1700 *vcl = v;
1701 return conf;
1702 }
1703 bad:
1704 pr_err("%s: Could't find disk %d in array %u\n", __func__, n, inst);
1705 return NULL;
1706 }
1707 #endif
1708
1709 static int find_phys(const struct ddf_super *ddf, __u32 phys_refnum)
1710 {
1711 /* Find the entry in phys_disk which has the given refnum
1712 * and return it's index
1713 */
1714 unsigned int i;
1715 for (i = 0; i < __be16_to_cpu(ddf->phys->max_pdes); i++)
1716 if (ddf->phys->entries[i].refnum == phys_refnum)
1717 return i;
1718 return -1;
1719 }
1720
1721 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1722 {
1723 char buf[20];
1724 struct sha1_ctx ctx;
1725 sha1_init_ctx(&ctx);
1726 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1727 sha1_finish_ctx(&ctx, buf);
1728 memcpy(uuid, buf, 4*4);
1729 }
1730
1731 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1732 {
1733 /* The uuid returned here is used for:
1734 * uuid to put into bitmap file (Create, Grow)
1735 * uuid for backup header when saving critical section (Grow)
1736 * comparing uuids when re-adding a device into an array
1737 * In these cases the uuid required is that of the data-array,
1738 * not the device-set.
1739 * uuid to recognise same set when adding a missing device back
1740 * to an array. This is a uuid for the device-set.
1741 *
1742 * For each of these we can make do with a truncated
1743 * or hashed uuid rather than the original, as long as
1744 * everyone agrees.
1745 * In the case of SVD we assume the BVD is of interest,
1746 * though that might be the case if a bitmap were made for
1747 * a mirrored SVD - worry about that later.
1748 * So we need to find the VD configuration record for the
1749 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1750 * The first 16 bytes of the sha1 of these is used.
1751 */
1752 struct ddf_super *ddf = st->sb;
1753 struct vcl *vcl = ddf->currentconf;
1754 char *guid;
1755
1756 if (vcl)
1757 guid = vcl->conf.guid;
1758 else
1759 guid = ddf->anchor.guid;
1760 uuid_from_ddf_guid(guid, uuid);
1761 }
1762
1763 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1764
/* Fill *info for the container (or delegate to the BVD variant when a
 * member array is selected).  'map' (optional) gets one byte per disk:
 * 1 = online and not failed, 0 otherwise.  The caller pre-sets
 * info->array.raid_disks to size 'map'; it is read before *info is
 * cleared. */
static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
{
	struct ddf_super *ddf = st->sb;
	int map_disks = info->array.raid_disks;
	__u32 *cptr;

	if (ddf->currentconf) {
		getinfo_super_ddf_bvd(st, info, map);
		return;
	}
	memset(info, 0, sizeof(*info));

	info->array.raid_disks = __be16_to_cpu(ddf->phys->used_pdes);
	info->array.level = LEVEL_CONTAINER;
	info->array.layout = 0;
	info->array.md_minor = -1;
	/* the container creation time lives in GUID bytes 16-19 */
	cptr = (__u32 *)(ddf->anchor.guid + 16);
	info->array.ctime = DECADE + __be32_to_cpu(*cptr);

	info->array.utime = 0;
	info->array.chunk_size = 0;
	info->container_enough = 1;

	info->disk.major = 0;
	info->disk.minor = 0;
	if (ddf->dlist) {
		/* describe the first local disk in the container */
		info->disk.number = __be32_to_cpu(ddf->dlist->disk.refnum);
		info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);

		/* NOTE(review): if find_phys() returned -1 this indexes
		 * entries[-1] — confirm the refnum is always present. */
		info->data_offset = __be64_to_cpu(ddf->phys->
						  entries[info->disk.raid_disk].
						  config_size);
		info->component_size = ddf->dlist->size - info->data_offset;
	} else {
		info->disk.number = -1;
		info->disk.raid_disk = -1;
		// info->disk.raid_disk = find refnum in the table and use index;
	}
	info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);

	info->recovery_start = MaxSector;
	info->reshape_active = 0;
	info->recovery_blocked = 0;
	info->name[0] = 0;

	/* external metadata: major/minor are -1/-2, version is "ddf" */
	info->array.major_version = -1;
	info->array.minor_version = -2;
	strcpy(info->text_version, "ddf");
	info->safe_mode_delay = 0;

	uuid_from_super_ddf(st, info->uuid);

	if (map) {
		int i;
		for (i = 0 ; i < map_disks; i++) {
			if (i < info->array.raid_disks &&
			    (__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Online) &&
			    !(__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Failed))
				map[i] = 1;
			else
				map[i] = 0;
		}
	}
}
1829
1830 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1831 {
1832 struct ddf_super *ddf = st->sb;
1833 struct vcl *vc = ddf->currentconf;
1834 int cd = ddf->currentdev;
1835 int j;
1836 struct dl *dl;
1837 int map_disks = info->array.raid_disks;
1838 __u32 *cptr;
1839
1840 memset(info, 0, sizeof(*info));
1841 if (layout_ddf2md(&vc->conf, &info->array) == -1)
1842 return;
1843 info->array.md_minor = -1;
1844 cptr = (__u32 *)(vc->conf.guid + 16);
1845 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1846 info->array.utime = DECADE + __be32_to_cpu(vc->conf.timestamp);
1847 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1848 info->custom_array_size = 0;
1849
1850 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1851 info->data_offset =
1852 __be64_to_cpu(LBA_OFFSET(ddf, &vc->conf)[cd]);
1853 if (vc->block_sizes)
1854 info->component_size = vc->block_sizes[cd];
1855 else
1856 info->component_size = __be64_to_cpu(vc->conf.blocks);
1857 }
1858
1859 for (dl = ddf->dlist; dl ; dl = dl->next)
1860 if (dl->raiddisk == ddf->currentdev)
1861 break;
1862
1863 info->disk.major = 0;
1864 info->disk.minor = 0;
1865 info->disk.state = 0;
1866 if (dl) {
1867 info->disk.major = dl->major;
1868 info->disk.minor = dl->minor;
1869 info->disk.raid_disk = dl->raiddisk;
1870 info->disk.number = dl->pdnum;
1871 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
1872 }
1873
1874 info->container_member = ddf->currentconf->vcnum;
1875
1876 info->recovery_start = MaxSector;
1877 info->resync_start = 0;
1878 info->reshape_active = 0;
1879 info->recovery_blocked = 0;
1880 if (!(ddf->virt->entries[info->container_member].state
1881 & DDF_state_inconsistent) &&
1882 (ddf->virt->entries[info->container_member].init_state
1883 & DDF_initstate_mask)
1884 == DDF_init_full)
1885 info->resync_start = MaxSector;
1886
1887 uuid_from_super_ddf(st, info->uuid);
1888
1889 info->array.major_version = -1;
1890 info->array.minor_version = -2;
1891 sprintf(info->text_version, "/%s/%d",
1892 st->container_devnm,
1893 info->container_member);
1894 info->safe_mode_delay = 200;
1895
1896 memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
1897 info->name[16]=0;
1898 for(j=0; j<16; j++)
1899 if (info->name[j] == ' ')
1900 info->name[j] = 0;
1901
1902 if (map)
1903 for (j = 0; j < map_disks; j++) {
1904 map[j] = 0;
1905 if (j < info->array.raid_disks) {
1906 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
1907 if (i >= 0 &&
1908 (__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Online) &&
1909 !(__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Failed))
1910 map[i] = 1;
1911 }
1912 }
1913 }
1914
static int update_super_ddf(struct supertype *st, struct mdinfo *info,
			    char *update,
			    char *devname, int verbose,
			    int uuid_set, char *homehost)
{
	/* Apply a metadata update request.  For 'assemble' and 'force'
	 * the caller checks for non-zero (something changed); for other
	 * callers the return value is ignored.
	 *
	 * DDF activates arrays purely through a metadata update, so the
	 * "force-*" and "assemble" tricks other formats need are no-ops
	 * here.  Current handling:
	 *   grow              - FIXME, accepted with nothing done
	 *   resync            - would mark dirty; accepted, nothing done
	 *   _reshape_progress - reshape not supported yet; accepted
	 *   assemble          - nothing to do, succeed
	 *   homehost          - lives in controller->vendor_data; TODO
	 *   name              - lives in virtual_entry->name; TODO
	 *   anything else     - rejected (-1)
	 */
	static const char * const accepted[] = {
		"grow", "resync", "_reshape_progress", "assemble", NULL
	};
	int i;

	for (i = 0; accepted[i]; i++)
		if (strcmp(update, accepted[i]) == 0)
			return 0;

	/* includes the not-yet-implemented homehost/name updates */
	return -1;
}
1983
static void make_header_guid(char *guid)
{
	__u32 stamp;
	/* Create a DDF Header of Virtual Disk GUID */

	/* 24 bytes of fiction required.
	 * first 8 are a 'vendor-id' - "Linux-MD"
	 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
	 * Remaining 8 random number plus timestamp
	 */
	/* sizeof(T10) is 9 ("Linux-MD" + NUL); the NUL lands in
	 * guid[8] and is immediately overwritten below */
	memcpy(guid, T10, sizeof(T10));
	stamp = __cpu_to_be32(0xdeadbeef);
	memcpy(guid+8, &stamp, 4);
	stamp = __cpu_to_be32(0);
	memcpy(guid+12, &stamp, 4);
	/* seconds since the DDF epoch (1980) */
	stamp = __cpu_to_be32(time(0) - DECADE);
	memcpy(guid+16, &stamp, 4);
	stamp = random32();
	memcpy(guid+20, &stamp, 4);
}
2004
2005 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2006 {
2007 unsigned int i;
2008 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
2009 if (all_ff(ddf->virt->entries[i].guid))
2010 return i;
2011 }
2012 return DDF_NOTFOUND;
2013 }
2014
2015 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2016 const char *name)
2017 {
2018 unsigned int i;
2019 if (name == NULL)
2020 return DDF_NOTFOUND;
2021 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
2022 if (all_ff(ddf->virt->entries[i].guid))
2023 continue;
2024 if (!strncmp(name, ddf->virt->entries[i].name,
2025 sizeof(ddf->virt->entries[i].name)))
2026 return i;
2027 }
2028 return DDF_NOTFOUND;
2029 }
2030
2031 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2032 const char *guid)
2033 {
2034 unsigned int i;
2035 if (guid == NULL || all_ff(guid))
2036 return DDF_NOTFOUND;
2037 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++)
2038 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2039 return i;
2040 return DDF_NOTFOUND;
2041 }
2042
2043 static int init_super_ddf_bvd(struct supertype *st,
2044 mdu_array_info_t *info,
2045 unsigned long long size,
2046 char *name, char *homehost,
2047 int *uuid, unsigned long long data_offset);
2048
2049 static int init_super_ddf(struct supertype *st,
2050 mdu_array_info_t *info,
2051 unsigned long long size, char *name, char *homehost,
2052 int *uuid, unsigned long long data_offset)
2053 {
2054 /* This is primarily called by Create when creating a new array.
2055 * We will then get add_to_super called for each component, and then
2056 * write_init_super called to write it out to each device.
2057 * For DDF, Create can create on fresh devices or on a pre-existing
2058 * array.
2059 * To create on a pre-existing array a different method will be called.
2060 * This one is just for fresh drives.
2061 *
2062 * We need to create the entire 'ddf' structure which includes:
2063 * DDF headers - these are easy.
2064 * Controller data - a Sector describing this controller .. not that
2065 * this is a controller exactly.
2066 * Physical Disk Record - one entry per device, so
2067 * leave plenty of space.
2068 * Virtual Disk Records - again, just leave plenty of space.
2069 * This just lists VDs, doesn't give details
2070 * Config records - describes the VDs that use this disk
2071 * DiskData - describes 'this' device.
2072 * BadBlockManagement - empty
2073 * Diag Space - empty
2074 * Vendor Logs - Could we put bitmaps here?
2075 *
2076 */
2077 struct ddf_super *ddf;
2078 char hostname[17];
2079 int hostlen;
2080 int max_phys_disks, max_virt_disks;
2081 unsigned long long sector;
2082 int clen;
2083 int i;
2084 int pdsize, vdsize;
2085 struct phys_disk *pd;
2086 struct virtual_disk *vd;
2087
2088 if (data_offset != INVALID_SECTORS) {
2089 pr_err("data-offset not supported by DDF\n");
2090 return 0;
2091 }
2092
2093 if (st->sb)
2094 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2095 data_offset);
2096
2097 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2098 pr_err("%s could not allocate superblock\n", __func__);
2099 return 0;
2100 }
2101 memset(ddf, 0, sizeof(*ddf));
2102 ddf->dlist = NULL; /* no physical disks yet */
2103 ddf->conflist = NULL; /* No virtual disks yet */
2104 st->sb = ddf;
2105
2106 if (info == NULL) {
2107 /* zeroing superblock */
2108 return 0;
2109 }
2110
2111 /* At least 32MB *must* be reserved for the ddf. So let's just
2112 * start 32MB from the end, and put the primary header there.
2113 * Don't do secondary for now.
2114 * We don't know exactly where that will be yet as it could be
2115 * different on each device. To just set up the lengths.
2116 *
2117 */
2118
2119 ddf->anchor.magic = DDF_HEADER_MAGIC;
2120 make_header_guid(ddf->anchor.guid);
2121
2122 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2123 ddf->anchor.seq = __cpu_to_be32(1);
2124 ddf->anchor.timestamp = __cpu_to_be32(time(0) - DECADE);
2125 ddf->anchor.openflag = 0xFF;
2126 ddf->anchor.foreignflag = 0;
2127 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2128 ddf->anchor.pad0 = 0xff;
2129 memset(ddf->anchor.pad1, 0xff, 12);
2130 memset(ddf->anchor.header_ext, 0xff, 32);
2131 ddf->anchor.primary_lba = ~(__u64)0;
2132 ddf->anchor.secondary_lba = ~(__u64)0;
2133 ddf->anchor.type = DDF_HEADER_ANCHOR;
2134 memset(ddf->anchor.pad2, 0xff, 3);
2135 ddf->anchor.workspace_len = __cpu_to_be32(32768); /* Must be reserved */
2136 ddf->anchor.workspace_lba = ~(__u64)0; /* Put this at bottom
2137 of 32M reserved.. */
2138 max_phys_disks = 1023; /* Should be enough */
2139 ddf->anchor.max_pd_entries = __cpu_to_be16(max_phys_disks);
2140 max_virt_disks = 255;
2141 ddf->anchor.max_vd_entries = __cpu_to_be16(max_virt_disks); /* ?? */
2142 ddf->anchor.max_partitions = __cpu_to_be16(64); /* ?? */
2143 ddf->max_part = 64;
2144 ddf->mppe = 256;
2145 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
2146 ddf->anchor.config_record_len = __cpu_to_be16(ddf->conf_rec_len);
2147 ddf->anchor.max_primary_element_entries = __cpu_to_be16(ddf->mppe);
2148 memset(ddf->anchor.pad3, 0xff, 54);
2149 /* controller sections is one sector long immediately
2150 * after the ddf header */
2151 sector = 1;
2152 ddf->anchor.controller_section_offset = __cpu_to_be32(sector);
2153 ddf->anchor.controller_section_length = __cpu_to_be32(1);
2154 sector += 1;
2155
2156 /* phys is 8 sectors after that */
2157 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2158 sizeof(struct phys_disk_entry)*max_phys_disks,
2159 512);
2160 switch(pdsize/512) {
2161 case 2: case 8: case 32: case 128: case 512: break;
2162 default: abort();
2163 }
2164 ddf->anchor.phys_section_offset = __cpu_to_be32(sector);
2165 ddf->anchor.phys_section_length =
2166 __cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2167 sector += pdsize/512;
2168
2169 /* virt is another 32 sectors */
2170 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2171 sizeof(struct virtual_entry) * max_virt_disks,
2172 512);
2173 switch(vdsize/512) {
2174 case 2: case 8: case 32: case 128: case 512: break;
2175 default: abort();
2176 }
2177 ddf->anchor.virt_section_offset = __cpu_to_be32(sector);
2178 ddf->anchor.virt_section_length =
2179 __cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2180 sector += vdsize/512;
2181
2182 clen = ddf->conf_rec_len * (ddf->max_part+1);
2183 ddf->anchor.config_section_offset = __cpu_to_be32(sector);
2184 ddf->anchor.config_section_length = __cpu_to_be32(clen);
2185 sector += clen;
2186
2187 ddf->anchor.data_section_offset = __cpu_to_be32(sector);
2188 ddf->anchor.data_section_length = __cpu_to_be32(1);
2189 sector += 1;
2190
2191 ddf->anchor.bbm_section_length = __cpu_to_be32(0);
2192 ddf->anchor.bbm_section_offset = __cpu_to_be32(0xFFFFFFFF);
2193 ddf->anchor.diag_space_length = __cpu_to_be32(0);
2194 ddf->anchor.diag_space_offset = __cpu_to_be32(0xFFFFFFFF);
2195 ddf->anchor.vendor_length = __cpu_to_be32(0);
2196 ddf->anchor.vendor_offset = __cpu_to_be32(0xFFFFFFFF);
2197
2198 memset(ddf->anchor.pad4, 0xff, 256);
2199
2200 memcpy(&ddf->primary, &ddf->anchor, 512);
2201 memcpy(&ddf->secondary, &ddf->anchor, 512);
2202
2203 ddf->primary.openflag = 1; /* I guess.. */
2204 ddf->primary.type = DDF_HEADER_PRIMARY;
2205
2206 ddf->secondary.openflag = 1; /* I guess.. */
2207 ddf->secondary.type = DDF_HEADER_SECONDARY;
2208
2209 ddf->active = &ddf->primary;
2210
2211 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2212
2213 /* 24 more bytes of fiction required.
2214 * first 8 are a 'vendor-id' - "Linux-MD"
2215 * Remaining 16 are serial number.... maybe a hostname would do?
2216 */
2217 memcpy(ddf->controller.guid, T10, sizeof(T10));
2218 gethostname(hostname, sizeof(hostname));
2219 hostname[sizeof(hostname) - 1] = 0;
2220 hostlen = strlen(hostname);
2221 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2222 for (i = strlen(T10) ; i+hostlen < 24; i++)
2223 ddf->controller.guid[i] = ' ';
2224
2225 ddf->controller.type.vendor_id = __cpu_to_be16(0xDEAD);
2226 ddf->controller.type.device_id = __cpu_to_be16(0xBEEF);
2227 ddf->controller.type.sub_vendor_id = 0;
2228 ddf->controller.type.sub_device_id = 0;
2229 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2230 memset(ddf->controller.pad, 0xff, 8);
2231 memset(ddf->controller.vendor_data, 0xff, 448);
2232 if (homehost && strlen(homehost) < 440)
2233 strcpy((char*)ddf->controller.vendor_data, homehost);
2234
2235 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2236 pr_err("%s could not allocate pd\n", __func__);
2237 return 0;
2238 }
2239 ddf->phys = pd;
2240 ddf->pdsize = pdsize;
2241
2242 memset(pd, 0xff, pdsize);
2243 memset(pd, 0, sizeof(*pd));
2244 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2245 pd->used_pdes = __cpu_to_be16(0);
2246 pd->max_pdes = __cpu_to_be16(max_phys_disks);
2247 memset(pd->pad, 0xff, 52);
2248 for (i = 0; i < max_phys_disks; i++)
2249 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2250
2251 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2252 pr_err("%s could not allocate vd\n", __func__);
2253 return 0;
2254 }
2255 ddf->virt = vd;
2256 ddf->vdsize = vdsize;
2257 memset(vd, 0, vdsize);
2258 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2259 vd->populated_vdes = __cpu_to_be16(0);
2260 vd->max_vdes = __cpu_to_be16(max_virt_disks);
2261 memset(vd->pad, 0xff, 52);
2262
2263 for (i=0; i<max_virt_disks; i++)
2264 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2265
2266 st->sb = ddf;
2267 ddf_set_updates_pending(ddf);
2268 return 1;
2269 }
2270
static int chunk_to_shift(int chunksize)
{
	/* Convert a chunk size in bytes to the DDF "chunk shift":
	 * log2 of the chunk size expressed in 512-byte sectors. */
	int shift = ffs(chunksize / 512) - 1;
	return shift;
}
2275
2276 #ifndef MDASSEMBLE
/* A used region on a physical disk, in 512-byte sectors. */
struct extent {
	unsigned long long start, size;
};
/* qsort() comparator: order extents by ascending start sector. */
static int cmp_extent(const void *av, const void *bv)
{
	const struct extent *ea = av;
	const struct extent *eb = bv;

	if (ea->start == eb->start)
		return 0;
	return (ea->start < eb->start) ? -1 : 1;
}
2290
2291 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2292 {
2293 /* find a list of used extents on the give physical device
2294 * (dnum) of the given ddf.
2295 * Return a malloced array of 'struct extent'
2296
2297 * FIXME ignore DDF_Legacy devices?
2298
2299 */
2300 struct extent *rv;
2301 int n = 0;
2302 unsigned int i;
2303
2304 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2305
2306 for (i = 0; i < ddf->max_part; i++) {
2307 const struct vd_config *bvd;
2308 unsigned int ibvd;
2309 struct vcl *v = dl->vlist[i];
2310 if (v == NULL ||
2311 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2312 &bvd, &ibvd) == DDF_NOTFOUND)
2313 continue;
2314 rv[n].start = __be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2315 rv[n].size = __be64_to_cpu(bvd->blocks);
2316 n++;
2317 }
2318 qsort(rv, n, sizeof(*rv), cmp_extent);
2319
2320 rv[n].start = __be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2321 rv[n].size = 0;
2322 return rv;
2323 }
2324 #endif
2325
static int init_super_ddf_bvd(struct supertype *st,
			      mdu_array_info_t *info,
			      unsigned long long size,
			      char *name, char *homehost,
			      int *uuid, unsigned long long data_offset)
{
	/* We are creating a BVD inside a pre-existing container,
	 * so st->sb is already set.
	 * We need to create a new vd_config and a new virtual_entry.
	 * Returns 1 on success, 0 on failure (duplicate name, no free
	 * virtual slot, allocation failure, unsupported level/layout).
	 */
	struct ddf_super *ddf = st->sb;
	unsigned int venum, i;
	struct virtual_entry *ve;
	struct vcl *vcl;
	struct vd_config *vc;

	/* virtual-disk names must be unique within the container */
	if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
		pr_err("This ddf already has an array called %s\n", name);
		return 0;
	}
	venum = find_unused_vde(ddf);
	if (venum == DDF_NOTFOUND) {
		pr_err("Cannot find spare slot for virtual disk\n");
		return 0;
	}
	ve = &ddf->virt->entries[venum];

	/* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
	 * timestamp, random number
	 */
	make_header_guid(ve->guid);
	ve->unit = __cpu_to_be16(info->md_minor);
	ve->pad0 = 0xFFFF;
	/* NOTE(review): the CRC is computed over the container (anchor)
	 * GUID, not over ve->guid just generated — confirm against the
	 * DDF spec that this is the intended input. */
	ve->guid_crc = crc32(0, (unsigned char*)ddf->anchor.guid, DDF_GUID_LEN);
	ve->type = 0;
	ve->state = DDF_state_degraded; /* Will be modified as devices are added */
	if (info->state & 1) /* clean */
		ve->init_state = DDF_init_full;
	else
		ve->init_state = DDF_init_not;

	memset(ve->pad1, 0xff, 14);
	/* name field is space-padded, 16 bytes, not NUL-terminated */
	memset(ve->name, ' ', 16);
	if (name)
		strncpy(ve->name, name, 16);
	ddf->virt->populated_vdes =
		__cpu_to_be16(__be16_to_cpu(ddf->virt->populated_vdes)+1);

	/* Now create a new vd_config */
	if (posix_memalign((void**)&vcl, 512,
		           (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
		pr_err("%s could not allocate vd_config\n", __func__);
		return 0;
	}
	vcl->vcnum = venum;
	vcl->block_sizes = NULL; /* FIXME not for CONCAT */
	vc = &vcl->conf;

	vc->magic = DDF_VD_CONF_MAGIC;
	memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
	vc->timestamp = __cpu_to_be32(time(0)-DECADE);
	vc->seqnum = __cpu_to_be32(1);
	memset(vc->pad0, 0xff, 24);
	vc->chunk_shift = chunk_to_shift(info->chunk_size);
	/* map the md level/layout onto DDF prl/rlq; also fills
	 * prim_elmnt_count and sec_elmnt_count */
	if (layout_md2ddf(info, vc) == -1 ||
		__be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
		pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
		       __func__, info->level, info->layout, info->raid_disks);
		free(vcl);
		return 0;
	}
	vc->sec_elmnt_seq = 0;
	if (alloc_other_bvds(ddf, vcl) != 0) {
		pr_err("%s could not allocate other bvds\n",
		       __func__);
		free(vcl);
		return 0;
	}
	/* per-disk size and total exported array size, in sectors */
	vc->blocks = __cpu_to_be64(info->size * 2);
	vc->array_blocks = __cpu_to_be64(
		calc_array_size(info->level, info->raid_disks, info->layout,
				info->chunk_size, info->size*2));
	memset(vc->pad1, 0xff, 8);
	/* no spares assigned yet */
	vc->spare_refs[0] = 0xffffffff;
	vc->spare_refs[1] = 0xffffffff;
	vc->spare_refs[2] = 0xffffffff;
	vc->spare_refs[3] = 0xffffffff;
	vc->spare_refs[4] = 0xffffffff;
	vc->spare_refs[5] = 0xffffffff;
	vc->spare_refs[6] = 0xffffffff;
	vc->spare_refs[7] = 0xffffffff;
	memset(vc->cache_pol, 0, 8);
	vc->bg_rate = 0x80;
	memset(vc->pad2, 0xff, 3);
	memset(vc->pad3, 0xff, 52);
	memset(vc->pad4, 0xff, 192);
	memset(vc->v0, 0xff, 32);
	memset(vc->v1, 0xff, 32);
	memset(vc->v2, 0xff, 16);
	memset(vc->v3, 0xff, 16);
	memset(vc->vendor, 0xff, 32);

	/* phys_refnum slots all empty (0xff); the LBA offset table
	 * that follows them is zeroed */
	memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
	memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);

	/* secondary BVDs (e.g. RAID10 sub-arrays) start as copies of
	 * the first, distinguished only by sec_elmnt_seq */
	for (i = 1; i < vc->sec_elmnt_count; i++) {
		memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
		vcl->other_bvds[i-1]->sec_elmnt_seq = i;
	}

	vcl->next = ddf->conflist;
	ddf->conflist = vcl;
	ddf->currentconf = vcl;
	ddf_set_updates_pending(ddf);
	return 1;
}
2442
2443 #ifndef MDASSEMBLE
static void add_to_super_ddf_bvd(struct supertype *st,
				 mdu_disk_info_t *dk, int fd, char *devname)
{
	/* fd and devname identify a device with-in the ddf container (st).
	 * dk identifies a location in the new BVD.
	 * We need to find suitable free space in that device and update
	 * the phys_refnum and lba_offset for the newly created vd_config.
	 * We might also want to update the type in the phys_disk
	 * section.
	 *
	 * Alternately: fd == -1 and we have already chosen which device to
	 * use and recorded in dlist->raid_disk;
	 *
	 * Failures (disk not found, not in sync, no free extent, no
	 * free vlist slot) are silent: the function simply returns.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	struct vd_config *vc;
	unsigned int working;
	unsigned int i;
	unsigned long long blocks, pos, esize;
	struct extent *ex;
	unsigned int raid_disk = dk->raid_disk;

	/* locate the container member: by chosen role (fd == -1) or
	 * by device number */
	if (fd == -1) {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->raiddisk == dk->raid_disk)
				break;
	} else {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->major == dk->major &&
			    dl->minor == dk->minor)
				break;
	}
	if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
		return;

	/* With multiple secondary elements (e.g. RAID10), map the md
	 * role onto the correct sub-BVD and its local disk index. */
	vc = &ddf->currentconf->conf;
	if (vc->sec_elmnt_count > 1) {
		unsigned int n = __be16_to_cpu(vc->prim_elmnt_count);
		if (raid_disk >= n)
			vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
		raid_disk %= n;
	}

	ex = get_extents(ddf, dl);
	if (!ex)
		return;

	/* first-fit search for a gap of at least 'blocks' sectors */
	i = 0; pos = 0;
	blocks = __be64_to_cpu(vc->blocks);
	if (ddf->currentconf->block_sizes)
		blocks = ddf->currentconf->block_sizes[dk->raid_disk];

	do {
		esize = ex[i].start - pos;
		if (esize >= blocks)
			break;
		pos = ex[i].start + ex[i].size;
		i++;
	} while (ex[i-1].size);

	free(ex);
	if (esize < blocks)
		return;

	/* record the placement in the vd_config */
	ddf->currentdev = dk->raid_disk;
	vc->phys_refnum[raid_disk] = dl->disk.refnum;
	LBA_OFFSET(ddf, vc)[raid_disk] = __cpu_to_be64(pos);

	/* attach the new BVD to a free slot in the disk's vlist */
	for (i = 0; i < ddf->max_part ; i++)
		if (dl->vlist[i] == NULL)
			break;
	if (i == ddf->max_part)
		return;
	dl->vlist[i] = ddf->currentconf;

	if (fd >= 0)
		dl->fd = fd;
	if (devname)
		dl->devname = devname;

	/* Check how many working raid_disks, and if we can mark
	 * array as optimal yet
	 */
	working = 0;

	for (i = 0; i < __be16_to_cpu(vc->prim_elmnt_count); i++)
		if (vc->phys_refnum[i] != 0xffffffff)
			working++;

	/* Find which virtual_entry */
	i = ddf->currentconf->vcnum;
	if (working == __be16_to_cpu(vc->prim_elmnt_count))
		ddf->virt->entries[i].state =
			(ddf->virt->entries[i].state & ~DDF_state_mask)
			| DDF_state_optimal;

	/* RAID6 one disk short of full is "partially optimal" */
	if (vc->prl == DDF_RAID6 &&
	    working+1 == __be16_to_cpu(vc->prim_elmnt_count))
		ddf->virt->entries[i].state =
			(ddf->virt->entries[i].state & ~DDF_state_mask)
			| DDF_state_part_optimal;

	/* the disk is no longer a global spare, it is active in a VD */
	ddf->phys->entries[dl->pdnum].type &= ~__cpu_to_be16(DDF_Global_Spare);
	ddf->phys->entries[dl->pdnum].type |= __cpu_to_be16(DDF_Active_in_VD);
	ddf_set_updates_pending(ddf);
}
2550
2551 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2552 {
2553 unsigned int i;
2554 for (i = 0; i < __be16_to_cpu(ddf->phys->max_pdes); i++) {
2555 if (all_ff(ddf->phys->entries[i].guid))
2556 return i;
2557 }
2558 return DDF_NOTFOUND;
2559 }
2560
2561 /* add a device to a container, either while creating it or while
2562 * expanding a pre-existing container
2563 */
2564 static int add_to_super_ddf(struct supertype *st,
2565 mdu_disk_info_t *dk, int fd, char *devname,
2566 unsigned long long data_offset)
2567 {
2568 struct ddf_super *ddf = st->sb;
2569 struct dl *dd;
2570 time_t now;
2571 struct tm *tm;
2572 unsigned long long size;
2573 struct phys_disk_entry *pde;
2574 unsigned int n, i;
2575 struct stat stb;
2576 __u32 *tptr;
2577
2578 if (ddf->currentconf) {
2579 add_to_super_ddf_bvd(st, dk, fd, devname);
2580 return 0;
2581 }
2582
2583 /* This is device numbered dk->number. We need to create
2584 * a phys_disk entry and a more detailed disk_data entry.
2585 */
2586 fstat(fd, &stb);
2587 n = find_unused_pde(ddf);
2588 if (n == DDF_NOTFOUND) {
2589 pr_err("%s: No free slot in array, cannot add disk\n",
2590 __func__);
2591 return 1;
2592 }
2593 pde = &ddf->phys->entries[n];
2594 get_dev_size(fd, NULL, &size);
2595 if (size <= 32*1024*1024) {
2596 pr_err("%s: device size must be at least 32MB\n",
2597 __func__);
2598 return 1;
2599 }
2600 size >>= 9;
2601
2602 if (posix_memalign((void**)&dd, 512,
2603 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2604 pr_err("%s could allocate buffer for new disk, aborting\n",
2605 __func__);
2606 return 1;
2607 }
2608 dd->major = major(stb.st_rdev);
2609 dd->minor = minor(stb.st_rdev);
2610 dd->devname = devname;
2611 dd->fd = fd;
2612 dd->spare = NULL;
2613
2614 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2615 now = time(0);
2616 tm = localtime(&now);
2617 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2618 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2619 tptr = (__u32 *)(dd->disk.guid + 16);
2620 *tptr++ = random32();
2621 *tptr = random32();
2622
2623 do {
2624 /* Cannot be bothered finding a CRC of some irrelevant details*/
2625 dd->disk.refnum = random32();
2626 for (i = __be16_to_cpu(ddf->active->max_pd_entries);
2627 i > 0; i--)
2628 if (ddf->phys->entries[i-1].refnum == dd->disk.refnum)
2629 break;
2630 } while (i > 0);
2631
2632 dd->disk.forced_ref = 1;
2633 dd->disk.forced_guid = 1;
2634 memset(dd->disk.vendor, ' ', 32);
2635 memcpy(dd->disk.vendor, "Linux", 5);
2636 memset(dd->disk.pad, 0xff, 442);
2637 for (i = 0; i < ddf->max_part ; i++)
2638 dd->vlist[i] = NULL;
2639
2640 dd->pdnum = n;
2641
2642 if (st->update_tail) {
2643 int len = (sizeof(struct phys_disk) +
2644 sizeof(struct phys_disk_entry));
2645 struct phys_disk *pd;
2646
2647 pd = xmalloc(len);
2648 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2649 pd->used_pdes = __cpu_to_be16(n);
2650 pde = &pd->entries[0];
2651 dd->mdupdate = pd;
2652 } else
2653 ddf->phys->used_pdes = __cpu_to_be16(
2654 1 + __be16_to_cpu(ddf->phys->used_pdes));
2655
2656 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2657 pde->refnum = dd->disk.refnum;
2658 pde->type = __cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2659 pde->state = __cpu_to_be16(DDF_Online);
2660 dd->size = size;
2661 /*
2662 * If there is already a device in dlist, try to reserve the same
2663 * amount of workspace. Otherwise, use 32MB.
2664 * We checked disk size above already.
2665 */
2666 #define __calc_lba(new, old, lba, mb) do { \
2667 unsigned long long dif; \
2668 if ((old) != NULL) \
2669 dif = (old)->size - __be64_to_cpu((old)->lba); \
2670 else \
2671 dif = (new)->size; \
2672 if ((new)->size > dif) \
2673 (new)->lba = __cpu_to_be64((new)->size - dif); \
2674 else \
2675 (new)->lba = __cpu_to_be64((new)->size - (mb*1024*2)); \
2676 } while (0)
2677 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2678 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2679 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2680 pde->config_size = dd->workspace_lba;
2681
2682 sprintf(pde->path, "%17.17s","Information: nil") ;
2683 memset(pde->pad, 0xff, 6);
2684
2685 if (st->update_tail) {
2686 dd->next = ddf->add_list;
2687 ddf->add_list = dd;
2688 } else {
2689 dd->next = ddf->dlist;
2690 ddf->dlist = dd;
2691 ddf_set_updates_pending(ddf);
2692 }
2693
2694 return 0;
2695 }
2696
2697 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2698 {
2699 struct ddf_super *ddf = st->sb;
2700 struct dl *dl;
2701
2702 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2703 * disappeared from the container.
2704 * We need to arrange that it disappears from the metadata and
2705 * internal data structures too.
2706 * Most of the work is done by ddf_process_update which edits
2707 * the metadata and closes the file handle and attaches the memory
2708 * where free_updates will free it.
2709 */
2710 for (dl = ddf->dlist; dl ; dl = dl->next)
2711 if (dl->major == dk->major &&
2712 dl->minor == dk->minor)
2713 break;
2714 if (!dl)
2715 return -1;
2716
2717 if (st->update_tail) {
2718 int len = (sizeof(struct phys_disk) +
2719 sizeof(struct phys_disk_entry));
2720 struct phys_disk *pd;
2721
2722 pd = xmalloc(len);
2723 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2724 pd->used_pdes = __cpu_to_be16(dl->pdnum);
2725 pd->entries[0].state = __cpu_to_be16(DDF_Missing);
2726 append_metadata_update(st, pd, len);
2727 }
2728 return 0;
2729 }
2730
2731 /*
2732 * This is the write_init_super method for a ddf container. It is
2733 * called when creating a container or adding another device to a
2734 * container.
2735 */
2736 #define NULL_CONF_SZ 4096
2737
static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type,
				 char *null_aligned)
{
	/* Write one complete copy of the DDF metadata — header,
	 * controller data, phys records, virt records, all config
	 * records and the disk_data block — to the primary or
	 * secondary location on device 'd'.
	 * 'null_aligned' is a 0xff-filled buffer used to pad unused
	 * config-record slots.
	 * Returns 1 on success, 0 on any write failure.
	 */
	unsigned long long sector;
	struct ddf_header *header;
	int fd, i, n_config, conf_size;
	int ret = 0;

	fd = d->fd;

	/* pick the in-memory header and its on-disk LBA */
	switch (type) {
	case DDF_HEADER_PRIMARY:
		header = &ddf->primary;
		sector = __be64_to_cpu(header->primary_lba);
		break;
	case DDF_HEADER_SECONDARY:
		header = &ddf->secondary;
		sector = __be64_to_cpu(header->secondary_lba);
		break;
	default:
		return 0;
	}

	/* write the header with openflag set; it is rewritten with
	 * openflag clear at 'out' once the whole copy is on disk */
	header->type = type;
	header->openflag = 1;
	header->crc = calc_crc(header, 512);

	lseek64(fd, sector<<9, 0);
	if (write(fd, header, 512) < 0)
		goto out;

	ddf->controller.crc = calc_crc(&ddf->controller, 512);
	if (write(fd, &ddf->controller, 512) < 0)
		goto out;

	ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
	if (write(fd, ddf->phys, ddf->pdsize) < 0)
		goto out;
	ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
	if (write(fd, ddf->virt, ddf->vdsize) < 0)
		goto out;

	/* Now write lots of config records. */
	n_config = ddf->max_part;
	conf_size = ddf->conf_rec_len * 512;
	for (i = 0 ; i <= n_config ; i++) {
		struct vcl *c;
		struct vd_config *vdc = NULL;
		if (i == n_config) {
			/* the final slot holds this disk's spare record */
			c = (struct vcl *)d->spare;
			if (c)
				vdc = &c->conf;
		} else {
			unsigned int dummy;
			c = d->vlist[i];
			if (c)
				get_pd_index_from_refnum(
					c, d->disk.refnum,
					ddf->mppe,
					(const struct vd_config **)&vdc,
					&dummy);
		}
		if (c) {
			vdc->seqnum = header->seq;
			vdc->crc = calc_crc(vdc, conf_size);
			if (write(fd, vdc, conf_size) < 0)
				break;
		} else {
			/* unused slot: pad with 0xff */
			unsigned int togo = conf_size;
			while (togo > NULL_CONF_SZ) {
				/* NOTE(review): a failed write here only
				 * exits this inner while; the trailing
				 * write below still runs — verify this
				 * is the intended error handling */
				if (write(fd, null_aligned, NULL_CONF_SZ) < 0)
					break;
				togo -= NULL_CONF_SZ;
			}
			if (write(fd, null_aligned, togo) < 0)
				break;
		}
	}
	/* loop exits with i == n_config+1 only when every record was
	 * written; anything less means a write failed */
	if (i <= n_config)
		goto out;

	d->disk.crc = calc_crc(&d->disk, 512);
	if (write(fd, &d->disk, 512) < 0)
		goto out;

	ret = 1;
out:
	/* clear openflag and rewrite the header to mark the copy valid */
	header->openflag = 0;
	header->crc = calc_crc(header, 512);

	lseek64(fd, sector<<9, 0);
	if (write(fd, header, 512) < 0)
		ret = 0;

	return ret;
}
2834
static int __write_init_super_ddf(struct supertype *st)
{
	/* Write the full DDF metadata — primary and secondary copies
	 * plus the trailing anchor — to every disk in the container.
	 * Returns 0 when every writable disk was updated successfully,
	 * non-zero otherwise (-ENOMEM on allocation failure).
	 */
	struct ddf_super *ddf = st->sb;
	struct dl *d;
	int attempts = 0;
	int successes = 0;
	unsigned long long size;
	char *null_aligned;
	__u32 seq;

	pr_state(ddf, __func__);
	/* 0xff-filled buffer for padding unused config-record slots */
	if (posix_memalign((void**)&null_aligned, 4096, NULL_CONF_SZ) != 0) {
		return -ENOMEM;
	}
	memset(null_aligned, 0xff, NULL_CONF_SZ);

	/* NOTE(review): seq was stored with __cpu_to_be32(); adding 1
	 * to the raw value increments the wrong byte on little-endian
	 * hosts — verify the intended endianness handling */
	seq = ddf->active->seq + 1;

	/* try to write updated metadata,
	 * if we catch a failure move on to the next disk
	 */
	for (d = ddf->dlist; d; d=d->next) {
		int fd = d->fd;

		if (fd < 0)
			continue;

		attempts++;
		/* We need to fill in the primary, (secondary) and workspace
		 * lba's in the headers, set their checksums,
		 * Also checksum phys, virt....
		 *
		 * Then write everything out, finally the anchor is written.
		 */
		get_dev_size(fd, NULL, &size);
		size /= 512;
		/* use per-disk locations when recorded, else defaults:
		 * workspace/secondary 32MiB and primary 16MiB from the
		 * end of the device */
		if (d->workspace_lba != 0)
			ddf->anchor.workspace_lba = d->workspace_lba;
		else
			ddf->anchor.workspace_lba =
				__cpu_to_be64(size - 32*1024*2);
		if (d->primary_lba != 0)
			ddf->anchor.primary_lba = d->primary_lba;
		else
			ddf->anchor.primary_lba =
				__cpu_to_be64(size - 16*1024*2);
		if (d->secondary_lba != 0)
			ddf->anchor.secondary_lba = d->secondary_lba;
		else
			ddf->anchor.secondary_lba =
				__cpu_to_be64(size - 32*1024*2);
		ddf->anchor.seq = seq;
		/* primary/secondary headers inherit the per-disk values */
		memcpy(&ddf->primary, &ddf->anchor, 512);
		memcpy(&ddf->secondary, &ddf->anchor, 512);

		ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
		ddf->anchor.seq = 0xFFFFFFFF; /* no sequencing in anchor */
		ddf->anchor.crc = calc_crc(&ddf->anchor, 512);

		if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY,
					   null_aligned))
			continue;

		if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY,
					   null_aligned))
			continue;

		/* the anchor occupies the very last sector */
		lseek64(fd, (size-1)*512, SEEK_SET);
		if (write(fd, &ddf->anchor, 512) < 0)
			continue;
		successes++;
	}
	free(null_aligned);

	return attempts != successes;
}
2911
2912 static int write_init_super_ddf(struct supertype *st)
2913 {
2914 struct ddf_super *ddf = st->sb;
2915 struct vcl *currentconf = ddf->currentconf;
2916
2917 /* we are done with currentconf reset it to point st at the container */
2918 ddf->currentconf = NULL;
2919
2920 if (st->update_tail) {
2921 /* queue the virtual_disk and vd_config as metadata updates */
2922 struct virtual_disk *vd;
2923 struct vd_config *vc;
2924 int len;
2925
2926 if (!currentconf) {
2927 int len = (sizeof(struct phys_disk) +
2928 sizeof(struct phys_disk_entry));
2929
2930 /* adding a disk to the container. */
2931 if (!ddf->add_list)
2932 return 0;
2933
2934 append_metadata_update(st, ddf->add_list->mdupdate, len);
2935 ddf->add_list->mdupdate = NULL;
2936 return 0;
2937 }
2938
2939 /* Newly created VD */
2940
2941 /* First the virtual disk. We have a slightly fake header */
2942 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
2943 vd = xmalloc(len);
2944 *vd = *ddf->virt;
2945 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
2946 vd->populated_vdes = __cpu_to_be16(currentconf->vcnum);
2947 append_metadata_update(st, vd, len);
2948
2949 /* Then the vd_config */
2950 len = ddf->conf_rec_len * 512;
2951 vc = xmalloc(len);
2952 memcpy(vc, &currentconf->conf, len);
2953 append_metadata_update(st, vc, len);
2954
2955 /* FIXME I need to close the fds! */
2956 return 0;
2957 } else {
2958 struct dl *d;
2959 for (d = ddf->dlist; d; d=d->next)
2960 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
2961 return __write_init_super_ddf(st);
2962 }
2963 }
2964
2965 #endif
2966
2967 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
2968 unsigned long long data_offset)
2969 {
2970 /* We must reserve the last 32Meg */
2971 if (devsize <= 32*1024*2)
2972 return 0;
2973 return devsize - 32*1024*2;
2974 }
2975
2976 #ifndef MDASSEMBLE
2977
static int reserve_space(struct supertype *st, int raiddisks,
			 unsigned long long size, int chunk,
			 unsigned long long *freesize)
{
	/* Find 'raiddisks' spare extents at least 'size' big (but
	 * only caring about multiples of 'chunk') and remember
	 * them.  If they cannot be found, fail.
	 * On success the chosen devices have dl->raiddisk set to
	 * their role and 1 is returned; 0 means not enough space.
	 * NOTE(review): *freesize is only assigned when size == 0 —
	 * confirm callers passing a fixed size expect it untouched.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	int cnt = 0;

	/* reset any previous selection */
	for (dl = ddf->dlist; dl ; dl=dl->next) {
		dl->raiddisk = -1;
		dl->esize = 0;
	}
	/* Now find largest extent on each device */
	for (dl = ddf->dlist ; dl ; dl=dl->next) {
		struct extent *e = get_extents(ddf, dl);
		unsigned long long pos = 0;
		int i = 0;
		int found = 0;
		unsigned long long minsize = size;

		if (size == 0)
			minsize = chunk;

		if (!e)
			continue;
		/* walk the gaps between used extents; minsize grows to
		 * the largest qualifying gap seen so far */
		do {
			unsigned long long esize;
			esize = e[i].start - pos;
			if (esize >= minsize) {
				found = 1;
				minsize = esize;
			}
			pos = e[i].start + e[i].size;
			i++;
		} while (e[i-1].size);
		if (found) {
			cnt++;
			dl->esize = minsize;
		}
		free(e);
	}
	if (cnt < raiddisks) {
		pr_err("not enough devices with space to create array.\n");
		return 0; /* Not enough free spaces large enough */
	}
	if (size == 0) {
		/* choose the largest size of which there are at least 'raiddisk' */
		for (dl = ddf->dlist ; dl ; dl=dl->next) {
			struct dl *dl2;
			if (dl->esize <= size)
				continue;
			/* This is bigger than 'size', see if there are enough */
			cnt = 0;
			for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
				if (dl2->esize >= dl->esize)
					cnt++;
			if (cnt >= raiddisks)
				size = dl->esize;
		}
		if (chunk) {
			/* round down to a whole number of chunks */
			size = size / chunk;
			size *= chunk;
		}
		*freesize = size;
		if (size < 32) {
			pr_err("not enough spare devices to create array.\n");
			return 0;
		}
	}
	/* We have a 'size' of which there are enough spaces.
	 * We simply do a first-fit */
	cnt = 0;
	for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
		if (dl->esize < size)
			continue;

		dl->raiddisk = cnt;
		cnt++;
	}
	return 1;
}
3064
3065 static int
3066 validate_geometry_ddf_container(struct supertype *st,
3067 int level, int layout, int raiddisks,
3068 int chunk, unsigned long long size,
3069 unsigned long long data_offset,
3070 char *dev, unsigned long long *freesize,
3071 int verbose);
3072
3073 static int validate_geometry_ddf_bvd(struct supertype *st,
3074 int level, int layout, int raiddisks,
3075 int *chunk, unsigned long long size,
3076 unsigned long long data_offset,
3077 char *dev, unsigned long long *freesize,
3078 int verbose);
3079
static int validate_geometry_ddf(struct supertype *st,
				 int level, int layout, int raiddisks,
				 int *chunk, unsigned long long size,
				 unsigned long long data_offset,
				 char *dev, unsigned long long *freesize,
				 int verbose)
{
	/* Top-level geometry check for DDF: dispatch to the container
	 * or BVD validator depending on the requested level and on
	 * what kind of device (if any) was offered.
	 * Returns 1 when the geometry is acceptable, 0 otherwise.
	 */
	int fd;
	struct mdinfo *sra;
	int cfd;

	/* ddf potentially supports lots of things, but it depends on
	 * what devices are offered (and maybe kernel version?)
	 * If given unused devices, we will make a container.
	 * If given devices in a container, we will make a BVD.
	 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
	 */

	if (chunk && *chunk == UnSet)
		*chunk = DEFAULT_CHUNK;

	if (level == -1000000) level = LEVEL_CONTAINER;
	if (level == LEVEL_CONTAINER) {
		/* Must be a fresh device to add to a container */
		return validate_geometry_ddf_container(st, level, layout,
						       raiddisks, chunk?*chunk:0,
						       size, data_offset, dev,
						       freesize,
						       verbose);
	}

	if (!dev) {
		/* No device given: just check that DDF can express
		 * this level/layout, and reserve container space if
		 * one is open. */
		mdu_array_info_t array = {
			.level = level, .layout = layout,
			.raid_disks = raiddisks
		};
		struct vd_config conf;
		if (layout_md2ddf(&array, &conf) == -1) {
			if (verbose)
				pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
				       level, layout, raiddisks);
			return 0;
		}
		/* Should check layout? etc */

		if (st->sb && freesize) {
			/* --create was given a container to create in.
			 * So we need to check that there are enough
			 * free spaces and return the amount of space.
			 * We may as well remember which drives were
			 * chosen so that add_to_super/getinfo_super
			 * can return them.
			 */
			return reserve_space(st, raiddisks, size, chunk?*chunk:0, freesize);
		}
		return 1;
	}

	if (st->sb) {
		/* A container has already been opened, so we are
		 * creating in there. Maybe a BVD, maybe an SVD.
		 * Should make a distinction one day.
		 */
		return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
						 chunk, size, data_offset, dev,
						 freesize,
						 verbose);
	}
	/* This is the first device for the array.
	 * If it is a container, we read it in and do automagic allocations,
	 * no other devices should be given.
	 * Otherwise it must be a member device of a container, and we
	 * do manual allocation.
	 * Later we should check for a BVD and make an SVD.
	 */
	fd = open(dev, O_RDONLY|O_EXCL, 0);
	if (fd >= 0) {
		/* exclusive open succeeded: the device is unused, so a
		 * member array cannot be created on it directly */
		sra = sysfs_read(fd, NULL, GET_VERSION);
		close(fd);
		if (sra && sra->array.major_version == -1 &&
		    strcmp(sra->text_version, "ddf") == 0) {

			/* load super */
			/* find space for 'n' devices. */
			/* remember the devices */
			/* Somehow return the fact that we have enough */
		}

		if (verbose)
			pr_err("ddf: Cannot create this array "
			       "on device %s - a container is required.\n",
			       dev);
		return 0;
	}
	if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
		if (verbose)
			pr_err("ddf: Cannot open %s: %s\n",
			       dev, strerror(errno));
		return 0;
	}
	/* Well, it is in use by someone, maybe a 'ddf' container. */
	cfd = open_container(fd);
	if (cfd < 0) {
		close(fd);
		if (verbose)
			pr_err("ddf: Cannot use %s: %s\n",
			       dev, strerror(EBUSY));
		return 0;
	}
	sra = sysfs_read(cfd, NULL, GET_VERSION);
	close(fd);
	if (sra && sra->array.major_version == -1 &&
	    strcmp(sra->text_version, "ddf") == 0) {
		/* This is a member of a ddf container. Load the container
		 * and try to create a bvd
		 */
		struct ddf_super *ddf;
		if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
			st->sb = ddf;
			strcpy(st->container_devnm, fd2devnm(cfd));
			close(cfd);
			return validate_geometry_ddf_bvd(st, level, layout,
							 raiddisks, chunk, size,
							 data_offset,
							 dev, freesize,
							 verbose);
		}
		close(cfd);
		/* NOTE(review): when load_super_ddf_all() fails, control
		 * falls through to 'return 1' below — confirm reporting
		 * success here is intentional */
	} else /* device may belong to a different container */
		return 0;

	return 1;
}
3213
3214 static int
3215 validate_geometry_ddf_container(struct supertype *st,
3216 int level, int layout, int raiddisks,
3217 int chunk, unsigned long long size,
3218 unsigned long long data_offset,
3219 char *dev, unsigned long long *freesize,
3220 int verbose)
3221 {
3222 int fd;
3223 unsigned long long ldsize;
3224
3225 if (level != LEVEL_CONTAINER)
3226 return 0;
3227 if (!dev)
3228 return 1;
3229
3230 fd = open(dev, O_RDONLY|O_EXCL, 0);
3231 if (fd < 0) {
3232 if (verbose)
3233 pr_err("ddf: Cannot open %s: %s\n",
3234 dev, strerror(errno));
3235 return 0;
3236 }
3237 if (!get_dev_size(fd, dev, &ldsize)) {
3238 close(fd);
3239 return 0;
3240 }
3241 close(fd);
3242
3243 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3244 if (*freesize == 0)
3245 return 0;
3246
3247 return 1;
3248 }
3249
3250 static int validate_geometry_ddf_bvd(struct supertype *st,
3251 int level, int layout, int raiddisks,
3252 int *chunk, unsigned long long size,
3253 unsigned long long data_offset,
3254 char *dev, unsigned long long *freesize,
3255 int verbose)
3256 {
3257 struct stat stb;
3258 struct ddf_super *ddf = st->sb;
3259 struct dl *dl;
3260 unsigned long long pos = 0;
3261 unsigned long long maxsize;
3262 struct extent *e;
3263 int i;
3264 /* ddf/bvd supports lots of things, but not containers */
3265 if (level == LEVEL_CONTAINER) {
3266 if (verbose)
3267 pr_err("DDF cannot create a container within an container\n");
3268 return 0;
3269 }
3270 /* We must have the container info already read in. */
3271 if (!ddf)
3272 return 0;
3273
3274 if (!dev) {
3275 /* General test: make sure there is space for
3276 * 'raiddisks' device extents of size 'size'.
3277 */
3278 unsigned long long minsize = size;
3279 int dcnt = 0;
3280 if (minsize == 0)
3281 minsize = 8;
3282 for (dl = ddf->dlist; dl ; dl = dl->next)
3283 {
3284 int found = 0;
3285 pos = 0;
3286
3287 i = 0;
3288 e = get_extents(ddf, dl);
3289 if (!e) continue;
3290 do {
3291 unsigned long long esize;
3292 esize = e[i].start - pos;
3293 if (esize >= minsize)
3294 found = 1;
3295 pos = e[i].start + e[i].size;
3296 i++;
3297 } while (e[i-1].size);
3298 if (found)
3299 dcnt++;
3300 free(e);
3301 }
3302 if (dcnt < raiddisks) {
3303 if (verbose)
3304 pr_err("ddf: Not enough devices with "
3305 "space for this array (%d < %d)\n",
3306 dcnt, raiddisks);
3307 return 0;
3308 }
3309 return 1;
3310 }
3311 /* This device must be a member of the set */
3312 if (stat(dev, &stb) < 0)
3313 return 0;
3314 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3315 return 0;
3316 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3317 if (dl->major == (int)major(stb.st_rdev) &&
3318 dl->minor == (int)minor(stb.st_rdev))
3319 break;
3320 }
3321 if (!dl) {
3322 if (verbose)
3323 pr_err("ddf: %s is not in the "
3324 "same DDF set\n",
3325 dev);
3326 return 0;
3327 }
3328 e = get_extents(ddf, dl);
3329 maxsize = 0;
3330 i = 0;
3331 if (e) do {
3332 unsigned long long esize;
3333 esize = e[i].start - pos;
3334 if (esize >= maxsize)
3335 maxsize = esize;
3336 pos = e[i].start + e[i].size;
3337 i++;
3338 } while (e[i-1].size);
3339 *freesize = maxsize;
3340 // FIXME here I am
3341
3342 return 1;
3343 }
3344
3345 static int load_super_ddf_all(struct supertype *st, int fd,
3346 void **sbp, char *devname)
3347 {
3348 struct mdinfo *sra;
3349 struct ddf_super *super;
3350 struct mdinfo *sd, *best = NULL;
3351 int bestseq = 0;
3352 int seq;
3353 char nm[20];
3354 int dfd;
3355
3356 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3357 if (!sra)
3358 return 1;
3359 if (sra->array.major_version != -1 ||
3360 sra->array.minor_version != -2 ||
3361 strcmp(sra->text_version, "ddf") != 0)
3362 return 1;
3363
3364 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3365 return 1;
3366 memset(super, 0, sizeof(*super));
3367
3368 /* first, try each device, and choose the best ddf */
3369 for (sd = sra->devs ; sd ; sd = sd->next) {
3370 int rv;
3371 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3372 dfd = dev_open(nm, O_RDONLY);
3373 if (dfd < 0)
3374 return 2;
3375 rv = load_ddf_headers(dfd, super, NULL);
3376 close(dfd);
3377 if (rv == 0) {
3378 seq = __be32_to_cpu(super->active->seq);
3379 if (super->active->openflag)
3380 seq--;
3381 if (!best || seq > bestseq) {
3382 bestseq = seq;
3383 best = sd;
3384 }
3385 }
3386 }
3387 if (!best)
3388 return 1;
3389 /* OK, load this ddf */
3390 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3391 dfd = dev_open(nm, O_RDONLY);
3392 if (dfd < 0)
3393 return 1;
3394 load_ddf_headers(dfd, super, NULL);
3395 load_ddf_global(dfd, super, NULL);
3396 close(dfd);
3397 /* Now we need the device-local bits */
3398 for (sd = sra->devs ; sd ; sd = sd->next) {
3399 int rv;
3400
3401 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3402 dfd = dev_open(nm, O_RDWR);
3403 if (dfd < 0)
3404 return 2;
3405 rv = load_ddf_headers(dfd, super, NULL);
3406 if (rv == 0)
3407 rv = load_ddf_local(dfd, super, NULL, 1);
3408 if (rv)
3409 return 1;
3410 }
3411
3412 *sbp = super;
3413 if (st->ss == NULL) {
3414 st->ss = &super_ddf;
3415 st->minor_version = 0;
3416 st->max_devs = 512;
3417 }
3418 strcpy(st->container_devnm, fd2devnm(fd));
3419 return 0;
3420 }
3421
3422 static int load_container_ddf(struct supertype *st, int fd,
3423 char *devname)
3424 {
3425 return load_super_ddf_all(st, fd, &st->sb, devname);
3426 }
3427
3428 #endif /* MDASSEMBLE */
3429
3430 static int check_secondary(const struct vcl *vc)
3431 {
3432 const struct vd_config *conf = &vc->conf;
3433 int i;
3434
3435 /* The only DDF secondary RAID level md can support is
3436 * RAID 10, if the stripe sizes and Basic volume sizes
3437 * are all equal.
3438 * Other configurations could in theory be supported by exposing
3439 * the BVDs to user space and using device mapper for the secondary
3440 * mapping. So far we don't support that.
3441 */
3442
3443 __u64 sec_elements[4] = {0, 0, 0, 0};
3444 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1<<((n)&63)))
3445 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1<<((n)&63))) != 0)
3446
3447 if (vc->other_bvds == NULL) {
3448 pr_err("No BVDs for secondary RAID found\n");
3449 return -1;
3450 }
3451 if (conf->prl != DDF_RAID1) {
3452 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3453 return -1;
3454 }
3455 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3456 pr_err("Secondary RAID level %d is unsupported\n",
3457 conf->srl);
3458 return -1;
3459 }
3460 __set_sec_seen(conf->sec_elmnt_seq);
3461 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3462 const struct vd_config *bvd = vc->other_bvds[i];
3463 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3464 continue;
3465 if (bvd->srl != conf->srl) {
3466 pr_err("Inconsistent secondary RAID level across BVDs\n");
3467 return -1;
3468 }
3469 if (bvd->prl != conf->prl) {
3470 pr_err("Different RAID levels for BVDs are unsupported\n");
3471 return -1;
3472 }
3473 if (bvd->prim_elmnt_count != conf->prim_elmnt_count) {
3474 pr_err("All BVDs must have the same number of primary elements\n");
3475 return -1;
3476 }
3477 if (bvd->chunk_shift != conf->chunk_shift) {
3478 pr_err("Different strip sizes for BVDs are unsupported\n");
3479 return -1;
3480 }
3481 if (bvd->array_blocks != conf->array_blocks) {
3482 pr_err("Different BVD sizes are unsupported\n");
3483 return -1;
3484 }
3485 __set_sec_seen(bvd->sec_elmnt_seq);
3486 }
3487 for (i = 0; i < conf->sec_elmnt_count; i++) {
3488 if (!__was_sec_seen(i)) {
3489 pr_err("BVD %d is missing\n", i);
3490 return -1;
3491 }
3492 }
3493 return 0;
3494 }
3495
3496 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3497 __u32 refnum, unsigned int nmax,
3498 const struct vd_config **bvd,
3499 unsigned int *idx)
3500 {
3501 unsigned int i, j, n, sec, cnt;
3502
3503 cnt = __be16_to_cpu(vc->conf.prim_elmnt_count);
3504 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3505
3506 for (i = 0, j = 0 ; i < nmax ; i++) {
3507 /* j counts valid entries for this BVD */
3508 if (vc->conf.phys_refnum[i] != 0xffffffff)
3509 j++;
3510 if (vc->conf.phys_refnum[i] == refnum) {
3511 *bvd = &vc->conf;
3512 *idx = i;
3513 return sec * cnt + j - 1;
3514 }
3515 }
3516 if (vc->other_bvds == NULL)
3517 goto bad;
3518
3519 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3520 struct vd_config *vd = vc->other_bvds[n-1];
3521 sec = vd->sec_elmnt_seq;
3522 if (sec == DDF_UNUSED_BVD)
3523 continue;
3524 for (i = 0, j = 0 ; i < nmax ; i++) {
3525 if (vd->phys_refnum[i] != 0xffffffff)
3526 j++;
3527 if (vd->phys_refnum[i] == refnum) {
3528 *bvd = vd;
3529 *idx = i;
3530 return sec * cnt + j - 1;
3531 }
3532 }
3533 }
3534 bad:
3535 *bvd = NULL;
3536 return DDF_NOTFOUND;
3537 }
3538
static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
{
	/* Given a container loaded by load_super_ddf_all,
	 * extract information about all the arrays into
	 * an mdinfo tree.
	 *
	 * For each vcl in conflist: create an mdinfo, fill it in,
	 * then look for matching devices (phys_refnum) in dlist
	 * and create appropriate device mdinfo.
	 *
	 * If 'subarray' is non-NULL, only the virtual disk whose vcnum
	 * matches that decimal string is reported.
	 * Returns a linked list of mdinfo structures owned by the caller.
	 */
	struct ddf_super *ddf = st->sb;
	struct mdinfo *rest = NULL;
	struct vcl *vc;

	for (vc = ddf->conflist ; vc ; vc=vc->next)
	{
		unsigned int i;
		unsigned int j;
		struct mdinfo *this;
		char *ep;
		__u32 *cptr;
		unsigned int pd;

		/* Skip everything except the requested subarray, if any. */
		if (subarray &&
		    (strtoul(subarray, &ep, 10) != vc->vcnum ||
		     *ep != '\0'))
			continue;

		/* Multi-BVD (secondary RAID) configs are only reported when
		 * they pass the RAID10-compatibility checks. */
		if (vc->conf.sec_elmnt_count > 1) {
			if (check_secondary(vc) != 0)
				continue;
		}

		this = xcalloc(1, sizeof(*this));
		this->next = rest;
		rest = this;

		/* Translate DDF prl/rlq into md level/layout; skip arrays
		 * md cannot express. */
		if (layout_ddf2md(&vc->conf, &this->array))
			continue;
		this->array.md_minor = -1;
		this->array.major_version = -1;
		this->array.minor_version = -2;
		/* The last 4 bytes of the GUID hold a (1980-based) creation
		 * timestamp; DECADE converts to a Unix time. */
		cptr = (__u32 *)(vc->conf.guid + 16);
		this->array.ctime = DECADE + __be32_to_cpu(*cptr);
		this->array.utime = DECADE +
			__be32_to_cpu(vc->conf.timestamp);
		this->array.chunk_size = 512 << vc->conf.chunk_shift;

		i = vc->vcnum;
		/* Inconsistent or not-fully-initialized arrays are reported
		 * dirty with resync pending from the start. */
		if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
		    (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
		    DDF_init_full) {
			this->array.state = 0;
			this->resync_start = 0;
		} else {
			this->array.state = 1;
			this->resync_start = MaxSector;
		}
		/* DDF names are space-padded, not NUL-terminated: copy and
		 * strip the padding. */
		memcpy(this->name, ddf->virt->entries[i].name, 16);
		this->name[16]=0;
		for(j=0; j<16; j++)
			if (this->name[j] == ' ')
				this->name[j] = 0;

		memset(this->uuid, 0, sizeof(this->uuid));
		/* DDF 'blocks' is the per-member size in sectors. */
		this->component_size = __be64_to_cpu(vc->conf.blocks);
		this->array.size = this->component_size / 2;
		this->container_member = i;

		/* uuid_from_super_ddf reports on the current conf; point it
		 * at this one temporarily. */
		ddf->currentconf = vc;
		uuid_from_super_ddf(st, this->uuid);
		ddf->currentconf = NULL;

		sprintf(this->text_version, "/%s/%d",
			st->container_devnm, this->container_member);

		/* Attach a device entry for every online physical disk that
		 * belongs to this virtual disk. */
		for (pd = 0; pd < __be16_to_cpu(ddf->phys->used_pdes); pd++) {
			struct mdinfo *dev;
			struct dl *d;
			const struct vd_config *bvd;
			unsigned int iphys;
			int stt;

			/* 0xFFFFFFFF marks an unused PD slot. */
			if (ddf->phys->entries[pd].refnum == 0xFFFFFFFF)
				continue;

			stt = __be16_to_cpu(ddf->phys->entries[pd].state);
			if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
			    != DDF_Online)
				continue;

			i = get_pd_index_from_refnum(
				vc, ddf->phys->entries[pd].refnum,
				ddf->mppe, &bvd, &iphys);
			if (i == DDF_NOTFOUND)
				continue;

			this->array.working_disks++;

			/* Find the local disk record for this refnum. */
			for (d = ddf->dlist; d ; d=d->next)
				if (d->disk.refnum ==
				    ddf->phys->entries[pd].refnum)
					break;
			if (d == NULL)
				/* Haven't found that one yet, maybe there are others */
				continue;

			dev = xcalloc(1, sizeof(*dev));
			dev->next = this->devs;
			this->devs = dev;

			dev->disk.number = __be32_to_cpu(d->disk.refnum);
			dev->disk.major = d->major;
			dev->disk.minor = d->minor;
			dev->disk.raid_disk = i;
			dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
			dev->recovery_start = MaxSector;

			dev->events = __be32_to_cpu(ddf->primary.seq);
			dev->data_offset =
				__be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
			dev->component_size = __be64_to_cpu(bvd->blocks);
			if (d->devname)
				strcpy(dev->name, d->devname);
		}
	}
	return rest;
}
3667
3668 static int store_super_ddf(struct supertype *st, int fd)
3669 {
3670 struct ddf_super *ddf = st->sb;
3671 unsigned long long dsize;
3672 void *buf;
3673 int rc;
3674
3675 if (!ddf)
3676 return 1;
3677
3678 if (!get_dev_size(fd, NULL, &dsize))
3679 return 1;
3680
3681 if (ddf->dlist || ddf->conflist) {
3682 struct stat sta;
3683 struct dl *dl;
3684 int ofd, ret;
3685
3686 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3687 pr_err("%s: file descriptor for invalid device\n",
3688 __func__);
3689 return 1;
3690 }
3691 for (dl = ddf->dlist; dl; dl = dl->next)
3692 if (dl->major == (int)major(sta.st_rdev) &&
3693 dl->minor == (int)minor(sta.st_rdev))
3694 break;
3695 if (!dl) {
3696 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3697 (int)major(sta.st_rdev),
3698 (int)minor(sta.st_rdev));
3699 return 1;
3700 }
3701 /*
3702 For DDF, writing to just one disk makes no sense.
3703 We would run the risk of writing inconsistent meta data
3704 to the devices. So just call __write_init_super_ddf and
3705 write to all devices, including this one.
3706 Use the fd passed to this function, just in case dl->fd
3707 is invalid.
3708 */
3709 ofd = dl->fd;
3710 dl->fd = fd;
3711 ret = __write_init_super_ddf(st);
3712 dl->fd = ofd;
3713 return ret;
3714 }
3715
3716 if (posix_memalign(&buf, 512, 512) != 0)
3717 return 1;
3718 memset(buf, 0, 512);
3719
3720 lseek64(fd, dsize-512, 0);
3721 rc = write(fd, buf, 512);
3722 free(buf);
3723 if (rc < 0)
3724 return 1;
3725 return 0;
3726 }
3727
3728 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3729 {
3730 /*
3731 * return:
3732 * 0 same, or first was empty, and second was copied
3733 * 1 second had wrong number
3734 * 2 wrong uuid
3735 * 3 wrong other info
3736 */
3737 struct ddf_super *first = st->sb;
3738 struct ddf_super *second = tst->sb;
3739 struct dl *dl1, *dl2;
3740 struct vcl *vl1, *vl2;
3741 unsigned int max_vds, max_pds, pd, vd;
3742
3743 if (!first) {
3744 st->sb = tst->sb;
3745 tst->sb = NULL;
3746 return 0;
3747 }
3748
3749 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3750 return 2;
3751
3752 if (first->anchor.seq != second->anchor.seq) {
3753 dprintf("%s: sequence number mismatch %u/%u\n", __func__,
3754 __be32_to_cpu(first->anchor.seq),
3755 __be32_to_cpu(second->anchor.seq));
3756 return 3;
3757 }
3758 if (first->max_part != second->max_part ||
3759 first->phys->used_pdes != second->phys->used_pdes ||
3760 first->virt->populated_vdes != second->virt->populated_vdes) {
3761 dprintf("%s: PD/VD number mismatch\n", __func__);
3762 return 3;
3763 }
3764
3765 max_pds = __be16_to_cpu(first->phys->used_pdes);
3766 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3767 for (pd = 0; pd < max_pds; pd++)
3768 if (first->phys->entries[pd].refnum == dl2->disk.refnum)
3769 break;
3770 if (pd == max_pds) {
3771 dprintf("%s: no match for disk %08x\n", __func__,
3772 __be32_to_cpu(dl2->disk.refnum));
3773 return 3;
3774 }
3775 }
3776
3777 max_vds = __be16_to_cpu(first->active->max_vd_entries);
3778 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3779 if (vl2->conf.magic != DDF_VD_CONF_MAGIC)
3780 continue;
3781 for (vd = 0; vd < max_vds; vd++)
3782 if (!memcmp(first->virt->entries[vd].guid,
3783 vl2->conf.guid, DDF_GUID_LEN))
3784 break;
3785 if (vd == max_vds) {
3786 dprintf("%s: no match for VD config\n", __func__);
3787 return 3;
3788 }
3789 }
3790 /* FIXME should I look at anything else? */
3791
3792 /*
3793 At this point we are fairly sure that the meta data matches.
3794 But the new disk may contain additional local data.
3795 Add it to the super block.
3796 */
3797 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3798 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3799 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3800 DDF_GUID_LEN))
3801 break;
3802 if (vl1) {
3803 if (vl1->other_bvds != NULL &&
3804 vl1->conf.sec_elmnt_seq !=
3805 vl2->conf.sec_elmnt_seq) {
3806 dprintf("%s: adding BVD %u\n", __func__,
3807 vl2->conf.sec_elmnt_seq);
3808 add_other_bvd(vl1, &vl2->conf,
3809 first->conf_rec_len*512);
3810 }
3811 continue;
3812 }
3813
3814 if (posix_memalign((void **)&vl1, 512,
3815 (first->conf_rec_len*512 +
3816 offsetof(struct vcl, conf))) != 0) {
3817 pr_err("%s could not allocate vcl buf\n",
3818 __func__);
3819 return 3;
3820 }
3821
3822 vl1->next = first->conflist;
3823 vl1->block_sizes = NULL;
3824 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3825 if (alloc_other_bvds(first, vl1) != 0) {
3826 pr_err("%s could not allocate other bvds\n",
3827 __func__);
3828 free(vl1);
3829 return 3;
3830 }
3831 for (vd = 0; vd < max_vds; vd++)
3832 if (!memcmp(first->virt->entries[vd].guid,
3833 vl1->conf.guid, DDF_GUID_LEN))
3834 break;
3835 vl1->vcnum = vd;
3836 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
3837 first->conflist = vl1;
3838 }
3839
3840 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3841 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3842 if (dl1->disk.refnum == dl2->disk.refnum)
3843 break;
3844 if (dl1)
3845 continue;
3846
3847 if (posix_memalign((void **)&dl1, 512,
3848 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
3849 != 0) {
3850 pr_err("%s could not allocate disk info buffer\n",
3851 __func__);
3852 return 3;
3853 }
3854 memcpy(dl1, dl2, sizeof(*dl1));
3855 dl1->mdupdate = NULL;
3856 dl1->next = first->dlist;
3857 dl1->fd = -1;
3858 for (pd = 0; pd < max_pds; pd++)
3859 if (first->phys->entries[pd].refnum == dl1->disk.refnum)
3860 break;
3861 dl1->pdnum = pd;
3862 if (dl2->spare) {
3863 if (posix_memalign((void **)&dl1->spare, 512,
3864 first->conf_rec_len*512) != 0) {
3865 pr_err("%s could not allocate spare info buf\n",
3866 __func__);
3867 return 3;
3868 }
3869 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
3870 }
3871 for (vd = 0 ; vd < first->max_part ; vd++) {
3872 if (!dl2->vlist[vd]) {
3873 dl1->vlist[vd] = NULL;
3874 continue;
3875 }
3876 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
3877 if (!memcmp(vl1->conf.guid,
3878 dl2->vlist[vd]->conf.guid,
3879 DDF_GUID_LEN))
3880 break;
3881 dl1->vlist[vd] = vl1;
3882 }
3883 }
3884 first->dlist = dl1;
3885 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
3886 dl1->disk.refnum);
3887 }
3888
3889 return 0;
3890 }
3891
3892 #ifndef MDASSEMBLE
3893 /*
3894 * A new array 'a' has been started which claims to be instance 'inst'
3895 * within container 'c'.
3896 * We need to confirm that the array matches the metadata in 'c' so
3897 * that we don't corrupt any metadata.
3898 */
3899 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
3900 {
3901 struct ddf_super *ddf = c->sb;
3902 int n = atoi(inst);
3903 if (all_ff(ddf->virt->entries[n].guid)) {
3904 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
3905 return -ENODEV;
3906 }
3907 dprintf("ddf: open_new %d\n", n);
3908 a->info.container_member = n;
3909 return 0;
3910 }
3911
3912 /*
3913 * The array 'a' is to be marked clean in the metadata.
3914 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
3915 * clean up to the point (in sectors). If that cannot be recorded in the
3916 * metadata, then leave it as dirty.
3917 *
3918 * For DDF, we need to clear the DDF_state_inconsistent bit in the
3919 * !global! virtual_disk.virtual_entry structure.
3920 */
3921 static int ddf_set_array_state(struct active_array *a, int consistent)
3922 {
3923 struct ddf_super *ddf = a->container->sb;
3924 int inst = a->info.container_member;
3925 int old = ddf->virt->entries[inst].state;
3926 if (consistent == 2) {
3927 /* Should check if a recovery should be started FIXME */
3928 consistent = 1;
3929 if (!is_resync_complete(&a->info))
3930 consistent = 0;
3931 }
3932 if (consistent)
3933 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
3934 else
3935 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
3936 if (old != ddf->virt->entries[inst].state)
3937 ddf_set_updates_pending(ddf);
3938
3939 old = ddf->virt->entries[inst].init_state;
3940 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
3941 if (is_resync_complete(&a->info))
3942 ddf->virt->entries[inst].init_state |= DDF_init_full;
3943 else if (a->info.resync_start == 0)
3944 ddf->virt->entries[inst].init_state |= DDF_init_not;
3945 else
3946 ddf->virt->entries[inst].init_state |= DDF_init_quick;
3947 if (old != ddf->virt->entries[inst].init_state)
3948 ddf_set_updates_pending(ddf);
3949
3950 dprintf("ddf mark %d %s %llu\n", inst, consistent?"clean":"dirty",
3951 a->info.resync_start);
3952 return consistent;
3953 }
3954
3955 static int get_bvd_state(const struct ddf_super *ddf,
3956 const struct vd_config *vc)
3957 {
3958 unsigned int i, n_bvd, working = 0;
3959 unsigned int n_prim = __be16_to_cpu(vc->prim_elmnt_count);
3960 int pd, st, state;
3961 for (i = 0; i < n_prim; i++) {
3962 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
3963 continue;
3964 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
3965 if (pd < 0)
3966 continue;
3967 st = __be16_to_cpu(ddf->phys->entries[pd].state);
3968 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3969 == DDF_Online)
3970 working++;
3971 }
3972
3973 state = DDF_state_degraded;
3974 if (working == n_prim)
3975 state = DDF_state_optimal;
3976 else
3977 switch (vc->prl) {
3978 case DDF_RAID0:
3979 case DDF_CONCAT:
3980 case DDF_JBOD:
3981 state = DDF_state_failed;
3982 break;
3983 case DDF_RAID1:
3984 if (working == 0)
3985 state = DDF_state_failed;
3986 else if (working >= 2)
3987 state = DDF_state_part_optimal;
3988 break;
3989 case DDF_RAID4:
3990 case DDF_RAID5:
3991 if (working < n_prim - 1)
3992 state = DDF_state_failed;
3993 break;
3994 case DDF_RAID6:
3995 if (working < n_prim - 2)
3996 state = DDF_state_failed;
3997 else if (working == n_prim - 1)
3998 state = DDF_state_part_optimal;
3999 break;
4000 }
4001 return state;
4002 }
4003
4004 static int secondary_state(int state, int other, int seclevel)
4005 {
4006 if (state == DDF_state_optimal && other == DDF_state_optimal)
4007 return DDF_state_optimal;
4008 if (seclevel == DDF_2MIRRORED) {
4009 if (state == DDF_state_optimal || other == DDF_state_optimal)
4010 return DDF_state_part_optimal;
4011 if (state == DDF_state_failed && other == DDF_state_failed)
4012 return DDF_state_failed;
4013 return DDF_state_degraded;
4014 } else {
4015 if (state == DDF_state_failed || other == DDF_state_failed)
4016 return DDF_state_failed;
4017 if (state == DDF_state_degraded || other == DDF_state_degraded)
4018 return DDF_state_degraded;
4019 return DDF_state_part_optimal;
4020 }
4021 }
4022
4023 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4024 {
4025 int state = get_bvd_state(ddf, &vcl->conf);
4026 unsigned int i;
4027 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4028 state = secondary_state(
4029 state,
4030 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4031 vcl->conf.srl);
4032 }
4033 return state;
4034 }
4035
4036 /*
4037 * The state of each disk is stored in the global phys_disk structure
4038 * in phys_disk.entries[n].state.
4039 * This makes various combinations awkward.
4040 * - When a device fails in any array, it must be failed in all arrays
4041 * that include a part of this device.
4042 * - When a component is rebuilding, we cannot include it officially in the
4043 * array unless this is the only array that uses the device.
4044 *
4045 * So: when transitioning:
4046 * Online -> failed, just set failed flag. monitor will propagate
4047 * spare -> online, the device might need to be added to the array.
4048 * spare -> failed, just set failed. Don't worry if in array or not.
4049 */
static void ddf_set_disk(struct active_array *a, int n, int state)
{
	/* Update the global phys_disk state for raid-disk 'n' of array 'a'
	 * to reflect 'state' (DS_* flags), inserting the disk into the VD
	 * config if it has just come in-sync, then re-derive the virtual
	 * disk's overall state.
	 */
	struct ddf_super *ddf = a->container->sb;
	unsigned int inst = a->info.container_member, n_bvd;
	struct vcl *vcl;
	struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
					 &n_bvd, &vcl);
	int pd;
	struct mdinfo *mdi;
	struct dl *dl;

	if (vc == NULL) {
		dprintf("ddf: cannot find instance %d!!\n", inst);
		return;
	}
	/* Find the matching slot in 'info'. */
	for (mdi = a->info.devs; mdi; mdi = mdi->next)
		if (mdi->disk.raid_disk == n)
			break;
	if (!mdi)
		return;

	/* and find the 'dl' entry corresponding to that. */
	for (dl = ddf->dlist; dl; dl = dl->next)
		if (mdi->state_fd >= 0 &&
		    mdi->disk.major == dl->major &&
		    mdi->disk.minor == dl->minor)
			break;
	if (!dl)
		return;

	pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
	if (pd < 0 || pd != dl->pdnum) {
		/* disk doesn't currently exist or has changed.
		 * If it is now in_sync, insert it. */
		dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
			__func__, dl->pdnum, dl->major, dl->minor,
			dl->disk.refnum);
		dprintf("%s: array %u disk %u ref %08x pd %d\n",
			__func__, inst, n_bvd, vc->phys_refnum[n_bvd], pd);
		if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
			/* Record the newly in-sync disk in the VD config:
			 * its refnum, its data offset, and flip its type
			 * from global spare to active-in-VD. */
			pd = dl->pdnum; /* FIXME: is this really correct ? */
			vc->phys_refnum[n_bvd] = dl->disk.refnum;
			LBA_OFFSET(ddf, vc)[n_bvd] =
				__cpu_to_be64(mdi->data_offset);
			ddf->phys->entries[pd].type &=
				~__cpu_to_be16(DDF_Global_Spare);
			ddf->phys->entries[pd].type |=
				__cpu_to_be16(DDF_Active_in_VD);
			ddf_set_updates_pending(ddf);
		}
	} else {
		/* Disk already known: fold the DS_* flags into its
		 * phys_disk state bits. */
		int old = ddf->phys->entries[pd].state;
		if (state & DS_FAULTY)
			ddf->phys->entries[pd].state |= __cpu_to_be16(DDF_Failed);
		if (state & DS_INSYNC) {
			ddf->phys->entries[pd].state |= __cpu_to_be16(DDF_Online);
			ddf->phys->entries[pd].state &= __cpu_to_be16(~DDF_Rebuilding);
		}
		if (old != ddf->phys->entries[pd].state)
			ddf_set_updates_pending(ddf);
	}

	dprintf("ddf: set_disk %d to %x\n", n, state);

	/* Now we need to check the state of the array and update
	 * virtual_disk.entries[n].state.
	 * It needs to be one of "optimal", "degraded", "failed".
	 * I don't understand 'deleted' or 'missing'.
	 */
	state = get_svd_state(ddf, vcl);

	if (ddf->virt->entries[inst].state !=
	    ((ddf->virt->entries[inst].state & ~DDF_state_mask)
	     | state)) {

		ddf->virt->entries[inst].state =
			(ddf->virt->entries[inst].state & ~DDF_state_mask)
			| state;
		ddf_set_updates_pending(ddf);
	}

}
4133
4134 static void ddf_sync_metadata(struct supertype *st)
4135 {
4136
4137 /*
4138 * Write all data to all devices.
4139 * Later, we might be able to track whether only local changes
4140 * have been made, or whether any global data has been changed,
4141 * but ddf is sufficiently weird that it probably always
4142 * changes global data ....
4143 */
4144 struct ddf_super *ddf = st->sb;
4145 if (!ddf->updates_pending)
4146 return;
4147 ddf->updates_pending = 0;
4148 __write_init_super_ddf(st);
4149 dprintf("ddf: sync_metadata\n");
4150 }
4151
4152 static void ddf_process_update(struct supertype *st,
4153 struct metadata_update *update)
4154 {
4155 /* Apply this update to the metadata.
4156 * The first 4 bytes are a DDF_*_MAGIC which guides
4157 * our actions.
4158 * Possible update are:
4159 * DDF_PHYS_RECORDS_MAGIC
4160 * Add a new physical device or remove an old one.
4161 * Changes to this record only happen implicitly.
4162 * used_pdes is the device number.
4163 * DDF_VIRT_RECORDS_MAGIC
4164 * Add a new VD. Possibly also change the 'access' bits.
4165 * populated_vdes is the entry number.
4166 * DDF_VD_CONF_MAGIC
4167 * New or updated VD. the VIRT_RECORD must already
4168 * exist. For an update, phys_refnum and lba_offset
4169 * (at least) are updated, and the VD_CONF must
4170 * be written to precisely those devices listed with
4171 * a phys_refnum.
4172 * DDF_SPARE_ASSIGN_MAGIC
4173 * replacement Spare Assignment Record... but for which device?
4174 *
4175 * So, e.g.:
4176 * - to create a new array, we send a VIRT_RECORD and
4177 * a VD_CONF. Then assemble and start the array.
4178 * - to activate a spare we send a VD_CONF to add the phys_refnum
4179 * and offset. This will also mark the spare as active with
4180 * a spare-assignment record.
4181 */
4182 struct ddf_super *ddf = st->sb;
4183 __u32 *magic = (__u32*)update->buf;
4184 struct phys_disk *pd;
4185 struct virtual_disk *vd;
4186 struct vd_config *vc;
4187 struct vcl *vcl;
4188 struct dl *dl;
4189 unsigned int mppe;
4190 unsigned int ent;
4191 unsigned int pdnum, pd2;
4192
4193 dprintf("Process update %x\n", *magic);
4194
4195 switch (*magic) {
4196 case DDF_PHYS_RECORDS_MAGIC:
4197
4198 if (update->len != (sizeof(struct phys_disk) +
4199 sizeof(struct phys_disk_entry)))
4200 return;
4201 pd = (struct phys_disk*)update->buf;
4202
4203 ent = __be16_to_cpu(pd->used_pdes);
4204 if (ent >= __be16_to_cpu(ddf->phys->max_pdes))
4205 return;
4206 if (pd->entries[0].state & __cpu_to_be16(DDF_Missing)) {
4207 struct dl **dlp;
4208 /* removing this disk. */
4209 ddf->phys->entries[ent].state |= __cpu_to_be16(DDF_Missing);
4210 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4211 struct dl *dl = *dlp;
4212 if (dl->pdnum == (signed)ent) {
4213 close(dl->fd);
4214 dl->fd = -1;
4215 /* FIXME this doesn't free
4216 * dl->devname */
4217 update->space = dl;
4218 *dlp = dl->next;
4219 break;
4220 }
4221 }
4222 ddf_set_updates_pending(ddf);
4223 return;
4224 }
4225 if (!all_ff(ddf->phys->entries[ent].guid))
4226 return;
4227 ddf->phys->entries[ent] = pd->entries[0];
4228 ddf->phys->used_pdes = __cpu_to_be16(1 +
4229 __be16_to_cpu(ddf->phys->used_pdes));
4230 ddf_set_updates_pending(ddf);
4231 if (ddf->add_list) {
4232 struct active_array *a;
4233 struct dl *al = ddf->add_list;
4234 ddf->add_list = al->next;
4235
4236 al->next = ddf->dlist;
4237 ddf->dlist = al;
4238
4239 /* As a device has been added, we should check
4240 * for any degraded devices that might make
4241 * use of this spare */
4242 for (a = st->arrays ; a; a=a->next)
4243 a->check_degraded = 1;
4244 }
4245 break;
4246
4247 case DDF_VIRT_RECORDS_MAGIC:
4248
4249 if (update->len != (sizeof(struct virtual_disk) +
4250 sizeof(struct virtual_entry)))
4251 return;
4252 vd = (struct virtual_disk*)update->buf;
4253
4254 ent = find_unused_vde(ddf);
4255 if (ent == DDF_NOTFOUND)
4256 return;
4257 ddf->virt->entries[ent] = vd->entries[0];
4258 ddf->virt->populated_vdes = __cpu_to_be16(1 +
4259 __be16_to_cpu(ddf->virt->populated_vdes));
4260 ddf_set_updates_pending(ddf);
4261 break;
4262
4263 case DDF_VD_CONF_MAGIC:
4264 dprintf("len %d %d\n", update->len, ddf->conf_rec_len);
4265
4266 mppe = __be16_to_cpu(ddf->anchor.max_primary_element_entries);
4267 if ((unsigned)update->len != ddf->conf_rec_len * 512)
4268 return;
4269 vc = (struct vd_config*)update->buf;
4270 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4271 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4272 break;
4273 dprintf("vcl = %p\n", vcl);
4274 if (vcl) {
4275 /* An update, just copy the phys_refnum and lba_offset
4276 * fields
4277 */
4278 memcpy(vcl->conf.phys_refnum, vc->phys_refnum,
4279 mppe * (sizeof(__u32) + sizeof(__u64)));
4280 } else {
4281 /* A new VD_CONF */
4282 if (!update->space)
4283 return;
4284 vcl = update->space;
4285 update->space = NULL;
4286 vcl->next = ddf->conflist;
4287 memcpy(&vcl->conf, vc, update->len);
4288 ent = find_vde_by_guid(ddf, vc->guid);
4289 if (ent == DDF_NOTFOUND)
4290 return;
4291 vcl->vcnum = ent;
4292 ddf->conflist = vcl;
4293 }
4294 /* Set DDF_Transition on all Failed devices - to help
4295 * us detect those that are no longer in use
4296 */
4297 for (pdnum = 0; pdnum < __be16_to_cpu(ddf->phys->used_pdes); pdnum++)
4298 if (ddf->phys->entries[pdnum].state
4299 & __be16_to_cpu(DDF_Failed))
4300 ddf->phys->entries[pdnum].state
4301 |= __be16_to_cpu(DDF_Transition);
4302 /* Now make sure vlist is correct for each dl. */
4303 for (dl = ddf->dlist; dl; dl = dl->next) {
4304 unsigned int dn;
4305 unsigned int vn = 0;
4306 int in_degraded = 0;
4307 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4308 for (dn=0; dn < ddf->mppe ; dn++)
4309 if (vcl->conf.phys_refnum[dn] ==
4310 dl->disk.refnum) {
4311 int vstate;
4312 dprintf("dev %d has %p at %d\n",
4313 dl->pdnum, vcl, vn);
4314 /* Clear the Transition flag */
4315 if (ddf->phys->entries[dl->pdnum].state
4316 & __be16_to_cpu(DDF_Failed))
4317 ddf->phys->entries[dl->pdnum].state &=
4318 ~__be16_to_cpu(DDF_Transition);
4319
4320 dl->vlist[vn++] = vcl;
4321 vstate = ddf->virt->entries[vcl->vcnum].state
4322 & DDF_state_mask;
4323 if (vstate == DDF_state_degraded ||
4324 vstate == DDF_state_part_optimal)
4325 in_degraded = 1;
4326 break;
4327 }
4328 while (vn < ddf->max_part)
4329 dl->vlist[vn++] = NULL;
4330 if (dl->vlist[0]) {
4331 ddf->phys->entries[dl->pdnum].type &=
4332 ~__cpu_to_be16(DDF_Global_Spare);
4333 if (!(ddf->phys->entries[dl->pdnum].type &
4334 __cpu_to_be16(DDF_Active_in_VD))) {
4335 ddf->phys->entries[dl->pdnum].type |=
4336 __cpu_to_be16(DDF_Active_in_VD);
4337 if (in_degraded)
4338 ddf->phys->entries[dl->pdnum].state |=
4339 __cpu_to_be16(DDF_Rebuilding);
4340 }
4341 }
4342 if (dl->spare) {
4343 ddf->phys->entries[dl->pdnum].type &=
4344 ~__cpu_to_be16(DDF_Global_Spare);
4345 ddf->phys->entries[dl->pdnum].type |=
4346 __cpu_to_be16(DDF_Spare);
4347 }
4348 if (!dl->vlist[0] && !dl->spare) {
4349 ddf->phys->entries[dl->pdnum].type |=
4350 __cpu_to_be16(DDF_Global_Spare);
4351 ddf->phys->entries[dl->pdnum].type &=
4352 ~__cpu_to_be16(DDF_Spare |
4353 DDF_Active_in_VD);
4354 }
4355 }
4356
4357 /* Now remove any 'Failed' devices that are not part
4358 * of any VD. They will have the Transition flag set.
4359 * Once done, we need to update all dl->pdnum numbers.
4360 */
4361 pd2 = 0;
4362 for (pdnum = 0; pdnum < __be16_to_cpu(ddf->phys->used_pdes); pdnum++)
4363 if ((ddf->phys->entries[pdnum].state
4364 & __be16_to_cpu(DDF_Failed))
4365 && (ddf->phys->entries[pdnum].state
4366 & __be16_to_cpu(DDF_Transition)))
4367 /* skip this one */;
4368 else if (pdnum == pd2)
4369 pd2++;
4370 else {
4371 ddf->phys->entries[pd2] = ddf->phys->entries[pdnum];
4372 for (dl = ddf->dlist; dl; dl = dl->next)
4373 if (dl->pdnum == (int)pdnum)
4374 dl->pdnum = pd2;
4375 pd2++;
4376 }
4377 ddf->phys->used_pdes = __cpu_to_be16(pd2);
4378 while (pd2 < pdnum) {
4379 memset(ddf->phys->entries[pd2].guid, 0xff, DDF_GUID_LEN);
4380 pd2++;
4381 }
4382
4383 ddf_set_updates_pending(ddf);
4384 break;
4385 case DDF_SPARE_ASSIGN_MAGIC:
4386 default: break;
4387 }
4388 }
4389
4390 static void ddf_prepare_update(struct supertype *st,
4391 struct metadata_update *update)
4392 {
4393 /* This update arrived at managemon.
4394 * We are about to pass it to monitor.
4395 * If a malloc is needed, do it here.
4396 */
4397 struct ddf_super *ddf = st->sb;
4398 __u32 *magic = (__u32*)update->buf;
4399 if (*magic == DDF_VD_CONF_MAGIC)
4400 if (posix_memalign(&update->space, 512,
4401 offsetof(struct vcl, conf)
4402 + ddf->conf_rec_len * 512) != 0)
4403 update->space = NULL;
4404 }
4405
4406 /*
4407 * Check if the array 'a' is degraded but not failed.
4408 * If it is, find as many spares as are available and needed and
4409 * arrange for their inclusion.
4410 * We only choose devices which are not already in the array,
4411 * and prefer those with a spare-assignment to this array.
4412 * otherwise we choose global spares - assuming always that
4413 * there is enough room.
4414 * For each spare that we assign, we return an 'mdinfo' which
4415 * describes the position for the device in the array.
4416 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4417 * the new phys_refnum and lba_offset values.
4418 *
4419 * Only worry about BVDs at the moment.
4420 */
4421 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4422 struct metadata_update **updates)
4423 {
4424 int working = 0;
4425 struct mdinfo *d;
4426 struct ddf_super *ddf = a->container->sb;
4427 int global_ok = 0;
4428 struct mdinfo *rv = NULL;
4429 struct mdinfo *di;
4430 struct metadata_update *mu;
4431 struct dl *dl;
4432 int i;
4433 struct vcl *vcl;
4434 struct vd_config *vc;
4435 unsigned int n_bvd;
4436
4437 for (d = a->info.devs ; d ; d = d->next) {
4438 if ((d->curr_state & DS_FAULTY) &&
4439 d->state_fd >= 0)
4440 /* wait for Removal to happen */
4441 return NULL;
4442 if (d->state_fd >= 0)
4443 working ++;
4444 }
4445
4446 dprintf("ddf_activate: working=%d (%d) level=%d\n", working, a->info.array.raid_disks,
4447 a->info.array.level);
4448 if (working == a->info.array.raid_disks)
4449 return NULL; /* array not degraded */
4450 switch (a->info.array.level) {
4451 case 1:
4452 if (working == 0)
4453 return NULL; /* failed */
4454 break;
4455 case 4:
4456 case 5:
4457 if (working < a->info.array.raid_disks - 1)
4458 return NULL; /* failed */
4459 break;
4460 case 6:
4461 if (working < a->info.array.raid_disks - 2)
4462 return NULL; /* failed */
4463 break;
4464 default: /* concat or stripe */
4465 return NULL; /* failed */
4466 }
4467
4468 /* For each slot, if it is not working, find a spare */
4469 dl = ddf->dlist;
4470 for (i = 0; i < a->info.array.raid_disks; i++) {
4471 for (d = a->info.devs ; d ; d = d->next)
4472 if (d->disk.raid_disk == i)
4473 break;
4474 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4475 if (d && (d->state_fd >= 0))
4476 continue;
4477
4478 /* OK, this device needs recovery. Find a spare */
4479 again:
4480 for ( ; dl ; dl = dl->next) {
4481 unsigned long long esize;
4482 unsigned long long pos;
4483 struct mdinfo *d2;
4484 int is_global = 0;
4485 int is_dedicated = 0;
4486 struct extent *ex;
4487 unsigned int j;
4488 /* If in this array, skip */
4489 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4490 if (d2->state_fd >= 0 &&
4491 d2->disk.major == dl->major &&
4492 d2->disk.minor == dl->minor) {
4493 dprintf("%x:%x already in array\n", dl->major, dl->minor);
4494 break;
4495 }
4496 if (d2)
4497 continue;
4498 if (ddf->phys->entries[dl->pdnum].type &
4499 __cpu_to_be16(DDF_Spare)) {
4500 /* Check spare assign record */
4501 if (dl->spare) {
4502 if (dl->spare->type & DDF_spare_dedicated) {
4503 /* check spare_ents for guid */
4504 for (j = 0 ;
4505 j < __be16_to_cpu(dl->spare->populated);
4506 j++) {
4507 if (memcmp(dl->spare->spare_ents[j].guid,
4508 ddf->virt->entries[a->info.container_member].guid,
4509 DDF_GUID_LEN) == 0)
4510 is_dedicated = 1;
4511 }
4512 } else
4513 is_global = 1;
4514 }
4515 } else if (ddf->phys->entries[dl->pdnum].type &
4516 __cpu_to_be16(DDF_Global_Spare)) {
4517 is_global = 1;
4518 } else if (!(ddf->phys->entries[dl->pdnum].state &
4519 __cpu_to_be16(DDF_Failed))) {
4520 /* we can possibly use some of this */
4521 is_global = 1;
4522 }
4523 if ( ! (is_dedicated ||
4524 (is_global && global_ok))) {
4525 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4526 is_dedicated, is_global);
4527 continue;
4528 }
4529
4530 /* We are allowed to use this device - is there space?
4531 * We need a->info.component_size sectors */
4532 ex = get_extents(ddf, dl);
4533 if (!ex) {
4534 dprintf("cannot get extents\n");
4535 continue;
4536 }
4537 j = 0; pos = 0;
4538 esize = 0;
4539
4540 do {
4541 esize = ex[j].start - pos;
4542 if (esize >= a->info.component_size)
4543 break;
4544 pos = ex[j].start + ex[j].size;
4545 j++;
4546 } while (ex[j-1].size);
4547
4548 free(ex);
4549 if (esize < a->info.component_size) {
4550 dprintf("%x:%x has no room: %llu %llu\n",
4551 dl->major, dl->minor,
4552 esize, a->info.component_size);
4553 /* No room */
4554 continue;
4555 }
4556
4557 /* Cool, we have a device with some space at pos */
4558 di = xcalloc(1, sizeof(*di));
4559 di->disk.number = i;
4560 di->disk.raid_disk = i;
4561 di->disk.major = dl->major;
4562 di->disk.minor = dl->minor;
4563 di->disk.state = 0;
4564 di->recovery_start = 0;
4565 di->data_offset = pos;
4566 di->component_size = a->info.component_size;
4567 di->container_member = dl->pdnum;
4568 di->next = rv;
4569 rv = di;
4570 dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
4571 i, pos);
4572
4573 break;
4574 }
4575 if (!dl && ! global_ok) {
4576 /* not enough dedicated spares, try global */
4577 global_ok = 1;
4578 dl = ddf->dlist;
4579 goto again;
4580 }
4581 }
4582
4583 if (!rv)
4584 /* No spares found */
4585 return rv;
4586 /* Now 'rv' has a list of devices to return.
4587 * Create a metadata_update record to update the
4588 * phys_refnum and lba_offset values
4589 */
4590 mu = xmalloc(sizeof(*mu));
4591 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
4592 free(mu);
4593 mu = NULL;
4594 }
4595 mu->buf = xmalloc(ddf->conf_rec_len * 512);
4596 mu->len = ddf->conf_rec_len * 512;
4597 mu->space = NULL;
4598 mu->space_list = NULL;
4599 mu->next = *updates;
4600 vc = find_vdcr(ddf, a->info.container_member, di->disk.raid_disk,
4601 &n_bvd, &vcl);
4602 memcpy(mu->buf, vc, ddf->conf_rec_len * 512);
4603
4604 vc = (struct vd_config*)mu->buf;
4605 for (di = rv ; di ; di = di->next) {
4606 vc->phys_refnum[di->disk.raid_disk] =
4607 ddf->phys->entries[dl->pdnum].refnum;
4608 LBA_OFFSET(ddf, vc)[di->disk.raid_disk]
4609 = __cpu_to_be64(di->data_offset);
4610 }
4611 *updates = mu;
4612 return rv;
4613 }
4614 #endif /* MDASSEMBLE */
4615
4616 static int ddf_level_to_layout(int level)
4617 {
4618 switch(level) {
4619 case 0:
4620 case 1:
4621 return 0;
4622 case 5:
4623 return ALGORITHM_LEFT_SYMMETRIC;
4624 case 6:
4625 return ALGORITHM_ROTATING_N_CONTINUE;
4626 case 10:
4627 return 0x102;
4628 default:
4629 return UnSet;
4630 }
4631 }
4632
4633 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
4634 {
4635 if (level && *level == UnSet)
4636 *level = LEVEL_CONTAINER;
4637
4638 if (level && layout && *layout == UnSet)
4639 *layout = ddf_level_to_layout(*level);
4640 }
4641
/* Method table wiring the DDF metadata format into mdadm's generic
 * superswitch interface.  Grouping mirrors the build configuration:
 * the first section is only needed by full mdadm (not mdassemble),
 * the middle section is common, and the last is for mdmon. */
struct superswitch super_ddf = {
#ifndef MDASSEMBLE
	/* examine / create / container management - full mdadm only */
	.examine_super = examine_super_ddf,
	.brief_examine_super = brief_examine_super_ddf,
	.brief_examine_subarrays = brief_examine_subarrays_ddf,
	.export_examine_super = export_examine_super_ddf,
	.detail_super = detail_super_ddf,
	.brief_detail_super = brief_detail_super_ddf,
	.validate_geometry = validate_geometry_ddf,
	.write_init_super = write_init_super_ddf,
	.add_to_super = add_to_super_ddf,
	.remove_from_super = remove_from_super_ddf,
	.load_container = load_container_ddf,
	.copy_metadata = copy_metadata_ddf,
#endif
	/* common operations - also needed by mdassemble */
	.match_home = match_home_ddf,
	.uuid_from_super= uuid_from_super_ddf,
	.getinfo_super = getinfo_super_ddf,
	.update_super = update_super_ddf,

	.avail_size = avail_size_ddf,

	.compare_super = compare_super_ddf,

	.load_super = load_super_ddf,
	.init_super = init_super_ddf,
	.store_super = store_super_ddf,
	.free_super = free_super_ddf,
	.match_metadata_desc = match_metadata_desc_ddf,
	.container_content = container_content_ddf,
	.default_geometry = default_geometry_ddf,

	/* DDF is externally-managed metadata (mdmon owns updates) */
	.external = 1,

#ifndef MDASSEMBLE
/* for mdmon */
	.open_new = ddf_open_new,
	.set_array_state= ddf_set_array_state,
	.set_disk = ddf_set_disk,
	.sync_metadata = ddf_sync_metadata,
	.process_update = ddf_process_update,
	.prepare_update = ddf_prepare_update,
	.activate_spare = ddf_activate_spare,
#endif
	.name = "ddf",
};