super-ddf.c
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add the number of
38 * seconds in the 1970s decade to convert to Linux timestamps:
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
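/* Example (sketch only; 'ddf_ts' stands for any big-endian DDF timestamp
 * field): converting between DDF time and Unix time_t is just an addition
 * or subtraction of DECADE, as done later by getinfo_super_ddf() and
 * make_header_guid():
 *
 *	time_t unix_time = (time_t)__be32_to_cpu(ddf_ts) + DECADE;
 *	__u32 ddf_ts_out = __cpu_to_be32(time(0) - DECADE);
 */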
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* The DDF metadata handling.
51 * DDF metadata lives at the end of the device.
52 * The last 512 byte block provides an 'anchor' which is used to locate
53 * the rest of the metadata which usually lives immediately behind the anchor.
54 *
55 * Note:
56 * - all multibyte numeric fields are bigendian.
57 * - all strings are space padded.
58 *
59 */
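/* A minimal sketch of how the anchor is found (the full version, with all
 * consistency checks, is load_ddf_headers() further down):
 *
 *	unsigned long long dsize;
 *	struct ddf_header anchor;
 *	get_dev_size(fd, NULL, &dsize);
 *	lseek64(fd, dsize - 512, 0);
 *	if (read(fd, &anchor, 512) != 512 ||
 *	    anchor.magic != DDF_HEADER_MAGIC ||
 *	    calc_crc(&anchor, 512) != anchor.crc)
 *		... not a usable DDF anchor ...
 */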
60
61 /* Primary Raid Level (PRL) */
62 #define DDF_RAID0 0x00
63 #define DDF_RAID1 0x01
64 #define DDF_RAID3 0x03
65 #define DDF_RAID4 0x04
66 #define DDF_RAID5 0x05
67 #define DDF_RAID1E 0x11
68 #define DDF_JBOD 0x0f
69 #define DDF_CONCAT 0x1f
70 #define DDF_RAID5E 0x15
71 #define DDF_RAID5EE 0x25
72 #define DDF_RAID6 0x06
73
74 /* Raid Level Qualifier (RLQ) */
75 #define DDF_RAID0_SIMPLE 0x00
76 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
77 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
78 #define DDF_RAID3_0 0x00 /* parity in first extent */
79 #define DDF_RAID3_N 0x01 /* parity in last extent */
80 #define DDF_RAID4_0 0x00 /* parity in first extent */
81 #define DDF_RAID4_N 0x01 /* parity in last extent */
82 /* these apply to raid5e and raid5ee as well */
83 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
84 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
85 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
86 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
87
88 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
89 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
90
91 /* Secondary RAID Level (SRL) */
92 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
93 #define DDF_2MIRRORED 0x01
94 #define DDF_2CONCAT 0x02
95 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
96
97 /* Magic numbers */
98 #define DDF_HEADER_MAGIC __cpu_to_be32(0xDE11DE11)
99 #define DDF_CONTROLLER_MAGIC __cpu_to_be32(0xAD111111)
100 #define DDF_PHYS_RECORDS_MAGIC __cpu_to_be32(0x22222222)
101 #define DDF_PHYS_DATA_MAGIC __cpu_to_be32(0x33333333)
102 #define DDF_VIRT_RECORDS_MAGIC __cpu_to_be32(0xDDDDDDDD)
103 #define DDF_VD_CONF_MAGIC __cpu_to_be32(0xEEEEEEEE)
104 #define DDF_SPARE_ASSIGN_MAGIC __cpu_to_be32(0x55555555)
105 #define DDF_VU_CONF_MAGIC __cpu_to_be32(0x88888888)
106 #define DDF_VENDOR_LOG_MAGIC __cpu_to_be32(0x01dBEEF0)
107 #define DDF_BBM_LOG_MAGIC __cpu_to_be32(0xABADB10C)
108
109 #define DDF_GUID_LEN 24
110 #define DDF_REVISION_0 "01.00.00"
111 #define DDF_REVISION_2 "01.02.00"
112
113 struct ddf_header {
114 __u32 magic; /* DDF_HEADER_MAGIC */
115 __u32 crc;
116 char guid[DDF_GUID_LEN];
117 char revision[8]; /* 01.02.00 */
118 __u32 seq; /* starts at '1' */
119 __u32 timestamp;
120 __u8 openflag;
121 __u8 foreignflag;
122 __u8 enforcegroups;
123 __u8 pad0; /* 0xff */
124 __u8 pad1[12]; /* 12 * 0xff */
125 /* 64 bytes so far */
126 __u8 header_ext[32]; /* reserved: fill with 0xff */
127 __u64 primary_lba;
128 __u64 secondary_lba;
129 __u8 type;
130 __u8 pad2[3]; /* 0xff */
131 __u32 workspace_len; /* sectors for vendor space -
132 * at least 32768(sectors) */
133 __u64 workspace_lba;
134 __u16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
135 __u16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
136 __u16 max_partitions; /* i.e. max num of configuration
137 record entries per disk */
138 __u16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
139 *12/512) */
140 __u16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
141 __u8 pad3[54]; /* 0xff */
142 /* 192 bytes so far */
143 __u32 controller_section_offset;
144 __u32 controller_section_length;
145 __u32 phys_section_offset;
146 __u32 phys_section_length;
147 __u32 virt_section_offset;
148 __u32 virt_section_length;
149 __u32 config_section_offset;
150 __u32 config_section_length;
151 __u32 data_section_offset;
152 __u32 data_section_length;
153 __u32 bbm_section_offset;
154 __u32 bbm_section_length;
155 __u32 diag_space_offset;
156 __u32 diag_space_length;
157 __u32 vendor_offset;
158 __u32 vendor_length;
159 /* 256 bytes so far */
160 __u8 pad4[256]; /* 0xff */
161 };
162
163 /* type field */
164 #define DDF_HEADER_ANCHOR 0x00
165 #define DDF_HEADER_PRIMARY 0x01
166 #define DDF_HEADER_SECONDARY 0x02
167
168 /* The content of the 'controller section' - global scope */
169 struct ddf_controller_data {
170 __u32 magic; /* DDF_CONTROLLER_MAGIC */
171 __u32 crc;
172 char guid[DDF_GUID_LEN];
173 struct controller_type {
174 __u16 vendor_id;
175 __u16 device_id;
176 __u16 sub_vendor_id;
177 __u16 sub_device_id;
178 } type;
179 char product_id[16];
180 __u8 pad[8]; /* 0xff */
181 __u8 vendor_data[448];
182 };
183
184 /* The content of phys_section - global scope */
185 struct phys_disk {
186 __u32 magic; /* DDF_PHYS_RECORDS_MAGIC */
187 __u32 crc;
188 __u16 used_pdes;
189 __u16 max_pdes;
190 __u8 pad[52];
191 struct phys_disk_entry {
192 char guid[DDF_GUID_LEN];
193 __u32 refnum;
194 __u16 type;
195 __u16 state;
196 __u64 config_size; /* DDF structures must be after here */
197 char path[18]; /* another horrible structure really */
198 __u8 pad[6];
199 } entries[0];
200 };
201
202 /* phys_disk_entry.type is a bitmap - bigendian remember */
203 #define DDF_Forced_PD_GUID 1
204 #define DDF_Active_in_VD 2
205 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
206 #define DDF_Spare 8 /* overrides Global_spare */
207 #define DDF_Foreign 16
208 #define DDF_Legacy 32 /* no DDF on this device */
209
210 #define DDF_Interface_mask 0xf00
211 #define DDF_Interface_SCSI 0x100
212 #define DDF_Interface_SAS 0x200
213 #define DDF_Interface_SATA 0x300
214 #define DDF_Interface_FC 0x400
215
216 /* phys_disk_entry.state is a bigendian bitmap */
217 #define DDF_Online 1
218 #define DDF_Failed 2 /* overrides 1,4,8 */
219 #define DDF_Rebuilding 4
220 #define DDF_Transition 8
221 #define DDF_SMART 16
222 #define DDF_ReadErrors 32
223 #define DDF_Missing 64
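/* Sketch: like the type bits above, these state bits are tested after
 * byte-swapping the on-disk value, e.g.
 *
 *	if (__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Failed)
 *		... physical disk i has failed ...
 */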
224
225 /* The content of the virt_section global scope */
226 struct virtual_disk {
227 __u32 magic; /* DDF_VIRT_RECORDS_MAGIC */
228 __u32 crc;
229 __u16 populated_vdes;
230 __u16 max_vdes;
231 __u8 pad[52];
232 struct virtual_entry {
233 char guid[DDF_GUID_LEN];
234 __u16 unit;
235 __u16 pad0; /* 0xffff */
236 __u16 guid_crc;
237 __u16 type;
238 __u8 state;
239 __u8 init_state;
240 __u8 pad1[14];
241 char name[16];
242 } entries[0];
243 };
244
245 /* virtual_entry.type is a bitmap - bigendian */
246 #define DDF_Shared 1
247 #define DDF_Enforce_Groups 2
248 #define DDF_Unicode 4
249 #define DDF_Owner_Valid 8
250
251 /* virtual_entry.state is a bigendian bitmap */
252 #define DDF_state_mask 0x7
253 #define DDF_state_optimal 0x0
254 #define DDF_state_degraded 0x1
255 #define DDF_state_deleted 0x2
256 #define DDF_state_missing 0x3
257 #define DDF_state_failed 0x4
258 #define DDF_state_part_optimal 0x5
259
260 #define DDF_state_morphing 0x8
261 #define DDF_state_inconsistent 0x10
262
263 /* virtual_entry.init_state is a bigendian bitmap */
264 #define DDF_initstate_mask 0x03
265 #define DDF_init_not 0x00
266 #define DDF_init_quick 0x01 /* initialisation in progress,
267 * i.e. 'state_inconsistent' */
268 #define DDF_init_full 0x02
269
270 #define DDF_access_mask 0xc0
271 #define DDF_access_rw 0x00
272 #define DDF_access_ro 0x80
273 #define DDF_access_blocked 0xc0
274
275 /* The content of the config_section - local scope
276 * It has multiple records each config_record_len sectors
277 * They can be vd_config or spare_assign
278 */
279
280 struct vd_config {
281 __u32 magic; /* DDF_VD_CONF_MAGIC */
282 __u32 crc;
283 char guid[DDF_GUID_LEN];
284 __u32 timestamp;
285 __u32 seqnum;
286 __u8 pad0[24];
287 __u16 prim_elmnt_count;
288 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
289 __u8 prl;
290 __u8 rlq;
291 __u8 sec_elmnt_count;
292 __u8 sec_elmnt_seq;
293 __u8 srl;
294 __u64 blocks; /* blocks per component could be different
295 * on different component devices...(only
296 * for concat I hope) */
297 __u64 array_blocks; /* blocks in array */
298 __u8 pad1[8];
299 __u32 spare_refs[8];
300 __u8 cache_pol[8];
301 __u8 bg_rate;
302 __u8 pad2[3];
303 __u8 pad3[52];
304 __u8 pad4[192];
305 __u8 v0[32]; /* reserved- 0xff */
306 __u8 v1[32]; /* reserved- 0xff */
307 __u8 v2[16]; /* reserved- 0xff */
308 __u8 v3[16]; /* reserved- 0xff */
309 __u8 vendor[32];
310 __u32 phys_refnum[0]; /* refnum of each disk in sequence */
311 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
312 bvd are always the same size */
313 };
314 #define LBA_OFFSET(ddf, vd) ((__u64 *) &(vd)->phys_refnum[(ddf)->mppe])
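/* Sketch: the start LBA of member 'i' of this BVD is therefore read as
 *
 *	__u64 start = __be64_to_cpu(LBA_OFFSET(ddf, vd)[i]);
 *
 * because the lba_offset array sits immediately after the mppe
 * phys_refnum entries in the on-disk configuration record.
 */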
315
316 /* vd_config.cache_pol[7] is a bitmap */
317 #define DDF_cache_writeback 1 /* else writethrough */
318 #define DDF_cache_wadaptive 2 /* only applies if writeback */
319 #define DDF_cache_readahead 4
320 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
321 #define DDF_cache_ifnobatt 16 /* allow write cache even if battery is poor */
322 #define DDF_cache_wallowed 32 /* enable write caching */
323 #define DDF_cache_rallowed 64 /* enable read caching */
324
325 struct spare_assign {
326 __u32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
327 __u32 crc;
328 __u32 timestamp;
329 __u8 reserved[7];
330 __u8 type;
331 __u16 populated; /* SAEs used */
332 __u16 max; /* max SAEs */
333 __u8 pad[8];
334 struct spare_assign_entry {
335 char guid[DDF_GUID_LEN];
336 __u16 secondary_element;
337 __u8 pad[6];
338 } spare_ents[0];
339 };
340 /* spare_assign.type is a bitmap */
341 #define DDF_spare_dedicated 0x1 /* else global */
342 #define DDF_spare_revertible 0x2 /* else committable */
343 #define DDF_spare_active 0x4 /* else not active */
344 #define DDF_spare_affinity 0x8 /* enclosure affinity */
345
346 /* The data_section contents - local scope */
347 struct disk_data {
348 __u32 magic; /* DDF_PHYS_DATA_MAGIC */
349 __u32 crc;
350 char guid[DDF_GUID_LEN];
351 __u32 refnum; /* crc of some magic drive data ... */
352 __u8 forced_ref; /* set when above was not result of magic */
353 __u8 forced_guid; /* set if guid was forced rather than magic */
354 __u8 vendor[32];
355 __u8 pad[442];
356 };
357
358 /* bbm_section content */
359 struct bad_block_log {
360 __u32 magic;
361 __u32 crc;
362 __u16 entry_count;
363 __u32 spare_count;
364 __u8 pad[10];
365 __u64 first_spare;
366 struct mapped_block {
367 __u64 defective_start;
368 __u32 replacement_start;
369 __u16 remap_count;
370 __u8 pad[2];
371 } entries[0];
372 };
373
374 /* Struct for internally holding ddf structures */
375 /* The DDF structure stored on each device is potentially
376 * quite different, as some data is global and some is local.
377 * The global data is:
378 * - ddf header
379 * - controller_data
380 * - Physical disk records
381 * - Virtual disk records
382 * The local data is:
383 * - Configuration records
384 * - Physical Disk data section
385 * ( and Bad block and vendor which I don't care about yet).
386 *
387 * The local data is parsed into separate lists as it is read
388 * and reconstructed for writing. This means that we only need
389 * to make config changes once and they are automatically
390 * propagated to all devices.
391 * Note that the ddf_super has space for the conf and disk data
392 * for this disk and also for a list of all such data.
393 * The list is only used for the superblock that is being
394 * built in Create or Assemble to describe the whole array.
395 */
396 struct ddf_super {
397 struct ddf_header anchor, primary, secondary;
398 struct ddf_controller_data controller;
399 struct ddf_header *active;
400 struct phys_disk *phys;
401 struct virtual_disk *virt;
402 int pdsize, vdsize;
403 unsigned int max_part, mppe, conf_rec_len;
404 int currentdev;
405 int updates_pending;
406 struct vcl {
407 union {
408 char space[512];
409 struct {
410 struct vcl *next;
411 unsigned int vcnum; /* index into ->virt */
412 struct vd_config **other_bvds;
413 __u64 *block_sizes; /* NULL if all the same */
414 };
415 };
416 struct vd_config conf;
417 } *conflist, *currentconf;
418 struct dl {
419 union {
420 char space[512];
421 struct {
422 struct dl *next;
423 int major, minor;
424 char *devname;
425 int fd;
426 unsigned long long size; /* sectors */
427 unsigned long long primary_lba; /* sectors */
428 unsigned long long secondary_lba; /* sectors */
429 unsigned long long workspace_lba; /* sectors */
430 int pdnum; /* index in ->phys */
431 struct spare_assign *spare;
432 void *mdupdate; /* hold metadata update */
433
434 /* These fields used by auto-layout */
435 int raiddisk; /* slot to fill in autolayout */
436 __u64 esize;
437 };
438 };
439 struct disk_data disk;
440 struct vcl *vlist[0]; /* max_part in size */
441 } *dlist, *add_list;
442 };
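/* Sketch of how the two in-memory lists are walked (see examine_vd() and
 * examine_pds() below for real uses):
 *
 *	struct vcl *vcl;
 *	struct dl *dl;
 *	for (vcl = ddf->conflist; vcl; vcl = vcl->next)
 *		... one virtual disk configuration record ...
 *	for (dl = ddf->dlist; dl; dl = dl->next)
 *		... one physical disk known to this container ...
 */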
443
444 #ifndef offsetof
445 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
446 #endif
447
448 #if DEBUG
449 static int all_ff(const char *guid);
450 static void pr_state(struct ddf_super *ddf, const char *msg)
451 {
452 unsigned int i;
453 dprintf("%s/%s: ", __func__, msg);
454 for (i = 0; i < __be16_to_cpu(ddf->active->max_vd_entries); i++) {
455 if (all_ff(ddf->virt->entries[i].guid))
456 continue;
457 dprintf("%u(s=%02x i=%02x) ", i,
458 ddf->virt->entries[i].state,
459 ddf->virt->entries[i].init_state);
460 }
461 dprintf("\n");
462 }
463 #else
464 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
465 #endif
466
467 #define ddf_set_updates_pending(x) \
468 do { (x)->updates_pending = 1; pr_state(x, __func__); } while (0)
469
470 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
471 __u32 refnum, unsigned int nmax,
472 const struct vd_config **bvd,
473 unsigned int *idx);
474
475 static unsigned int calc_crc(void *buf, int len)
476 {
477 /* crcs are always at the same place as in the ddf_header */
478 struct ddf_header *ddf = buf;
479 __u32 oldcrc = ddf->crc;
480 __u32 newcrc;
481 ddf->crc = 0xffffffff;
482
483 newcrc = crc32(0, buf, len);
484 ddf->crc = oldcrc;
485 /* The crc is stored (like everything) bigendian, so convert
486 * here for simplicity
487 */
488 return __cpu_to_be32(newcrc);
489 }
490
491 #define DDF_INVALID_LEVEL 0xff
492 #define DDF_NO_SECONDARY 0xff
493 static int err_bad_md_layout(const mdu_array_info_t *array)
494 {
495 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
496 array->level, array->layout, array->raid_disks);
497 return DDF_INVALID_LEVEL;
498 }
499
500 static int layout_md2ddf(const mdu_array_info_t *array,
501 struct vd_config *conf)
502 {
503 __u16 prim_elmnt_count = __cpu_to_be16(array->raid_disks);
504 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
505 __u8 sec_elmnt_count = 1;
506 __u8 srl = DDF_NO_SECONDARY;
507
508 switch (array->level) {
509 case LEVEL_LINEAR:
510 prl = DDF_CONCAT;
511 break;
512 case 0:
513 rlq = DDF_RAID0_SIMPLE;
514 prl = DDF_RAID0;
515 break;
516 case 1:
517 switch (array->raid_disks) {
518 case 2:
519 rlq = DDF_RAID1_SIMPLE;
520 break;
521 case 3:
522 rlq = DDF_RAID1_MULTI;
523 break;
524 default:
525 return err_bad_md_layout(array);
526 }
527 prl = DDF_RAID1;
528 break;
529 case 4:
530 if (array->layout != 0)
531 return err_bad_md_layout(array);
532 rlq = DDF_RAID4_N;
533 prl = DDF_RAID4;
534 break;
535 case 5:
536 switch (array->layout) {
537 case ALGORITHM_LEFT_ASYMMETRIC:
538 rlq = DDF_RAID5_N_RESTART;
539 break;
540 case ALGORITHM_RIGHT_ASYMMETRIC:
541 rlq = DDF_RAID5_0_RESTART;
542 break;
543 case ALGORITHM_LEFT_SYMMETRIC:
544 rlq = DDF_RAID5_N_CONTINUE;
545 break;
546 case ALGORITHM_RIGHT_SYMMETRIC:
547 /* not mentioned in standard */
548 default:
549 return err_bad_md_layout(array);
550 }
551 prl = DDF_RAID5;
552 break;
553 case 6:
554 switch (array->layout) {
555 case ALGORITHM_ROTATING_N_RESTART:
556 rlq = DDF_RAID5_N_RESTART;
557 break;
558 case ALGORITHM_ROTATING_ZERO_RESTART:
559 rlq = DDF_RAID6_0_RESTART;
560 break;
561 case ALGORITHM_ROTATING_N_CONTINUE:
562 rlq = DDF_RAID5_N_CONTINUE;
563 break;
564 default:
565 return err_bad_md_layout(array);
566 }
567 prl = DDF_RAID6;
568 break;
569 case 10:
570 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
571 rlq = DDF_RAID1_SIMPLE;
572 prim_elmnt_count = __cpu_to_be16(2);
573 sec_elmnt_count = array->raid_disks / 2;
574 } else if (array->raid_disks % 3 == 0
575 && array->layout == 0x103) {
576 rlq = DDF_RAID1_MULTI;
577 prim_elmnt_count = __cpu_to_be16(3);
578 sec_elmnt_count = array->raid_disks / 3;
579 } else
580 return err_bad_md_layout(array);
581 srl = DDF_2SPANNED;
582 prl = DDF_RAID1;
583 break;
584 default:
585 return err_bad_md_layout(array);
586 }
587 conf->prl = prl;
588 conf->prim_elmnt_count = prim_elmnt_count;
589 conf->rlq = rlq;
590 conf->srl = srl;
591 conf->sec_elmnt_count = sec_elmnt_count;
592 return 0;
593 }
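/* Usage sketch (hypothetical values): an md RAID10 "near=2" array maps to
 * mirrored BVDs spanned at the secondary level, e.g.
 *
 *	mdu_array_info_t array = { .level = 10, .layout = 0x102,
 *				   .raid_disks = 4 };
 *	struct vd_config conf;
 *	if (layout_md2ddf(&array, &conf) == 0)
 *		... conf.prl == DDF_RAID1, conf.srl == DDF_2SPANNED,
 *		    conf.sec_elmnt_count == 2 ...
 */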
594
595 static int err_bad_ddf_layout(const struct vd_config *conf)
596 {
597 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
598 conf->prl, conf->rlq, __be16_to_cpu(conf->prim_elmnt_count));
599 return -1;
600 }
601
602 static int layout_ddf2md(const struct vd_config *conf,
603 mdu_array_info_t *array)
604 {
605 int level = LEVEL_UNSUPPORTED;
606 int layout = 0;
607 int raiddisks = __be16_to_cpu(conf->prim_elmnt_count);
608
609 if (conf->sec_elmnt_count > 1) {
610 /* see also check_secondary() */
611 if (conf->prl != DDF_RAID1 ||
612 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
613 pr_err("Unsupported secondary RAID level %u/%u\n",
614 conf->prl, conf->srl);
615 return -1;
616 }
617 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
618 layout = 0x102;
619 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
620 layout = 0x103;
621 else
622 return err_bad_ddf_layout(conf);
623 raiddisks *= conf->sec_elmnt_count;
624 level = 10;
625 goto good;
626 }
627
628 switch (conf->prl) {
629 case DDF_CONCAT:
630 level = LEVEL_LINEAR;
631 break;
632 case DDF_RAID0:
633 if (conf->rlq != DDF_RAID0_SIMPLE)
634 return err_bad_ddf_layout(conf);
635 level = 0;
636 break;
637 case DDF_RAID1:
638 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
639 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
640 return err_bad_ddf_layout(conf);
641 level = 1;
642 break;
643 case DDF_RAID4:
644 if (conf->rlq != DDF_RAID4_N)
645 return err_bad_ddf_layout(conf);
646 level = 4;
647 break;
648 case DDF_RAID5:
649 switch (conf->rlq) {
650 case DDF_RAID5_N_RESTART:
651 layout = ALGORITHM_LEFT_ASYMMETRIC;
652 break;
653 case DDF_RAID5_0_RESTART:
654 layout = ALGORITHM_RIGHT_ASYMMETRIC;
655 break;
656 case DDF_RAID5_N_CONTINUE:
657 layout = ALGORITHM_LEFT_SYMMETRIC;
658 break;
659 default:
660 return err_bad_ddf_layout(conf);
661 }
662 level = 5;
663 break;
664 case DDF_RAID6:
665 switch (conf->rlq) {
666 case DDF_RAID5_N_RESTART:
667 layout = ALGORITHM_ROTATING_N_RESTART;
668 break;
669 case DDF_RAID6_0_RESTART:
670 layout = ALGORITHM_ROTATING_ZERO_RESTART;
671 break;
672 case DDF_RAID5_N_CONTINUE:
673 layout = ALGORITHM_ROTATING_N_CONTINUE;
674 break;
675 default:
676 return err_bad_ddf_layout(conf);
677 }
678 level = 6;
679 break;
680 default:
681 return err_bad_ddf_layout(conf);
682 };
683
684 good:
685 array->level = level;
686 array->layout = layout;
687 array->raid_disks = raiddisks;
688 return 0;
689 }
690
691 static int load_ddf_header(int fd, unsigned long long lba,
692 unsigned long long size,
693 int type,
694 struct ddf_header *hdr, struct ddf_header *anchor)
695 {
696 /* read a ddf header (primary or secondary) from fd/lba
697 * and check that it is consistent with the anchor.
698 * Need to check:
699 * magic, crc, guid, rev, and the LBAs; also that
700 * header_type matches and everything after header_type is the same
701 */
702 if (lba >= size-1)
703 return 0;
704
705 if (lseek64(fd, lba<<9, 0) < 0)
706 return 0;
707
708 if (read(fd, hdr, 512) != 512)
709 return 0;
710
711 if (hdr->magic != DDF_HEADER_MAGIC)
712 return 0;
713 if (calc_crc(hdr, 512) != hdr->crc)
714 return 0;
715 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
716 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
717 anchor->primary_lba != hdr->primary_lba ||
718 anchor->secondary_lba != hdr->secondary_lba ||
719 hdr->type != type ||
720 memcmp(anchor->pad2, hdr->pad2, 512 -
721 offsetof(struct ddf_header, pad2)) != 0)
722 return 0;
723
724 /* Looks good enough to me... */
725 return 1;
726 }
727
728 static void *load_section(int fd, struct ddf_super *super, void *buf,
729 __u32 offset_be, __u32 len_be, int check)
730 {
731 unsigned long long offset = __be32_to_cpu(offset_be);
732 unsigned long long len = __be32_to_cpu(len_be);
733 int dofree = (buf == NULL);
734
735 if (check)
736 if (len != 2 && len != 8 && len != 32
737 && len != 128 && len != 512)
738 return NULL;
739
740 if (len > 1024)
741 return NULL;
742 if (buf) {
743 /* All pre-allocated sections are a single block */
744 if (len != 1)
745 return NULL;
746 } else if (posix_memalign(&buf, 512, len<<9) != 0)
747 buf = NULL;
748
749 if (!buf)
750 return NULL;
751
752 if (super->active->type == 1)
753 offset += __be64_to_cpu(super->active->primary_lba);
754 else
755 offset += __be64_to_cpu(super->active->secondary_lba);
756
757 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
758 if (dofree)
759 free(buf);
760 return NULL;
761 }
762 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
763 if (dofree)
764 free(buf);
765 return NULL;
766 }
767 return buf;
768 }
769
770 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
771 {
772 unsigned long long dsize;
773
774 get_dev_size(fd, NULL, &dsize);
775
776 if (lseek64(fd, dsize-512, 0) < 0) {
777 if (devname)
778 pr_err("Cannot seek to anchor block on %s: %s\n",
779 devname, strerror(errno));
780 return 1;
781 }
782 if (read(fd, &super->anchor, 512) != 512) {
783 if (devname)
784 pr_err("Cannot read anchor block on %s: %s\n",
785 devname, strerror(errno));
786 return 1;
787 }
788 if (super->anchor.magic != DDF_HEADER_MAGIC) {
789 if (devname)
790 pr_err("no DDF anchor found on %s\n",
791 devname);
792 return 2;
793 }
794 if (calc_crc(&super->anchor, 512) != super->anchor.crc) {
795 if (devname)
796 pr_err("bad CRC on anchor on %s\n",
797 devname);
798 return 2;
799 }
800 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
801 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
802 if (devname)
803 pr_err("can only support super revision"
804 " %.8s and earlier, not %.8s on %s\n",
805 DDF_REVISION_2, super->anchor.revision,devname);
806 return 2;
807 }
808 super->active = NULL;
809 if (load_ddf_header(fd, __be64_to_cpu(super->anchor.primary_lba),
810 dsize >> 9, 1,
811 &super->primary, &super->anchor) == 0) {
812 if (devname)
813 pr_err("Failed to load primary DDF header "
814 "on %s\n", devname);
815 } else
816 super->active = &super->primary;
817 if (load_ddf_header(fd, __be64_to_cpu(super->anchor.secondary_lba),
818 dsize >> 9, 2,
819 &super->secondary, &super->anchor)) {
820 if ((__be32_to_cpu(super->primary.seq)
821 < __be32_to_cpu(super->secondary.seq) &&
822 !super->secondary.openflag)
823 || (__be32_to_cpu(super->primary.seq)
824 == __be32_to_cpu(super->secondary.seq) &&
825 super->primary.openflag && !super->secondary.openflag)
826 || super->active == NULL
827 )
828 super->active = &super->secondary;
829 } else if (devname)
830 pr_err("Failed to load secondary DDF header on %s\n",
831 devname);
832 if (super->active == NULL)
833 return 2;
834 return 0;
835 }
836
837 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
838 {
839 void *ok;
840 ok = load_section(fd, super, &super->controller,
841 super->active->controller_section_offset,
842 super->active->controller_section_length,
843 0);
844 super->phys = load_section(fd, super, NULL,
845 super->active->phys_section_offset,
846 super->active->phys_section_length,
847 1);
848 super->pdsize = __be32_to_cpu(super->active->phys_section_length) * 512;
849
850 super->virt = load_section(fd, super, NULL,
851 super->active->virt_section_offset,
852 super->active->virt_section_length,
853 1);
854 super->vdsize = __be32_to_cpu(super->active->virt_section_length) * 512;
855 if (!ok ||
856 !super->phys ||
857 !super->virt) {
858 free(super->phys);
859 free(super->virt);
860 super->phys = NULL;
861 super->virt = NULL;
862 return 2;
863 }
864 super->conflist = NULL;
865 super->dlist = NULL;
866
867 super->max_part = __be16_to_cpu(super->active->max_partitions);
868 super->mppe = __be16_to_cpu(super->active->max_primary_element_entries);
869 super->conf_rec_len = __be16_to_cpu(super->active->config_record_len);
870 return 0;
871 }
872
873 #define DDF_UNUSED_BVD 0xff
874 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
875 {
876 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
877 unsigned int i, vdsize;
878 void *p;
879 if (n_vds == 0) {
880 vcl->other_bvds = NULL;
881 return 0;
882 }
883 vdsize = ddf->conf_rec_len * 512;
884 if (posix_memalign(&p, 512, n_vds *
885 (vdsize + sizeof(struct vd_config *))) != 0)
886 return -1;
887 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
888 for (i = 0; i < n_vds; i++) {
889 vcl->other_bvds[i] = p + i * vdsize;
890 memset(vcl->other_bvds[i], 0, vdsize);
891 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
892 }
893 return 0;
894 }
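/* Memory layout created above (sketch): a single aligned buffer holds the
 * n_vds secondary configuration records back to back, followed by the
 * pointer array that indexes them, which is why free_super_ddf() only
 * frees other_bvds[0]:
 *
 *	| vd_config 0 | vd_config 1 | ... | vd_config n-1 | ptr array |
 *	  ^other_bvds[0]                                    ^other_bvds
 */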
895
896 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
897 unsigned int len)
898 {
899 int i;
900 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
901 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
902 break;
903
904 if (i < vcl->conf.sec_elmnt_count-1) {
905 if (vd->seqnum <= vcl->other_bvds[i]->seqnum)
906 return;
907 } else {
908 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
909 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
910 break;
911 if (i == vcl->conf.sec_elmnt_count-1) {
912 pr_err("no space for sec level config %u, count is %u\n",
913 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
914 return;
915 }
916 }
917 memcpy(vcl->other_bvds[i], vd, len);
918 }
919
920 static int load_ddf_local(int fd, struct ddf_super *super,
921 char *devname, int keep)
922 {
923 struct dl *dl;
924 struct stat stb;
925 char *conf;
926 unsigned int i;
927 unsigned int confsec;
928 int vnum;
929 unsigned int max_virt_disks = __be16_to_cpu(super->active->max_vd_entries);
930 unsigned long long dsize;
931
932 /* First the local disk info */
933 if (posix_memalign((void**)&dl, 512,
934 sizeof(*dl) +
935 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
936 pr_err("%s could not allocate disk info buffer\n",
937 __func__);
938 return 1;
939 }
940
941 load_section(fd, super, &dl->disk,
942 super->active->data_section_offset,
943 super->active->data_section_length,
944 0);
945 dl->devname = devname ? xstrdup(devname) : NULL;
946
947 fstat(fd, &stb);
948 dl->major = major(stb.st_rdev);
949 dl->minor = minor(stb.st_rdev);
950 dl->next = super->dlist;
951 dl->fd = keep ? fd : -1;
952
953 dl->size = 0;
954 if (get_dev_size(fd, devname, &dsize))
955 dl->size = dsize >> 9;
956 /* If the disks have different sizes, the LBAs will differ
957 * between phys disks.
958 * At this point, the values in super->active must be valid
959 * for this phys disk. */
960 dl->primary_lba = super->active->primary_lba;
961 dl->secondary_lba = super->active->secondary_lba;
962 dl->workspace_lba = super->active->workspace_lba;
963 dl->spare = NULL;
964 for (i = 0 ; i < super->max_part ; i++)
965 dl->vlist[i] = NULL;
966 super->dlist = dl;
967 dl->pdnum = -1;
968 for (i = 0; i < __be16_to_cpu(super->active->max_pd_entries); i++)
969 if (memcmp(super->phys->entries[i].guid,
970 dl->disk.guid, DDF_GUID_LEN) == 0)
971 dl->pdnum = i;
972
973 /* Now the config list. */
974 /* 'conf' is an array of config entries, some of which are
975 * probably invalid. Those which are good need to be copied into
976 * the conflist
977 */
978
979 conf = load_section(fd, super, NULL,
980 super->active->config_section_offset,
981 super->active->config_section_length,
982 0);
983
984 vnum = 0;
985 for (confsec = 0;
986 confsec < __be32_to_cpu(super->active->config_section_length);
987 confsec += super->conf_rec_len) {
988 struct vd_config *vd =
989 (struct vd_config *)((char*)conf + confsec*512);
990 struct vcl *vcl;
991
992 if (vd->magic == DDF_SPARE_ASSIGN_MAGIC) {
993 if (dl->spare)
994 continue;
995 if (posix_memalign((void**)&dl->spare, 512,
996 super->conf_rec_len*512) != 0) {
997 pr_err("%s could not allocate spare info buf\n",
998 __func__);
999 return 1;
1000 }
1001
1002 memcpy(dl->spare, vd, super->conf_rec_len*512);
1003 continue;
1004 }
1005 if (vd->magic != DDF_VD_CONF_MAGIC)
1006 continue;
1007 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1008 if (memcmp(vcl->conf.guid,
1009 vd->guid, DDF_GUID_LEN) == 0)
1010 break;
1011 }
1012
1013 if (vcl) {
1014 dl->vlist[vnum++] = vcl;
1015 if (vcl->other_bvds != NULL &&
1016 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1017 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1018 continue;
1019 }
1020 if (__be32_to_cpu(vd->seqnum) <=
1021 __be32_to_cpu(vcl->conf.seqnum))
1022 continue;
1023 } else {
1024 if (posix_memalign((void**)&vcl, 512,
1025 (super->conf_rec_len*512 +
1026 offsetof(struct vcl, conf))) != 0) {
1027 pr_err("%s could not allocate vcl buf\n",
1028 __func__);
1029 return 1;
1030 }
1031 vcl->next = super->conflist;
1032 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1033 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1034 if (alloc_other_bvds(super, vcl) != 0) {
1035 pr_err("%s could not allocate other bvds\n",
1036 __func__);
1037 free(vcl);
1038 return 1;
1039 };
1040 super->conflist = vcl;
1041 dl->vlist[vnum++] = vcl;
1042 }
1043 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1044 for (i=0; i < max_virt_disks ; i++)
1045 if (memcmp(super->virt->entries[i].guid,
1046 vcl->conf.guid, DDF_GUID_LEN)==0)
1047 break;
1048 if (i < max_virt_disks)
1049 vcl->vcnum = i;
1050 }
1051 free(conf);
1052
1053 return 0;
1054 }
1055
1056 #ifndef MDASSEMBLE
1057 static int load_super_ddf_all(struct supertype *st, int fd,
1058 void **sbp, char *devname);
1059 #endif
1060
1061 static void free_super_ddf(struct supertype *st);
1062
1063 static int load_super_ddf(struct supertype *st, int fd,
1064 char *devname)
1065 {
1066 unsigned long long dsize;
1067 struct ddf_super *super;
1068 int rv;
1069
1070 if (get_dev_size(fd, devname, &dsize) == 0)
1071 return 1;
1072
1073 if (!st->ignore_hw_compat && test_partition(fd))
1074 /* DDF is not allowed on partitions */
1075 return 1;
1076
1077 /* 32M is a lower bound */
1078 if (dsize <= 32*1024*1024) {
1079 if (devname)
1080 pr_err("%s is too small for ddf: "
1081 "size is %llu sectors.\n",
1082 devname, dsize>>9);
1083 return 1;
1084 }
1085 if (dsize & 511) {
1086 if (devname)
1087 pr_err("%s is an odd size for ddf: "
1088 "size is %llu bytes.\n",
1089 devname, dsize);
1090 return 1;
1091 }
1092
1093 free_super_ddf(st);
1094
1095 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1096 pr_err("malloc of %zu failed.\n",
1097 sizeof(*super));
1098 return 1;
1099 }
1100 memset(super, 0, sizeof(*super));
1101
1102 rv = load_ddf_headers(fd, super, devname);
1103 if (rv) {
1104 free(super);
1105 return rv;
1106 }
1107
1108 /* Have valid headers and have chosen the best. Let's read in the rest. */
1109
1110 rv = load_ddf_global(fd, super, devname);
1111
1112 if (rv) {
1113 if (devname)
1114 pr_err("Failed to load all information "
1115 "sections on %s\n", devname);
1116 free(super);
1117 return rv;
1118 }
1119
1120 rv = load_ddf_local(fd, super, devname, 0);
1121
1122 if (rv) {
1123 if (devname)
1124 pr_err("Failed to load all information "
1125 "sections on %s\n", devname);
1126 free(super);
1127 return rv;
1128 }
1129
1130 /* Should possibly check the sections .... */
1131
1132 st->sb = super;
1133 if (st->ss == NULL) {
1134 st->ss = &super_ddf;
1135 st->minor_version = 0;
1136 st->max_devs = 512;
1137 }
1138 return 0;
1139
1140 }
1141
1142 static void free_super_ddf(struct supertype *st)
1143 {
1144 struct ddf_super *ddf = st->sb;
1145 if (ddf == NULL)
1146 return;
1147 free(ddf->phys);
1148 free(ddf->virt);
1149 while (ddf->conflist) {
1150 struct vcl *v = ddf->conflist;
1151 ddf->conflist = v->next;
1152 if (v->block_sizes)
1153 free(v->block_sizes);
1154 if (v->other_bvds)
1155 /*
1156 v->other_bvds[0] points to beginning of buffer,
1157 see alloc_other_bvds()
1158 */
1159 free(v->other_bvds[0]);
1160 free(v);
1161 }
1162 while (ddf->dlist) {
1163 struct dl *d = ddf->dlist;
1164 ddf->dlist = d->next;
1165 if (d->fd >= 0)
1166 close(d->fd);
1167 if (d->spare)
1168 free(d->spare);
1169 free(d);
1170 }
1171 while (ddf->add_list) {
1172 struct dl *d = ddf->add_list;
1173 ddf->add_list = d->next;
1174 if (d->fd >= 0)
1175 close(d->fd);
1176 if (d->spare)
1177 free(d->spare);
1178 free(d);
1179 }
1180 free(ddf);
1181 st->sb = NULL;
1182 }
1183
1184 static struct supertype *match_metadata_desc_ddf(char *arg)
1185 {
1186 /* 'ddf' only supports containers */
1187 struct supertype *st;
1188 if (strcmp(arg, "ddf") != 0 &&
1189 strcmp(arg, "default") != 0
1190 )
1191 return NULL;
1192
1193 st = xcalloc(1, sizeof(*st));
1194 st->ss = &super_ddf;
1195 st->max_devs = 512;
1196 st->minor_version = 0;
1197 st->sb = NULL;
1198 return st;
1199 }
1200
1201 #ifndef MDASSEMBLE
1202
1203 static mapping_t ddf_state[] = {
1204 { "Optimal", 0},
1205 { "Degraded", 1},
1206 { "Deleted", 2},
1207 { "Missing", 3},
1208 { "Failed", 4},
1209 { "Partially Optimal", 5},
1210 { "-reserved-", 6},
1211 { "-reserved-", 7},
1212 { NULL, 0}
1213 };
1214
1215 static mapping_t ddf_init_state[] = {
1216 { "Not Initialised", 0},
1217 { "QuickInit in Progress", 1},
1218 { "Fully Initialised", 2},
1219 { "*UNKNOWN*", 3},
1220 { NULL, 0}
1221 };
1222 static mapping_t ddf_access[] = {
1223 { "Read/Write", 0},
1224 { "Reserved", 1},
1225 { "Read Only", 2},
1226 { "Blocked (no access)", 3},
1227 { NULL ,0}
1228 };
1229
1230 static mapping_t ddf_level[] = {
1231 { "RAID0", DDF_RAID0},
1232 { "RAID1", DDF_RAID1},
1233 { "RAID3", DDF_RAID3},
1234 { "RAID4", DDF_RAID4},
1235 { "RAID5", DDF_RAID5},
1236 { "RAID1E",DDF_RAID1E},
1237 { "JBOD", DDF_JBOD},
1238 { "CONCAT",DDF_CONCAT},
1239 { "RAID5E",DDF_RAID5E},
1240 { "RAID5EE",DDF_RAID5EE},
1241 { "RAID6", DDF_RAID6},
1242 { NULL, 0}
1243 };
1244 static mapping_t ddf_sec_level[] = {
1245 { "Striped", DDF_2STRIPED},
1246 { "Mirrored", DDF_2MIRRORED},
1247 { "Concat", DDF_2CONCAT},
1248 { "Spanned", DDF_2SPANNED},
1249 { NULL, 0}
1250 };
1251 #endif
1252
1253 static int all_ff(const char *guid)
1254 {
1255 int i;
1256 for (i = 0; i < DDF_GUID_LEN; i++)
1257 if (guid[i] != (char)0xff)
1258 return 0;
1259 return 1;
1260 }
1261
1262 #ifndef MDASSEMBLE
1263 static void print_guid(char *guid, int tstamp)
1264 {
1265 /* GUIDs are part (or all) ASCII and part binary.
1266 * They tend to be space padded.
1267 * We print the GUID in HEX, then in parentheses add
1268 * any initial ASCII sequence, and a possible
1269 * time stamp from bytes 16-19
1270 */
1271 int l = DDF_GUID_LEN;
1272 int i;
1273
1274 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1275 if ((i&3)==0 && i != 0) printf(":");
1276 printf("%02X", guid[i]&255);
1277 }
1278
1279 printf("\n (");
1280 while (l && guid[l-1] == ' ')
1281 l--;
1282 for (i=0 ; i<l ; i++) {
1283 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1284 fputc(guid[i], stdout);
1285 else
1286 break;
1287 }
1288 if (tstamp) {
1289 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1290 char tbuf[100];
1291 struct tm *tm;
1292 tm = localtime(&then);
1293 strftime(tbuf, 100, " %D %T",tm);
1294 fputs(tbuf, stdout);
1295 }
1296 printf(")");
1297 }
1298
1299 static const char *guid_str(const char *guid)
1300 {
1301 static char buf[DDF_GUID_LEN*2+1];
1302 int i;
1303 char *p = buf;
1304 for (i = 0; i < DDF_GUID_LEN; i++)
1305 p += sprintf(p, "%02x", (unsigned char)guid[i]);
1306 *p = '\0';
1307 return (const char *) buf;
1308 }
1309
1310 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1311 {
1312 int crl = sb->conf_rec_len;
1313 struct vcl *vcl;
1314
1315 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1316 unsigned int i;
1317 struct vd_config *vc = &vcl->conf;
1318
1319 if (calc_crc(vc, crl*512) != vc->crc)
1320 continue;
1321 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1322 continue;
1323
1324 /* Ok, we know about this VD, let's give more details */
1325 printf(" Raid Devices[%d] : %d (", n,
1326 __be16_to_cpu(vc->prim_elmnt_count));
1327 for (i = 0; i < __be16_to_cpu(vc->prim_elmnt_count); i++) {
1328 int j;
1329 int cnt = __be16_to_cpu(sb->phys->used_pdes);
1330 for (j=0; j<cnt; j++)
1331 if (vc->phys_refnum[i] == sb->phys->entries[j].refnum)
1332 break;
1333 if (i) printf(" ");
1334 if (j < cnt)
1335 printf("%d", j);
1336 else
1337 printf("--");
1338 }
1339 printf(")\n");
1340 if (vc->chunk_shift != 255)
1341 printf(" Chunk Size[%d] : %d sectors\n", n,
1342 1 << vc->chunk_shift);
1343 printf(" Raid Level[%d] : %s\n", n,
1344 map_num(ddf_level, vc->prl)?:"-unknown-");
1345 if (vc->sec_elmnt_count != 1) {
1346 printf(" Secondary Position[%d] : %d of %d\n", n,
1347 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1348 printf(" Secondary Level[%d] : %s\n", n,
1349 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1350 }
1351 printf(" Device Size[%d] : %llu\n", n,
1352 (unsigned long long)__be64_to_cpu(vc->blocks)/2);
1353 printf(" Array Size[%d] : %llu\n", n,
1354 (unsigned long long)__be64_to_cpu(vc->array_blocks)/2);
1355 }
1356 }
1357
1358 static void examine_vds(struct ddf_super *sb)
1359 {
1360 int cnt = __be16_to_cpu(sb->virt->populated_vdes);
1361 unsigned int i;
1362 printf(" Virtual Disks : %d\n", cnt);
1363
1364 for (i = 0; i < __be16_to_cpu(sb->virt->max_vdes); i++) {
1365 struct virtual_entry *ve = &sb->virt->entries[i];
1366 if (all_ff(ve->guid))
1367 continue;
1368 printf("\n");
1369 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1370 printf("\n");
1371 printf(" unit[%d] : %d\n", i, __be16_to_cpu(ve->unit));
1372 printf(" state[%d] : %s, %s%s\n", i,
1373 map_num(ddf_state, ve->state & 7),
1374 (ve->state & 8) ? "Morphing, ": "",
1375 (ve->state & 16)? "Not Consistent" : "Consistent");
1376 printf(" init state[%d] : %s\n", i,
1377 map_num(ddf_init_state, ve->init_state&3));
1378 printf(" access[%d] : %s\n", i,
1379 map_num(ddf_access, (ve->init_state>>6) & 3));
1380 printf(" Name[%d] : %.16s\n", i, ve->name);
1381 examine_vd(i, sb, ve->guid);
1382 }
1383 if (cnt) printf("\n");
1384 }
1385
1386 static void examine_pds(struct ddf_super *sb)
1387 {
1388 int cnt = __be16_to_cpu(sb->phys->used_pdes);
1389 int i;
1390 struct dl *dl;
1391 printf(" Physical Disks : %d\n", cnt);
1392 printf(" Number RefNo Size Device Type/State\n");
1393
1394 for (i=0 ; i<cnt ; i++) {
1395 struct phys_disk_entry *pd = &sb->phys->entries[i];
1396 int type = __be16_to_cpu(pd->type);
1397 int state = __be16_to_cpu(pd->state);
1398
1399 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1400 //printf("\n");
1401 printf(" %3d %08x ", i,
1402 __be32_to_cpu(pd->refnum));
1403 printf("%8lluK ",
1404 (unsigned long long)__be64_to_cpu(pd->config_size)>>1);
1405 for (dl = sb->dlist; dl ; dl = dl->next) {
1406 if (dl->disk.refnum == pd->refnum) {
1407 char *dv = map_dev(dl->major, dl->minor, 0);
1408 if (dv) {
1409 printf("%-15s", dv);
1410 break;
1411 }
1412 }
1413 }
1414 if (!dl)
1415 printf("%15s","");
1416 printf(" %s%s%s%s%s",
1417 (type&2) ? "active":"",
1418 (type&4) ? "Global-Spare":"",
1419 (type&8) ? "spare" : "",
1420 (type&16)? ", foreign" : "",
1421 (type&32)? "pass-through" : "");
1422 if (state & DDF_Failed)
1423 /* This over-rides these three */
1424 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1425 printf("/%s%s%s%s%s%s%s",
1426 (state&1)? "Online": "Offline",
1427 (state&2)? ", Failed": "",
1428 (state&4)? ", Rebuilding": "",
1429 (state&8)? ", in-transition": "",
1430 (state&16)? ", SMART-errors": "",
1431 (state&32)? ", Unrecovered-Read-Errors": "",
1432 (state&64)? ", Missing" : "");
1433 printf("\n");
1434 }
1435 }
1436
1437 static void examine_super_ddf(struct supertype *st, char *homehost)
1438 {
1439 struct ddf_super *sb = st->sb;
1440
1441 printf(" Magic : %08x\n", __be32_to_cpu(sb->anchor.magic));
1442 printf(" Version : %.8s\n", sb->anchor.revision);
1443 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1444 printf("\n");
1445 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1446 printf("\n");
1447 printf(" Seq : %08x\n", __be32_to_cpu(sb->active->seq));
1448 printf(" Redundant hdr : %s\n", sb->secondary.magic == DDF_HEADER_MAGIC
1449 ?"yes" : "no");
1450 examine_vds(sb);
1451 examine_pds(sb);
1452 }
1453
1454 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1455
1456 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1457 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1458
1459 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1460 {
1461 /*
1462 * Figure out the VD number for this supertype.
1463 * Returns DDF_CONTAINER for the container itself,
1464 * and DDF_NOTFOUND on error.
1465 */
1466 struct ddf_super *ddf = st->sb;
1467 struct mdinfo *sra;
1468 char *sub, *end;
1469 unsigned int vcnum;
1470
1471 if (*st->container_devnm == '\0')
1472 return DDF_CONTAINER;
1473
1474 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1475 if (!sra || sra->array.major_version != -1 ||
1476 sra->array.minor_version != -2 ||
1477 !is_subarray(sra->text_version))
1478 return DDF_NOTFOUND;
1479
1480 sub = strchr(sra->text_version + 1, '/');
1481 if (sub != NULL)
1482 vcnum = strtoul(sub + 1, &end, 10);
1483 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1484 vcnum >= __be16_to_cpu(ddf->active->max_vd_entries))
1485 return DDF_NOTFOUND;
1486
1487 return vcnum;
1488 }
1489
1490 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1491 {
1492 /* We just write a generic DDF ARRAY entry
1493 */
1494 struct mdinfo info;
1495 char nbuf[64];
1496 getinfo_super_ddf(st, &info, NULL);
1497 fname_from_uuid(st, &info, nbuf, ':');
1498
1499 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1500 }
1501
1502 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1503 {
1504 /* We just write a generic DDF ARRAY entry
1505 */
1506 struct ddf_super *ddf = st->sb;
1507 struct mdinfo info;
1508 unsigned int i;
1509 char nbuf[64];
1510 getinfo_super_ddf(st, &info, NULL);
1511 fname_from_uuid(st, &info, nbuf, ':');
1512
1513 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
1514 struct virtual_entry *ve = &ddf->virt->entries[i];
1515 struct vcl vcl;
1516 char nbuf1[64];
1517 if (all_ff(ve->guid))
1518 continue;
1519 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1520 ddf->currentconf =&vcl;
1521 uuid_from_super_ddf(st, info.uuid);
1522 fname_from_uuid(st, &info, nbuf1, ':');
1523 printf("ARRAY container=%s member=%d UUID=%s\n",
1524 nbuf+5, i, nbuf1+5);
1525 }
1526 }
1527
1528 static void export_examine_super_ddf(struct supertype *st)
1529 {
1530 struct mdinfo info;
1531 char nbuf[64];
1532 getinfo_super_ddf(st, &info, NULL);
1533 fname_from_uuid(st, &info, nbuf, ':');
1534 printf("MD_METADATA=ddf\n");
1535 printf("MD_LEVEL=container\n");
1536 printf("MD_UUID=%s\n", nbuf+5);
1537 }
1538
1539 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1540 {
1541 void *buf;
1542 unsigned long long dsize, offset;
1543 int bytes;
1544 struct ddf_header *ddf;
1545 int written = 0;
1546
1547 /* The meta consists of an anchor, a primary, and a secondary.
1548 * This all lives at the end of the device.
1549 * So it is easiest to find the earliest of primary and
1550 * secondary, and copy everything from there.
1551 *
1552 * The anchor is 512 bytes from the end. It contains primary_lba and
1553 * secondary_lba; we choose the earlier of those.
1554 */
1555
1556 if (posix_memalign(&buf, 4096, 4096) != 0)
1557 return 1;
1558
1559 if (!get_dev_size(from, NULL, &dsize))
1560 goto err;
1561
1562 if (lseek64(from, dsize-512, 0) < 0)
1563 goto err;
1564 if (read(from, buf, 512) != 512)
1565 goto err;
1566 ddf = buf;
1567 if (ddf->magic != DDF_HEADER_MAGIC ||
1568 calc_crc(ddf, 512) != ddf->crc ||
1569 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1570 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1571 goto err;
1572
1573 offset = dsize - 512;
1574 if ((__be64_to_cpu(ddf->primary_lba) << 9) < offset)
1575 offset = __be64_to_cpu(ddf->primary_lba) << 9;
1576 if ((__be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1577 offset = __be64_to_cpu(ddf->secondary_lba) << 9;
1578
1579 bytes = dsize - offset;
1580
1581 if (lseek64(from, offset, 0) < 0 ||
1582 lseek64(to, offset, 0) < 0)
1583 goto err;
1584 while (written < bytes) {
1585 int n = bytes - written;
1586 if (n > 4096)
1587 n = 4096;
1588 if (read(from, buf, n) != n)
1589 goto err;
1590 if (write(to, buf, n) != n)
1591 goto err;
1592 written += n;
1593 }
1594 free(buf);
1595 return 0;
1596 err:
1597 free(buf);
1598 return 1;
1599 }
1600
1601 static void detail_super_ddf(struct supertype *st, char *homehost)
1602 {
1603 /* FIXME later
1604 * Could print DDF GUID
1605 * Need to find which array
1606 * If whole, briefly list all arrays
1607 * If one, give name
1608 */
1609 }
1610
1611 static void brief_detail_super_ddf(struct supertype *st)
1612 {
1613 struct mdinfo info;
1614 char nbuf[64];
1615 struct ddf_super *ddf = st->sb;
1616 unsigned int vcnum = get_vd_num_of_subarray(st);
1617 if (vcnum == DDF_CONTAINER)
1618 uuid_from_super_ddf(st, info.uuid);
1619 else if (vcnum == DDF_NOTFOUND)
1620 return;
1621 else
1622 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, info.uuid);
1623 fname_from_uuid(st, &info, nbuf,':');
1624 printf(" UUID=%s", nbuf + 5);
1625 }
1626 #endif
1627
1628 static int match_home_ddf(struct supertype *st, char *homehost)
1629 {
1630 /* It matches 'this' host if the controller is a
1631 * Linux-MD controller with vendor_data matching
1632 * the hostname
1633 */
1634 struct ddf_super *ddf = st->sb;
1635 unsigned int len;
1636
1637 if (!homehost)
1638 return 0;
1639 len = strlen(homehost);
1640
1641 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1642 len < sizeof(ddf->controller.vendor_data) &&
1643 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1644 ddf->controller.vendor_data[len] == 0);
1645 }
1646
1647 #ifndef MDASSEMBLE
1648 static int find_index_in_bvd(const struct ddf_super *ddf,
1649 const struct vd_config *conf, unsigned int n,
1650 unsigned int *n_bvd)
1651 {
1652 /*
1653 * Find the index of the n-th valid physical disk in this BVD
1654 */
1655 unsigned int i, j;
1656 for (i = 0, j = 0; i < ddf->mppe &&
1657 j < __be16_to_cpu(conf->prim_elmnt_count); i++) {
1658 if (conf->phys_refnum[i] != 0xffffffff) {
1659 if (n == j) {
1660 *n_bvd = i;
1661 return 1;
1662 }
1663 j++;
1664 }
1665 }
1666 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1667 __func__, n, __be16_to_cpu(conf->prim_elmnt_count));
1668 return 0;
1669 }
1670
1671 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1672 unsigned int n,
1673 unsigned int *n_bvd, struct vcl **vcl)
1674 {
1675 struct vcl *v;
1676
1677 for (v = ddf->conflist; v; v = v->next) {
1678 unsigned int nsec, ibvd;
1679 struct vd_config *conf;
1680 if (inst != v->vcnum)
1681 continue;
1682 conf = &v->conf;
1683 if (conf->sec_elmnt_count == 1) {
1684 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1685 *vcl = v;
1686 return conf;
1687 } else
1688 goto bad;
1689 }
1690 if (v->other_bvds == NULL) {
1691 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1692 __func__, conf->sec_elmnt_count);
1693 goto bad;
1694 }
1695 nsec = n / __be16_to_cpu(conf->prim_elmnt_count);
1696 if (conf->sec_elmnt_seq != nsec) {
1697 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1698 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1699 == nsec)
1700 break;
1701 }
1702 if (ibvd == conf->sec_elmnt_count)
1703 goto bad;
1704 conf = v->other_bvds[ibvd-1];
1705 }
1706 if (!find_index_in_bvd(ddf, conf,
1707 n - nsec*conf->sec_elmnt_count, n_bvd))
1708 goto bad;
1709 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1710 , __func__, n, *n_bvd, ibvd-1, inst);
1711 *vcl = v;
1712 return conf;
1713 }
1714 bad:
1715 pr_err("%s: Could't find disk %d in array %u\n", __func__, n, inst);
1716 return NULL;
1717 }
1718 #endif
1719
1720 static int find_phys(const struct ddf_super *ddf, __u32 phys_refnum)
1721 {
1722 /* Find the entry in phys_disk which has the given refnum
1723 * and return its index
1724 */
1725 unsigned int i;
1726 for (i = 0; i < __be16_to_cpu(ddf->phys->max_pdes); i++)
1727 if (ddf->phys->entries[i].refnum == phys_refnum)
1728 return i;
1729 return -1;
1730 }
1731
1732 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1733 {
1734 char buf[20];
1735 struct sha1_ctx ctx;
1736 sha1_init_ctx(&ctx);
1737 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1738 sha1_finish_ctx(&ctx, buf);
1739 memcpy(uuid, buf, 4*4);
1740 }
1741
1742 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1743 {
1744 /* The uuid returned here is used for:
1745 * uuid to put into bitmap file (Create, Grow)
1746 * uuid for backup header when saving critical section (Grow)
1747 * comparing uuids when re-adding a device into an array
1748 * In these cases the uuid required is that of the data-array,
1749 * not the device-set.
1750 * uuid to recognise same set when adding a missing device back
1751 * to an array. This is a uuid for the device-set.
1752 *
1753 * For each of these we can make do with a truncated
1754 * or hashed uuid rather than the original, as long as
1755 * everyone agrees.
1756 * In the case of SVD we assume the BVD is of interest,
1757 * though that might not be the case if a bitmap were made for
1758 * a mirrored SVD - worry about that later.
1759 * So we need to find the VD configuration record for the
1760 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1761 * The first 16 bytes of the sha1 of these is used.
1762 */
1763 struct ddf_super *ddf = st->sb;
1764 struct vcl *vcl = ddf->currentconf;
1765 char *guid;
1766
1767 if (vcl)
1768 guid = vcl->conf.guid;
1769 else
1770 guid = ddf->anchor.guid;
1771 uuid_from_ddf_guid(guid, uuid);
1772 }
1773
1774 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1775
1776 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1777 {
1778 struct ddf_super *ddf = st->sb;
1779 int map_disks = info->array.raid_disks;
1780 __u32 *cptr;
1781
1782 if (ddf->currentconf) {
1783 getinfo_super_ddf_bvd(st, info, map);
1784 return;
1785 }
1786 memset(info, 0, sizeof(*info));
1787
1788 info->array.raid_disks = __be16_to_cpu(ddf->phys->used_pdes);
1789 info->array.level = LEVEL_CONTAINER;
1790 info->array.layout = 0;
1791 info->array.md_minor = -1;
1792 cptr = (__u32 *)(ddf->anchor.guid + 16);
1793 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1794
1795 info->array.utime = 0;
1796 info->array.chunk_size = 0;
1797 info->container_enough = 1;
1798
1799 info->disk.major = 0;
1800 info->disk.minor = 0;
1801 if (ddf->dlist) {
1802 info->disk.number = __be32_to_cpu(ddf->dlist->disk.refnum);
1803 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1804
1805 info->data_offset = __be64_to_cpu(ddf->phys->
1806 entries[info->disk.raid_disk].
1807 config_size);
1808 info->component_size = ddf->dlist->size - info->data_offset;
1809 } else {
1810 info->disk.number = -1;
1811 info->disk.raid_disk = -1;
1812 // info->disk.raid_disk = find refnum in the table and use index;
1813 }
1814 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1815
1816 info->recovery_start = MaxSector;
1817 info->reshape_active = 0;
1818 info->recovery_blocked = 0;
1819 info->name[0] = 0;
1820
1821 info->array.major_version = -1;
1822 info->array.minor_version = -2;
1823 strcpy(info->text_version, "ddf");
1824 info->safe_mode_delay = 0;
1825
1826 uuid_from_super_ddf(st, info->uuid);
1827
1828 if (map) {
1829 int i;
1830 for (i = 0 ; i < map_disks; i++) {
1831 if (i < info->array.raid_disks &&
1832 (__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Online) &&
1833 !(__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Failed))
1834 map[i] = 1;
1835 else
1836 map[i] = 0;
1837 }
1838 }
1839 }
1840
1841 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1842 {
1843 struct ddf_super *ddf = st->sb;
1844 struct vcl *vc = ddf->currentconf;
1845 int cd = ddf->currentdev;
1846 int n_prim;
1847 int j;
1848 struct dl *dl;
1849 int map_disks = info->array.raid_disks;
1850 __u32 *cptr;
1851 struct vd_config *conf;
1852
1853 memset(info, 0, sizeof(*info));
1854 if (layout_ddf2md(&vc->conf, &info->array) == -1)
1855 return;
1856 info->array.md_minor = -1;
1857 cptr = (__u32 *)(vc->conf.guid + 16);
1858 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1859 info->array.utime = DECADE + __be32_to_cpu(vc->conf.timestamp);
1860 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1861 info->custom_array_size = 0;
1862
1863 conf = &vc->conf;
1864 n_prim = __be16_to_cpu(conf->prim_elmnt_count);
1865 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
1866 int ibvd = cd / n_prim - 1;
1867 cd %= n_prim;
1868 conf = vc->other_bvds[ibvd];
1869 }
1870
1871 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1872 info->data_offset =
1873 __be64_to_cpu(LBA_OFFSET(ddf, &vc->conf)[cd]);
1874 if (vc->block_sizes)
1875 info->component_size = vc->block_sizes[cd];
1876 else
1877 info->component_size = __be64_to_cpu(vc->conf.blocks);
1878 }
1879
1880 for (dl = ddf->dlist; dl ; dl = dl->next)
1881 if (dl->disk.refnum == conf->phys_refnum[cd])
1882 break;
1883
1884 info->disk.major = 0;
1885 info->disk.minor = 0;
1886 info->disk.state = 0;
1887 if (dl) {
1888 info->disk.major = dl->major;
1889 info->disk.minor = dl->minor;
1890 info->disk.raid_disk = cd + conf->sec_elmnt_seq
1891 * __be16_to_cpu(conf->prim_elmnt_count);
1892 info->disk.number = dl->pdnum;
1893 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
1894 }
1895
1896 info->container_member = ddf->currentconf->vcnum;
1897
1898 info->recovery_start = MaxSector;
1899 info->resync_start = 0;
1900 info->reshape_active = 0;
1901 info->recovery_blocked = 0;
1902 if (!(ddf->virt->entries[info->container_member].state
1903 & DDF_state_inconsistent) &&
1904 (ddf->virt->entries[info->container_member].init_state
1905 & DDF_initstate_mask)
1906 == DDF_init_full)
1907 info->resync_start = MaxSector;
1908
1909 uuid_from_super_ddf(st, info->uuid);
1910
1911 info->array.major_version = -1;
1912 info->array.minor_version = -2;
1913 sprintf(info->text_version, "/%s/%d",
1914 st->container_devnm,
1915 info->container_member);
1916 info->safe_mode_delay = 200;
1917
1918 memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
1919 info->name[16]=0;
1920 for(j=0; j<16; j++)
1921 if (info->name[j] == ' ')
1922 info->name[j] = 0;
1923
1924 if (map)
1925 for (j = 0; j < map_disks; j++) {
1926 map[j] = 0;
1927 if (j < info->array.raid_disks) {
1928 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
1929 if (i >= 0 &&
1930 (__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Online) &&
1931 !(__be16_to_cpu(ddf->phys->entries[i].state) & DDF_Failed))
1932 map[j] = 1;
1933 }
1934 }
1935 }
1936
1937 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
1938 char *update,
1939 char *devname, int verbose,
1940 int uuid_set, char *homehost)
1941 {
1942 /* For 'assemble' and 'force' we need to return non-zero if any
1943 * change was made. For others, the return value is ignored.
1944 * Update options are:
1945 * force-one : This device looks a bit old but needs to be included,
1946 * update age info appropriately.
1947 * assemble: clear any 'faulty' flag to allow this device to
1948 * be assembled.
1949 * force-array: Array is degraded but being forced, mark it clean
1950 * if that will be needed to assemble it.
1951 *
1952 * newdev: not used ????
1953 * grow: Array has gained a new device - this is currently for
1954 * linear only
1955 * resync: mark as dirty so a resync will happen.
1956 * uuid: Change the uuid of the array to match what is given
1957 * homehost: update the recorded homehost
1958 * name: update the name - preserving the homehost
1959 * _reshape_progress: record new reshape_progress position.
1960 *
1961 * Following are not relevant for this version:
1962 * sparc2.2 : update from old dodgy metadata
1963 * super-minor: change the preferred_minor number
1964 * summaries: update redundant counters.
1965 */
1966 int rv = 0;
1967 // struct ddf_super *ddf = st->sb;
1968 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
1969 // struct virtual_entry *ve = find_ve(ddf);
1970
1971 /* we don't need to handle "force-*" or "assemble" as
1972 * there is no need to 'trick' the kernel. When the metadata is
1973 * first updated to activate the array, all the implied modifications
1974 * will just happen.
1975 */
1976
1977 if (strcmp(update, "grow") == 0) {
1978 /* FIXME */
1979 } else if (strcmp(update, "resync") == 0) {
1980 // info->resync_checkpoint = 0;
1981 } else if (strcmp(update, "homehost") == 0) {
1982 /* homehost is stored in controller->vendor_data,
1983 * or it is when we are the vendor
1984 */
1985 // if (info->vendor_is_local)
1986 // strcpy(ddf->controller.vendor_data, homehost);
1987 rv = -1;
1988 } else if (strcmp(update, "name") == 0) {
1989 /* name is stored in virtual_entry->name */
1990 // memset(ve->name, ' ', 16);
1991 // strncpy(ve->name, info->name, 16);
1992 rv = -1;
1993 } else if (strcmp(update, "_reshape_progress") == 0) {
1994 /* We don't support reshape yet */
1995 } else if (strcmp(update, "assemble") == 0 ) {
1996 /* Do nothing, just succeed */
1997 rv = 0;
1998 } else
1999 rv = -1;
2000
2001 // update_all_csum(ddf);
2002
2003 return rv;
2004 }
2005
2006 static void make_header_guid(char *guid)
2007 {
2008 __u32 stamp;
2009 /* Create a DDF Header or Virtual Disk GUID */
2010
2011 /* 24 bytes of fiction required.
2012 * first 8 are a 'vendor-id' - "Linux-MD"
2013 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2014 * Remaining 8 are a timestamp plus a random number
2015 */
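/* Resulting 24-byte GUID layout (a sketch of the code below):
 *   bytes  0- 7  "Linux-MD"           (T10 vendor id)
 *   bytes  8-15  0xDEADBEEF 00000000  (fictional controller type)
 *   bytes 16-19  creation time, seconds since 1980, big-endian
 *   bytes 20-23  random number
 */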
2016 memcpy(guid, T10, sizeof(T10));
2017 stamp = __cpu_to_be32(0xdeadbeef);
2018 memcpy(guid+8, &stamp, 4);
2019 stamp = __cpu_to_be32(0);
2020 memcpy(guid+12, &stamp, 4);
2021 stamp = __cpu_to_be32(time(0) - DECADE);
2022 memcpy(guid+16, &stamp, 4);
2023 stamp = random32();
2024 memcpy(guid+20, &stamp, 4);
2025 }
2026
2027 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2028 {
2029 unsigned int i;
2030 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
2031 if (all_ff(ddf->virt->entries[i].guid))
2032 return i;
2033 }
2034 return DDF_NOTFOUND;
2035 }
2036
2037 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2038 const char *name)
2039 {
2040 unsigned int i;
2041 if (name == NULL)
2042 return DDF_NOTFOUND;
2043 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++) {
2044 if (all_ff(ddf->virt->entries[i].guid))
2045 continue;
2046 if (!strncmp(name, ddf->virt->entries[i].name,
2047 sizeof(ddf->virt->entries[i].name)))
2048 return i;
2049 }
2050 return DDF_NOTFOUND;
2051 }
2052
2053 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2054 const char *guid)
2055 {
2056 unsigned int i;
2057 if (guid == NULL || all_ff(guid))
2058 return DDF_NOTFOUND;
2059 for (i = 0; i < __be16_to_cpu(ddf->virt->max_vdes); i++)
2060 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2061 return i;
2062 return DDF_NOTFOUND;
2063 }
2064
2065 static int init_super_ddf_bvd(struct supertype *st,
2066 mdu_array_info_t *info,
2067 unsigned long long size,
2068 char *name, char *homehost,
2069 int *uuid, unsigned long long data_offset);
2070
2071 static int init_super_ddf(struct supertype *st,
2072 mdu_array_info_t *info,
2073 unsigned long long size, char *name, char *homehost,
2074 int *uuid, unsigned long long data_offset)
2075 {
2076 /* This is primarily called by Create when creating a new array.
2077 * We will then get add_to_super called for each component, and then
2078 * write_init_super called to write it out to each device.
2079 * For DDF, Create can create on fresh devices or on a pre-existing
2080 * array.
2081 * To create on a pre-existing array a different method will be called.
2082 * This one is just for fresh drives.
2083 *
2084 * We need to create the entire 'ddf' structure which includes:
2085 * DDF headers - these are easy.
2086 * Controller data - a Sector describing this controller .. not that
2087 * this is a controller exactly.
2088 * Physical Disk Record - one entry per device, so
2089 * leave plenty of space.
2090 * Virtual Disk Records - again, just leave plenty of space.
2091 * This just lists VDs, doesn't give details
2092 * Config records - describes the VDs that use this disk
2093 * DiskData - describes 'this' device.
2094 * BadBlockManagement - empty
2095 * Diag Space - empty
2096 * Vendor Logs - Could we put bitmaps here?
2097 *
2098 */
2099 struct ddf_super *ddf;
2100 char hostname[17];
2101 int hostlen;
2102 int max_phys_disks, max_virt_disks;
2103 unsigned long long sector;
2104 int clen;
2105 int i;
2106 int pdsize, vdsize;
2107 struct phys_disk *pd;
2108 struct virtual_disk *vd;
2109
2110 if (data_offset != INVALID_SECTORS) {
2111 pr_err("data-offset not supported by DDF\n");
2112 return 0;
2113 }
2114
2115 if (st->sb)
2116 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2117 data_offset);
2118
2119 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2120 pr_err("%s could not allocate superblock\n", __func__);
2121 return 0;
2122 }
2123 memset(ddf, 0, sizeof(*ddf));
2124 ddf->dlist = NULL; /* no physical disks yet */
2125 ddf->conflist = NULL; /* No virtual disks yet */
2126 st->sb = ddf;
2127
2128 if (info == NULL) {
2129 /* zeroing superblock */
2130 return 0;
2131 }
2132
2133 /* At least 32MB *must* be reserved for the ddf. So let's just
2134 * start 32MB from the end, and put the primary header there.
2135 * Don't do secondary for now.
2136 * We don't know exactly where that will be yet as it could be
2137 * different on each device. So for now, just set up the lengths.
2138 *
2139 */
2140
2141 ddf->anchor.magic = DDF_HEADER_MAGIC;
2142 make_header_guid(ddf->anchor.guid);
2143
2144 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2145 ddf->anchor.seq = __cpu_to_be32(1);
2146 ddf->anchor.timestamp = __cpu_to_be32(time(0) - DECADE);
2147 ddf->anchor.openflag = 0xFF;
2148 ddf->anchor.foreignflag = 0;
2149 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2150 ddf->anchor.pad0 = 0xff;
2151 memset(ddf->anchor.pad1, 0xff, 12);
2152 memset(ddf->anchor.header_ext, 0xff, 32);
2153 ddf->anchor.primary_lba = ~(__u64)0;
2154 ddf->anchor.secondary_lba = ~(__u64)0;
2155 ddf->anchor.type = DDF_HEADER_ANCHOR;
2156 memset(ddf->anchor.pad2, 0xff, 3);
2157 ddf->anchor.workspace_len = __cpu_to_be32(32768); /* Must be reserved */
2158 ddf->anchor.workspace_lba = ~(__u64)0; /* Put this at bottom
2159 of 32M reserved.. */
2160 max_phys_disks = 1023; /* Should be enough */
2161 ddf->anchor.max_pd_entries = __cpu_to_be16(max_phys_disks);
2162 max_virt_disks = 255;
2163 ddf->anchor.max_vd_entries = __cpu_to_be16(max_virt_disks); /* ?? */
2164 ddf->anchor.max_partitions = __cpu_to_be16(64); /* ?? */
2165 ddf->max_part = 64;
2166 ddf->mppe = 256;
2167 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
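/* conf_rec_len is in sectors: presumably one sector for the fixed header
 * of the vd_config record plus, per primary element, a 4-byte phys_refnum
 * and an 8-byte LBA offset, rounded up to whole sectors
 * (1 + ROUND_UP(256 * 12, 512)/512 == 7 here). */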
2168 ddf->anchor.config_record_len = __cpu_to_be16(ddf->conf_rec_len);
2169 ddf->anchor.max_primary_element_entries = __cpu_to_be16(ddf->mppe);
2170 memset(ddf->anchor.pad3, 0xff, 54);
2171 /* The controller section is one sector long, immediately
2172 * after the DDF header */
2173 sector = 1;
2174 ddf->anchor.controller_section_offset = __cpu_to_be32(sector);
2175 ddf->anchor.controller_section_length = __cpu_to_be32(1);
2176 sector += 1;
2177
2178 /* phys is 8 sectors after that */
2179 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2180 sizeof(struct phys_disk_entry)*max_phys_disks,
2181 512);
2182 switch(pdsize/512) {
2183 case 2: case 8: case 32: case 128: case 512: break;
2184 default: abort();
2185 }
2186 ddf->anchor.phys_section_offset = __cpu_to_be32(sector);
2187 ddf->anchor.phys_section_length =
2188 __cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2189 sector += pdsize/512;
2190
2191 /* virt is another 32 sectors */
2192 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2193 sizeof(struct virtual_entry) * max_virt_disks,
2194 512);
2195 switch(vdsize/512) {
2196 case 2: case 8: case 32: case 128: case 512: break;
2197 default: abort();
2198 }
2199 ddf->anchor.virt_section_offset = __cpu_to_be32(sector);
2200 ddf->anchor.virt_section_length =
2201 __cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2202 sector += vdsize/512;
2203
2204 clen = ddf->conf_rec_len * (ddf->max_part+1);
2205 ddf->anchor.config_section_offset = __cpu_to_be32(sector);
2206 ddf->anchor.config_section_length = __cpu_to_be32(clen);
2207 sector += clen;
2208
2209 ddf->anchor.data_section_offset = __cpu_to_be32(sector);
2210 ddf->anchor.data_section_length = __cpu_to_be32(1);
2211 sector += 1;
2212
2213 ddf->anchor.bbm_section_length = __cpu_to_be32(0);
2214 ddf->anchor.bbm_section_offset = __cpu_to_be32(0xFFFFFFFF);
2215 ddf->anchor.diag_space_length = __cpu_to_be32(0);
2216 ddf->anchor.diag_space_offset = __cpu_to_be32(0xFFFFFFFF);
2217 ddf->anchor.vendor_length = __cpu_to_be32(0);
2218 ddf->anchor.vendor_offset = __cpu_to_be32(0xFFFFFFFF);
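/* Resulting section layout (offsets in sectors, relative to the
 * primary/secondary header), summarising the values set above:
 *   1      controller data      (1 sector)
 *   2      phys disk records    (pdsize/512 sectors)
 *   then   virtual disk records (vdsize/512 sectors)
 *   then   config records       ((max_part+1) * conf_rec_len sectors)
 *   then   disk data            (1 sector)
 * The bbm, diag space and vendor log sections are left empty. */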
2219
2220 memset(ddf->anchor.pad4, 0xff, 256);
2221
2222 memcpy(&ddf->primary, &ddf->anchor, 512);
2223 memcpy(&ddf->secondary, &ddf->anchor, 512);
2224
2225 ddf->primary.openflag = 1; /* I guess.. */
2226 ddf->primary.type = DDF_HEADER_PRIMARY;
2227
2228 ddf->secondary.openflag = 1; /* I guess.. */
2229 ddf->secondary.type = DDF_HEADER_SECONDARY;
2230
2231 ddf->active = &ddf->primary;
2232
2233 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2234
2235 /* 24 more bytes of fiction required.
2236 * first 8 are a 'vendor-id' - "Linux-MD"
2237 * Remaining 16 are serial number.... maybe a hostname would do?
2238 */
2239 memcpy(ddf->controller.guid, T10, sizeof(T10));
2240 gethostname(hostname, sizeof(hostname));
2241 hostname[sizeof(hostname) - 1] = 0;
2242 hostlen = strlen(hostname);
2243 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2244 for (i = strlen(T10) ; i+hostlen < 24; i++)
2245 ddf->controller.guid[i] = ' ';
2246
2247 ddf->controller.type.vendor_id = __cpu_to_be16(0xDEAD);
2248 ddf->controller.type.device_id = __cpu_to_be16(0xBEEF);
2249 ddf->controller.type.sub_vendor_id = 0;
2250 ddf->controller.type.sub_device_id = 0;
2251 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2252 memset(ddf->controller.pad, 0xff, 8);
2253 memset(ddf->controller.vendor_data, 0xff, 448);
2254 if (homehost && strlen(homehost) < 440)
2255 strcpy((char*)ddf->controller.vendor_data, homehost);
2256
2257 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2258 pr_err("%s could not allocate pd\n", __func__);
2259 return 0;
2260 }
2261 ddf->phys = pd;
2262 ddf->pdsize = pdsize;
2263
2264 memset(pd, 0xff, pdsize);
2265 memset(pd, 0, sizeof(*pd));
2266 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2267 pd->used_pdes = __cpu_to_be16(0);
2268 pd->max_pdes = __cpu_to_be16(max_phys_disks);
2269 memset(pd->pad, 0xff, 52);
2270 for (i = 0; i < max_phys_disks; i++)
2271 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2272
2273 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2274 pr_err("%s could not allocate vd\n", __func__);
2275 return 0;
2276 }
2277 ddf->virt = vd;
2278 ddf->vdsize = vdsize;
2279 memset(vd, 0, vdsize);
2280 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2281 vd->populated_vdes = __cpu_to_be16(0);
2282 vd->max_vdes = __cpu_to_be16(max_virt_disks);
2283 memset(vd->pad, 0xff, 52);
2284
2285 for (i=0; i<max_virt_disks; i++)
2286 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2287
2288 st->sb = ddf;
2289 ddf_set_updates_pending(ddf);
2290 return 1;
2291 }
2292
2293 static int chunk_to_shift(int chunksize)
2294 {
2295 return ffs(chunksize/512)-1;
2296 }
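/* For example: a 64 KiB chunk is 128 sectors, so ffs(128)-1 == 7;
 * the reverse mapping used elsewhere in this file is 512 << 7 == 65536. */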
2297
2298 #ifndef MDASSEMBLE
2299 struct extent {
2300 unsigned long long start, size;
2301 };
2302 static int cmp_extent(const void *av, const void *bv)
2303 {
2304 const struct extent *a = av;
2305 const struct extent *b = bv;
2306 if (a->start < b->start)
2307 return -1;
2308 if (a->start > b->start)
2309 return 1;
2310 return 0;
2311 }
2312
2313 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2314 {
2315 /* find a list of used extents on the given physical device
2316 * (dl) of the given ddf.
2317 * Return a malloced array of 'struct extent'
2318
2319 * FIXME ignore DDF_Legacy devices?
2320
2321 */
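/* Note: the returned array is sorted by start and is terminated by a
 * sentinel entry whose start is the device's config_size and whose
 * size is 0; callers walk the free gaps until they hit that sentinel. */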
2322 struct extent *rv;
2323 int n = 0;
2324 unsigned int i;
2325
2326 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2327
2328 for (i = 0; i < ddf->max_part; i++) {
2329 const struct vd_config *bvd;
2330 unsigned int ibvd;
2331 struct vcl *v = dl->vlist[i];
2332 if (v == NULL ||
2333 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2334 &bvd, &ibvd) == DDF_NOTFOUND)
2335 continue;
2336 rv[n].start = __be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2337 rv[n].size = __be64_to_cpu(bvd->blocks);
2338 n++;
2339 }
2340 qsort(rv, n, sizeof(*rv), cmp_extent);
2341
2342 rv[n].start = __be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2343 rv[n].size = 0;
2344 return rv;
2345 }
2346 #endif
2347
2348 static int init_super_ddf_bvd(struct supertype *st,
2349 mdu_array_info_t *info,
2350 unsigned long long size,
2351 char *name, char *homehost,
2352 int *uuid, unsigned long long data_offset)
2353 {
2354 /* We are creating a BVD inside a pre-existing container,
2355 * so st->sb is already set.
2356 * We need to create a new vd_config and a new virtual_entry
2357 */
2358 struct ddf_super *ddf = st->sb;
2359 unsigned int venum, i;
2360 struct virtual_entry *ve;
2361 struct vcl *vcl;
2362 struct vd_config *vc;
2363
2364 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2365 pr_err("This ddf already has an array called %s\n", name);
2366 return 0;
2367 }
2368 venum = find_unused_vde(ddf);
2369 if (venum == DDF_NOTFOUND) {
2370 pr_err("Cannot find spare slot for virtual disk\n");
2371 return 0;
2372 }
2373 ve = &ddf->virt->entries[venum];
2374
2375 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2376 * timestamp, random number
2377 */
2378 make_header_guid(ve->guid);
2379 ve->unit = __cpu_to_be16(info->md_minor);
2380 ve->pad0 = 0xFFFF;
2381 ve->guid_crc = crc32(0, (unsigned char*)ddf->anchor.guid, DDF_GUID_LEN);
2382 ve->type = 0;
2383 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2384 if (info->state & 1) /* clean */
2385 ve->init_state = DDF_init_full;
2386 else
2387 ve->init_state = DDF_init_not;
2388
2389 memset(ve->pad1, 0xff, 14);
2390 memset(ve->name, ' ', 16);
2391 if (name)
2392 strncpy(ve->name, name, 16);
2393 ddf->virt->populated_vdes =
2394 __cpu_to_be16(__be16_to_cpu(ddf->virt->populated_vdes)+1);
2395
2396 /* Now create a new vd_config */
2397 if (posix_memalign((void**)&vcl, 512,
2398 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2399 pr_err("%s could not allocate vd_config\n", __func__);
2400 return 0;
2401 }
2402 vcl->vcnum = venum;
2403 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2404 vc = &vcl->conf;
2405
2406 vc->magic = DDF_VD_CONF_MAGIC;
2407 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2408 vc->timestamp = __cpu_to_be32(time(0)-DECADE);
2409 vc->seqnum = __cpu_to_be32(1);
2410 memset(vc->pad0, 0xff, 24);
2411 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2412 if (layout_md2ddf(info, vc) == -1 ||
2413 __be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2414 pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
2415 __func__, info->level, info->layout, info->raid_disks);
2416 free(vcl);
2417 return 0;
2418 }
2419 vc->sec_elmnt_seq = 0;
2420 if (alloc_other_bvds(ddf, vcl) != 0) {
2421 pr_err("%s could not allocate other bvds\n",
2422 __func__);
2423 free(vcl);
2424 return 0;
2425 }
2426 vc->blocks = __cpu_to_be64(info->size * 2);
2427 vc->array_blocks = __cpu_to_be64(
2428 calc_array_size(info->level, info->raid_disks, info->layout,
2429 info->chunk_size, info->size*2));
2430 memset(vc->pad1, 0xff, 8);
2431 vc->spare_refs[0] = 0xffffffff;
2432 vc->spare_refs[1] = 0xffffffff;
2433 vc->spare_refs[2] = 0xffffffff;
2434 vc->spare_refs[3] = 0xffffffff;
2435 vc->spare_refs[4] = 0xffffffff;
2436 vc->spare_refs[5] = 0xffffffff;
2437 vc->spare_refs[6] = 0xffffffff;
2438 vc->spare_refs[7] = 0xffffffff;
2439 memset(vc->cache_pol, 0, 8);
2440 vc->bg_rate = 0x80;
2441 memset(vc->pad2, 0xff, 3);
2442 memset(vc->pad3, 0xff, 52);
2443 memset(vc->pad4, 0xff, 192);
2444 memset(vc->v0, 0xff, 32);
2445 memset(vc->v1, 0xff, 32);
2446 memset(vc->v2, 0xff, 16);
2447 memset(vc->v3, 0xff, 16);
2448 memset(vc->vendor, 0xff, 32);
2449
2450 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2451 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2452
2453 for (i = 1; i < vc->sec_elmnt_count; i++) {
2454 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2455 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2456 }
2457
2458 vcl->next = ddf->conflist;
2459 ddf->conflist = vcl;
2460 ddf->currentconf = vcl;
2461 ddf_set_updates_pending(ddf);
2462 return 1;
2463 }
2464
2465 static int get_svd_state(const struct ddf_super *, const struct vcl *);
2466
2467 #ifndef MDASSEMBLE
2468 static void add_to_super_ddf_bvd(struct supertype *st,
2469 mdu_disk_info_t *dk, int fd, char *devname)
2470 {
2471 /* fd and devname identify a device within the ddf container (st).
2472 * dk identifies a location in the new BVD.
2473 * We need to find suitable free space in that device and update
2474 * the phys_refnum and lba_offset for the newly created vd_config.
2475 * We might also want to update the type in the phys_disk
2476 * section.
2477 *
2478 * Alternately: fd == -1 and we have already chosen which device to
2479 * use and recorded it in dl->raiddisk.
2480 */
2481 struct dl *dl;
2482 struct ddf_super *ddf = st->sb;
2483 struct vd_config *vc;
2484 unsigned int i;
2485 unsigned long long blocks, pos, esize;
2486 struct extent *ex;
2487 unsigned int raid_disk = dk->raid_disk;
2488
2489 if (fd == -1) {
2490 for (dl = ddf->dlist; dl ; dl = dl->next)
2491 if (dl->raiddisk == dk->raid_disk)
2492 break;
2493 } else {
2494 for (dl = ddf->dlist; dl ; dl = dl->next)
2495 if (dl->major == dk->major &&
2496 dl->minor == dk->minor)
2497 break;
2498 }
2499 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
2500 return;
2501
2502 vc = &ddf->currentconf->conf;
2503 if (vc->sec_elmnt_count > 1) {
2504 unsigned int n = __be16_to_cpu(vc->prim_elmnt_count);
2505 if (raid_disk >= n)
2506 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2507 raid_disk %= n;
2508 }
2509
2510 ex = get_extents(ddf, dl);
2511 if (!ex)
2512 return;
2513
2514 i = 0; pos = 0;
2515 blocks = __be64_to_cpu(vc->blocks);
2516 if (ddf->currentconf->block_sizes)
2517 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2518
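/* First-fit search: 'pos' tracks the end of the previous used extent,
 * so 'esize' is the size of the gap before extent i; stop at the first
 * gap large enough to hold 'blocks' sectors. */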
2519 do {
2520 esize = ex[i].start - pos;
2521 if (esize >= blocks)
2522 break;
2523 pos = ex[i].start + ex[i].size;
2524 i++;
2525 } while (ex[i-1].size);
2526
2527 free(ex);
2528 if (esize < blocks)
2529 return;
2530
2531 ddf->currentdev = dk->raid_disk;
2532 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2533 LBA_OFFSET(ddf, vc)[raid_disk] = __cpu_to_be64(pos);
2534
2535 for (i = 0; i < ddf->max_part ; i++)
2536 if (dl->vlist[i] == NULL)
2537 break;
2538 if (i == ddf->max_part)
2539 return;
2540 dl->vlist[i] = ddf->currentconf;
2541
2542 if (fd >= 0)
2543 dl->fd = fd;
2544 if (devname)
2545 dl->devname = devname;
2546
2547 /* Check if we can mark array as optimal yet */
2548 i = ddf->currentconf->vcnum;
2549 ddf->virt->entries[i].state =
2550 (ddf->virt->entries[i].state & ~DDF_state_mask)
2551 | get_svd_state(ddf, ddf->currentconf);
2552 ddf->phys->entries[dl->pdnum].type &= ~__cpu_to_be16(DDF_Global_Spare);
2553 ddf->phys->entries[dl->pdnum].type |= __cpu_to_be16(DDF_Active_in_VD);
2554 dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
2555 __func__, dl->pdnum, __be32_to_cpu(dl->disk.refnum),
2556 ddf->currentconf->vcnum, guid_str(vc->guid),
2557 dk->raid_disk);
2558 ddf_set_updates_pending(ddf);
2559 }
2560
2561 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2562 {
2563 unsigned int i;
2564 for (i = 0; i < __be16_to_cpu(ddf->phys->max_pdes); i++) {
2565 if (all_ff(ddf->phys->entries[i].guid))
2566 return i;
2567 }
2568 return DDF_NOTFOUND;
2569 }
2570
2571 /* add a device to a container, either while creating it or while
2572 * expanding a pre-existing container
2573 */
2574 static int add_to_super_ddf(struct supertype *st,
2575 mdu_disk_info_t *dk, int fd, char *devname,
2576 unsigned long long data_offset)
2577 {
2578 struct ddf_super *ddf = st->sb;
2579 struct dl *dd;
2580 time_t now;
2581 struct tm *tm;
2582 unsigned long long size;
2583 struct phys_disk_entry *pde;
2584 unsigned int n, i;
2585 struct stat stb;
2586 __u32 *tptr;
2587
2588 if (ddf->currentconf) {
2589 add_to_super_ddf_bvd(st, dk, fd, devname);
2590 return 0;
2591 }
2592
2593 /* This is device numbered dk->number. We need to create
2594 * a phys_disk entry and a more detailed disk_data entry.
2595 */
2596 fstat(fd, &stb);
2597 n = find_unused_pde(ddf);
2598 if (n == DDF_NOTFOUND) {
2599 pr_err("%s: No free slot in array, cannot add disk\n",
2600 __func__);
2601 return 1;
2602 }
2603 pde = &ddf->phys->entries[n];
2604 get_dev_size(fd, NULL, &size);
2605 if (size <= 32*1024*1024) {
2606 pr_err("%s: device size must be at least 32MB\n",
2607 __func__);
2608 return 1;
2609 }
2610 size >>= 9;
2611
2612 if (posix_memalign((void**)&dd, 512,
2613 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2614 pr_err("%s could not allocate buffer for new disk, aborting\n",
2615 __func__);
2616 return 1;
2617 }
2618 dd->major = major(stb.st_rdev);
2619 dd->minor = minor(stb.st_rdev);
2620 dd->devname = devname;
2621 dd->fd = fd;
2622 dd->spare = NULL;
2623
2624 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2625 now = time(0);
2626 tm = localtime(&now);
2627 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2628 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2629 tptr = (__u32 *)(dd->disk.guid + 16);
2630 *tptr++ = random32();
2631 *tptr = random32();
2632
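/* Pick a random refnum and retry until it does not collide with any
 * refnum already present in the phys disk table (the inner scan
 * leaves i == 0 only when no entry matches). */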
2633 do {
2634 /* Cannot be bothered finding a CRC of some irrelevant details*/
2635 dd->disk.refnum = random32();
2636 for (i = __be16_to_cpu(ddf->active->max_pd_entries);
2637 i > 0; i--)
2638 if (ddf->phys->entries[i-1].refnum == dd->disk.refnum)
2639 break;
2640 } while (i > 0);
2641
2642 dd->disk.forced_ref = 1;
2643 dd->disk.forced_guid = 1;
2644 memset(dd->disk.vendor, ' ', 32);
2645 memcpy(dd->disk.vendor, "Linux", 5);
2646 memset(dd->disk.pad, 0xff, 442);
2647 for (i = 0; i < ddf->max_part ; i++)
2648 dd->vlist[i] = NULL;
2649
2650 dd->pdnum = n;
2651
2652 if (st->update_tail) {
2653 int len = (sizeof(struct phys_disk) +
2654 sizeof(struct phys_disk_entry));
2655 struct phys_disk *pd;
2656
2657 pd = xmalloc(len);
2658 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2659 pd->used_pdes = __cpu_to_be16(n);
2660 pde = &pd->entries[0];
2661 dd->mdupdate = pd;
2662 } else
2663 ddf->phys->used_pdes = __cpu_to_be16(
2664 1 + __be16_to_cpu(ddf->phys->used_pdes));
2665
2666 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2667 pde->refnum = dd->disk.refnum;
2668 pde->type = __cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2669 pde->state = __cpu_to_be16(DDF_Online);
2670 dd->size = size;
2671 /*
2672 * If there is already a device in dlist, try to reserve the same
2673 * amount of workspace. Otherwise, use 32MB.
2674 * We checked disk size above already.
2675 */
2676 #define __calc_lba(new, old, lba, mb) do { \
2677 unsigned long long dif; \
2678 if ((old) != NULL) \
2679 dif = (old)->size - __be64_to_cpu((old)->lba); \
2680 else \
2681 dif = (new)->size; \
2682 if ((new)->size > dif) \
2683 (new)->lba = __cpu_to_be64((new)->size - dif); \
2684 else \
2685 (new)->lba = __cpu_to_be64((new)->size - (mb*1024*2)); \
2686 } while (0)
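/* With an empty dlist (old == NULL) the fallback branch is taken and the
 * lba ends up mb*1024*2 sectors (i.e. mb megabytes) from the end of the
 * device: workspace and secondary 32MB from the end, primary 16MB. */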
2687 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2688 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2689 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2690 pde->config_size = dd->workspace_lba;
2691
2692 sprintf(pde->path, "%17.17s","Information: nil") ;
2693 memset(pde->pad, 0xff, 6);
2694
2695 if (st->update_tail) {
2696 dd->next = ddf->add_list;
2697 ddf->add_list = dd;
2698 } else {
2699 dd->next = ddf->dlist;
2700 ddf->dlist = dd;
2701 ddf_set_updates_pending(ddf);
2702 }
2703
2704 return 0;
2705 }
2706
2707 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2708 {
2709 struct ddf_super *ddf = st->sb;
2710 struct dl *dl;
2711
2712 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2713 * disappeared from the container.
2714 * We need to arrange that it disappears from the metadata and
2715 * internal data structures too.
2716 * Most of the work is done by ddf_process_update which edits
2717 * the metadata, closes the file handle, and attaches the memory
2718 * so that free_updates will free it.
2719 */
2720 for (dl = ddf->dlist; dl ; dl = dl->next)
2721 if (dl->major == dk->major &&
2722 dl->minor == dk->minor)
2723 break;
2724 if (!dl)
2725 return -1;
2726
2727 if (st->update_tail) {
2728 int len = (sizeof(struct phys_disk) +
2729 sizeof(struct phys_disk_entry));
2730 struct phys_disk *pd;
2731
2732 pd = xmalloc(len);
2733 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2734 pd->used_pdes = __cpu_to_be16(dl->pdnum);
2735 pd->entries[0].state = __cpu_to_be16(DDF_Missing);
2736 append_metadata_update(st, pd, len);
2737 }
2738 return 0;
2739 }
2740
2741 /*
2742 * This is the write_init_super method for a ddf container. It is
2743 * called when creating a container or adding another device to a
2744 * container.
2745 */
2746 #define NULL_CONF_SZ 4096
2747
2748 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type,
2749 char *null_aligned)
2750 {
2751 unsigned long long sector;
2752 struct ddf_header *header;
2753 int fd, i, n_config, conf_size;
2754 int ret = 0;
2755
2756 fd = d->fd;
2757
2758 switch (type) {
2759 case DDF_HEADER_PRIMARY:
2760 header = &ddf->primary;
2761 sector = __be64_to_cpu(header->primary_lba);
2762 break;
2763 case DDF_HEADER_SECONDARY:
2764 header = &ddf->secondary;
2765 sector = __be64_to_cpu(header->secondary_lba);
2766 break;
2767 default:
2768 return 0;
2769 }
2770
2771 header->type = type;
2772 header->openflag = 1;
2773 header->crc = calc_crc(header, 512);
2774
2775 lseek64(fd, sector<<9, 0);
2776 if (write(fd, header, 512) < 0)
2777 goto out;
2778
2779 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2780 if (write(fd, &ddf->controller, 512) < 0)
2781 goto out;
2782
2783 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2784 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2785 goto out;
2786 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2787 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2788 goto out;
2789
2790 /* Now write lots of config records. */
2791 n_config = ddf->max_part;
2792 conf_size = ddf->conf_rec_len * 512;
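/* Each disk carries max_part+1 config slots of conf_rec_len sectors:
 * slot i < n_config holds the vd_config for partition i (if any),
 * the final slot holds the spare assignment record, and unused slots
 * are filled with 0xff padding from null_aligned. */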
2793 for (i = 0 ; i <= n_config ; i++) {
2794 struct vcl *c;
2795 struct vd_config *vdc = NULL;
2796 if (i == n_config) {
2797 c = (struct vcl *)d->spare;
2798 if (c)
2799 vdc = &c->conf;
2800 } else {
2801 unsigned int dummy;
2802 c = d->vlist[i];
2803 if (c)
2804 get_pd_index_from_refnum(
2805 c, d->disk.refnum,
2806 ddf->mppe,
2807 (const struct vd_config **)&vdc,
2808 &dummy);
2809 }
2810 if (c) {
2811 dprintf("writing conf record %i on disk %08x for %s/%u\n",
2812 i, __be32_to_cpu(d->disk.refnum),
2813 guid_str(vdc->guid),
2814 vdc->sec_elmnt_seq);
2815 vdc->seqnum = header->seq;
2816 vdc->crc = calc_crc(vdc, conf_size);
2817 if (write(fd, vdc, conf_size) < 0)
2818 break;
2819 } else {
2820 unsigned int togo = conf_size;
2821 while (togo > NULL_CONF_SZ) {
2822 if (write(fd, null_aligned, NULL_CONF_SZ) < 0)
2823 break;
2824 togo -= NULL_CONF_SZ;
2825 }
2826 if (write(fd, null_aligned, togo) < 0)
2827 break;
2828 }
2829 }
2830 if (i <= n_config)
2831 goto out;
2832
2833 d->disk.crc = calc_crc(&d->disk, 512);
2834 if (write(fd, &d->disk, 512) < 0)
2835 goto out;
2836
2837 ret = 1;
2838 out:
2839 header->openflag = 0;
2840 header->crc = calc_crc(header, 512);
2841
2842 lseek64(fd, sector<<9, 0);
2843 if (write(fd, header, 512) < 0)
2844 ret = 0;
2845
2846 return ret;
2847 }
2848
2849 static int __write_init_super_ddf(struct supertype *st)
2850 {
2851 struct ddf_super *ddf = st->sb;
2852 struct dl *d;
2853 int attempts = 0;
2854 int successes = 0;
2855 unsigned long long size;
2856 char *null_aligned;
2857 __u32 seq;
2858
2859 pr_state(ddf, __func__);
2860 if (posix_memalign((void**)&null_aligned, 4096, NULL_CONF_SZ) != 0) {
2861 return -ENOMEM;
2862 }
2863 memset(null_aligned, 0xff, NULL_CONF_SZ);
2864
2865 seq = ddf->active->seq + 1;
2866
2867 /* try to write updated metadata,
2868 * if we catch a failure move on to the next disk
2869 */
2870 for (d = ddf->dlist; d; d=d->next) {
2871 int fd = d->fd;
2872
2873 if (fd < 0)
2874 continue;
2875
2876 attempts++;
2877 /* We need to fill in the primary, (secondary) and workspace
2878 * lbas in the headers and set their checksums.
2879 * Also checksum phys, virt, ....
2880 *
2881 * Then write everything out; the anchor is written last.
2882 */
2883 get_dev_size(fd, NULL, &size);
2884 size /= 512;
2885 if (d->workspace_lba != 0)
2886 ddf->anchor.workspace_lba = d->workspace_lba;
2887 else
2888 ddf->anchor.workspace_lba =
2889 __cpu_to_be64(size - 32*1024*2);
2890 if (d->primary_lba != 0)
2891 ddf->anchor.primary_lba = d->primary_lba;
2892 else
2893 ddf->anchor.primary_lba =
2894 __cpu_to_be64(size - 16*1024*2);
2895 if (d->secondary_lba != 0)
2896 ddf->anchor.secondary_lba = d->secondary_lba;
2897 else
2898 ddf->anchor.secondary_lba =
2899 __cpu_to_be64(size - 32*1024*2);
2900 ddf->anchor.seq = seq;
2901 memcpy(&ddf->primary, &ddf->anchor, 512);
2902 memcpy(&ddf->secondary, &ddf->anchor, 512);
2903
2904 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
2905 ddf->anchor.seq = 0xFFFFFFFF; /* no sequencing in anchor */
2906 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
2907
2908 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY,
2909 null_aligned))
2910 continue;
2911
2912 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY,
2913 null_aligned))
2914 continue;
2915
2916 lseek64(fd, (size-1)*512, SEEK_SET);
2917 if (write(fd, &ddf->anchor, 512) < 0)
2918 continue;
2919 successes++;
2920 }
2921 free(null_aligned);
2922
2923 return attempts != successes;
2924 }
2925
2926 static int write_init_super_ddf(struct supertype *st)
2927 {
2928 struct ddf_super *ddf = st->sb;
2929 struct vcl *currentconf = ddf->currentconf;
2930
2931 /* We are done with currentconf; reset it so that st points at the container */
2932 ddf->currentconf = NULL;
2933
2934 if (st->update_tail) {
2935 /* queue the virtual_disk and vd_config as metadata updates */
2936 struct virtual_disk *vd;
2937 struct vd_config *vc;
2938 int len;
2939
2940 if (!currentconf) {
2941 int len = (sizeof(struct phys_disk) +
2942 sizeof(struct phys_disk_entry));
2943
2944 /* adding a disk to the container. */
2945 if (!ddf->add_list)
2946 return 0;
2947
2948 append_metadata_update(st, ddf->add_list->mdupdate, len);
2949 ddf->add_list->mdupdate = NULL;
2950 return 0;
2951 }
2952
2953 /* Newly created VD */
2954
2955 /* First the virtual disk. We have a slightly fake header */
2956 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
2957 vd = xmalloc(len);
2958 *vd = *ddf->virt;
2959 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
2960 vd->populated_vdes = __cpu_to_be16(currentconf->vcnum);
2961 append_metadata_update(st, vd, len);
2962
2963 /* Then the vd_config */
2964 len = ddf->conf_rec_len * 512;
2965 vc = xmalloc(len);
2966 memcpy(vc, &currentconf->conf, len);
2967 append_metadata_update(st, vc, len);
2968
2969 /* FIXME I need to close the fds! */
2970 return 0;
2971 } else {
2972 struct dl *d;
2973 if (!currentconf)
2974 for (d = ddf->dlist; d; d=d->next)
2975 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
2976 return __write_init_super_ddf(st);
2977 }
2978 }
2979
2980 #endif
2981
2982 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
2983 unsigned long long data_offset)
2984 {
2985 /* We must reserve the last 32Meg */
2986 if (devsize <= 32*1024*2)
2987 return 0;
2988 return devsize - 32*1024*2;
2989 }
2990
2991 #ifndef MDASSEMBLE
2992
2993 static int reserve_space(struct supertype *st, int raiddisks,
2994 unsigned long long size, int chunk,
2995 unsigned long long *freesize)
2996 {
2997 /* Find 'raiddisks' spare extents at least 'size' big (but
2998 * only caring about multiples of 'chunk') and remember
2999 * them.
3000 * If they cannot be found, fail.
3001 */
3002 struct dl *dl;
3003 struct ddf_super *ddf = st->sb;
3004 int cnt = 0;
3005
3006 for (dl = ddf->dlist; dl ; dl=dl->next) {
3007 dl->raiddisk = -1;
3008 dl->esize = 0;
3009 }
3010 /* Now find largest extent on each device */
3011 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3012 struct extent *e = get_extents(ddf, dl);
3013 unsigned long long pos = 0;
3014 int i = 0;
3015 int found = 0;
3016 unsigned long long minsize = size;
3017
3018 if (size == 0)
3019 minsize = chunk;
3020
3021 if (!e)
3022 continue;
3023 do {
3024 unsigned long long esize;
3025 esize = e[i].start - pos;
3026 if (esize >= minsize) {
3027 found = 1;
3028 minsize = esize;
3029 }
3030 pos = e[i].start + e[i].size;
3031 i++;
3032 } while (e[i-1].size);
3033 if (found) {
3034 cnt++;
3035 dl->esize = minsize;
3036 }
3037 free(e);
3038 }
3039 if (cnt < raiddisks) {
3040 pr_err("not enough devices with space to create array.\n");
3041 return 0; /* Not enough free extents large enough */
3042 }
3043 if (size == 0) {
3044 /* choose the largest size of which there are at least 'raiddisks' */
3045 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3046 struct dl *dl2;
3047 if (dl->esize <= size)
3048 continue;
3049 /* This is bigger than 'size', see if there are enough */
3050 cnt = 0;
3051 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3052 if (dl2->esize >= dl->esize)
3053 cnt++;
3054 if (cnt >= raiddisks)
3055 size = dl->esize;
3056 }
3057 if (chunk) {
3058 size = size / chunk;
3059 size *= chunk;
3060 }
3061 *freesize = size;
3062 if (size < 32) {
3063 pr_err("not enough spare devices to create array.\n");
3064 return 0;
3065 }
3066 }
3067 /* We have a 'size' of which there are enough free extents.
3068 * We simply do a first-fit */
3069 cnt = 0;
3070 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3071 if (dl->esize < size)
3072 continue;
3073
3074 dl->raiddisk = cnt;
3075 cnt++;
3076 }
3077 return 1;
3078 }
3079
3080 static int
3081 validate_geometry_ddf_container(struct supertype *st,
3082 int level, int layout, int raiddisks,
3083 int chunk, unsigned long long size,
3084 unsigned long long data_offset,
3085 char *dev, unsigned long long *freesize,
3086 int verbose);
3087
3088 static int validate_geometry_ddf_bvd(struct supertype *st,
3089 int level, int layout, int raiddisks,
3090 int *chunk, unsigned long long size,
3091 unsigned long long data_offset,
3092 char *dev, unsigned long long *freesize,
3093 int verbose);
3094
3095 static int validate_geometry_ddf(struct supertype *st,
3096 int level, int layout, int raiddisks,
3097 int *chunk, unsigned long long size,
3098 unsigned long long data_offset,
3099 char *dev, unsigned long long *freesize,
3100 int verbose)
3101 {
3102 int fd;
3103 struct mdinfo *sra;
3104 int cfd;
3105
3106 /* ddf potentially supports lots of things, but it depends on
3107 * what devices are offered (and maybe kernel version?)
3108 * If given unused devices, we will make a container.
3109 * If given devices in a container, we will make a BVD.
3110 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3111 */
3112
3113 if (chunk && *chunk == UnSet)
3114 *chunk = DEFAULT_CHUNK;
3115
3116 if (level == -1000000) level = LEVEL_CONTAINER;
3117 if (level == LEVEL_CONTAINER) {
3118 /* Must be a fresh device to add to a container */
3119 return validate_geometry_ddf_container(st, level, layout,
3120 raiddisks, chunk?*chunk:0,
3121 size, data_offset, dev,
3122 freesize,
3123 verbose);
3124 }
3125
3126 if (!dev) {
3127 mdu_array_info_t array = {
3128 .level = level, .layout = layout,
3129 .raid_disks = raiddisks
3130 };
3131 struct vd_config conf;
3132 if (layout_md2ddf(&array, &conf) == -1) {
3133 if (verbose)
3134 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3135 level, layout, raiddisks);
3136 return 0;
3137 }
3138 /* Should check layout? etc */
3139
3140 if (st->sb && freesize) {
3141 /* --create was given a container to create in.
3142 * So we need to check that there are enough
3143 * free spaces and return the amount of space.
3144 * We may as well remember which drives were
3145 * chosen so that add_to_super/getinfo_super
3146 * can return them.
3147 */
3148 return reserve_space(st, raiddisks, size, chunk?*chunk:0, freesize);
3149 }
3150 return 1;
3151 }
3152
3153 if (st->sb) {
3154 /* A container has already been opened, so we are
3155 * creating in there. Maybe a BVD, maybe an SVD.
3156 * Should make a distinction one day.
3157 */
3158 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3159 chunk, size, data_offset, dev,
3160 freesize,
3161 verbose);
3162 }
3163 /* This is the first device for the array.
3164 * If it is a container, we read it in and do automagic allocations,
3165 * no other devices should be given.
3166 * Otherwise it must be a member device of a container, and we
3167 * do manual allocation.
3168 * Later we should check for a BVD and make an SVD.
3169 */
3170 fd = open(dev, O_RDONLY|O_EXCL, 0);
3171 if (fd >= 0) {
3172 sra = sysfs_read(fd, NULL, GET_VERSION);
3173 close(fd);
3174 if (sra && sra->array.major_version == -1 &&
3175 strcmp(sra->text_version, "ddf") == 0) {
3176
3177 /* load super */
3178 /* find space for 'n' devices. */
3179 /* remember the devices */
3180 /* Somehow return the fact that we have enough */
3181 }
3182
3183 if (verbose)
3184 pr_err("ddf: Cannot create this array "
3185 "on device %s - a container is required.\n",
3186 dev);
3187 return 0;
3188 }
3189 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3190 if (verbose)
3191 pr_err("ddf: Cannot open %s: %s\n",
3192 dev, strerror(errno));
3193 return 0;
3194 }
3195 /* Well, it is in use by someone, maybe a 'ddf' container. */
3196 cfd = open_container(fd);
3197 if (cfd < 0) {
3198 close(fd);
3199 if (verbose)
3200 pr_err("ddf: Cannot use %s: %s\n",
3201 dev, strerror(EBUSY));
3202 return 0;
3203 }
3204 sra = sysfs_read(cfd, NULL, GET_VERSION);
3205 close(fd);
3206 if (sra && sra->array.major_version == -1 &&
3207 strcmp(sra->text_version, "ddf") == 0) {
3208 /* This is a member of a ddf container. Load the container
3209 * and try to create a bvd
3210 */
3211 struct ddf_super *ddf;
3212 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3213 st->sb = ddf;
3214 strcpy(st->container_devnm, fd2devnm(cfd));
3215 close(cfd);
3216 return validate_geometry_ddf_bvd(st, level, layout,
3217 raiddisks, chunk, size,
3218 data_offset,
3219 dev, freesize,
3220 verbose);
3221 }
3222 close(cfd);
3223 } else /* device may belong to a different container */
3224 return 0;
3225
3226 return 1;
3227 }
3228
3229 static int
3230 validate_geometry_ddf_container(struct supertype *st,
3231 int level, int layout, int raiddisks,
3232 int chunk, unsigned long long size,
3233 unsigned long long data_offset,
3234 char *dev, unsigned long long *freesize,
3235 int verbose)
3236 {
3237 int fd;
3238 unsigned long long ldsize;
3239
3240 if (level != LEVEL_CONTAINER)
3241 return 0;
3242 if (!dev)
3243 return 1;
3244
3245 fd = open(dev, O_RDONLY|O_EXCL, 0);
3246 if (fd < 0) {
3247 if (verbose)
3248 pr_err("ddf: Cannot open %s: %s\n",
3249 dev, strerror(errno));
3250 return 0;
3251 }
3252 if (!get_dev_size(fd, dev, &ldsize)) {
3253 close(fd);
3254 return 0;
3255 }
3256 close(fd);
3257
3258 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3259 if (*freesize == 0)
3260 return 0;
3261
3262 return 1;
3263 }
3264
3265 static int validate_geometry_ddf_bvd(struct supertype *st,
3266 int level, int layout, int raiddisks,
3267 int *chunk, unsigned long long size,
3268 unsigned long long data_offset,
3269 char *dev, unsigned long long *freesize,
3270 int verbose)
3271 {
3272 struct stat stb;
3273 struct ddf_super *ddf = st->sb;
3274 struct dl *dl;
3275 unsigned long long pos = 0;
3276 unsigned long long maxsize;
3277 struct extent *e;
3278 int i;
3279 /* ddf/bvd supports lots of things, but not containers */
3280 if (level == LEVEL_CONTAINER) {
3281 if (verbose)
3282 pr_err("DDF cannot create a container within an container\n");
3283 return 0;
3284 }
3285 /* We must have the container info already read in. */
3286 if (!ddf)
3287 return 0;
3288
3289 if (!dev) {
3290 /* General test: make sure there is space for
3291 * 'raiddisks' device extents of size 'size'.
3292 */
3293 unsigned long long minsize = size;
3294 int dcnt = 0;
3295 if (minsize == 0)
3296 minsize = 8;
3297 for (dl = ddf->dlist; dl ; dl = dl->next)
3298 {
3299 int found = 0;
3300 pos = 0;
3301
3302 i = 0;
3303 e = get_extents(ddf, dl);
3304 if (!e) continue;
3305 do {
3306 unsigned long long esize;
3307 esize = e[i].start - pos;
3308 if (esize >= minsize)
3309 found = 1;
3310 pos = e[i].start + e[i].size;
3311 i++;
3312 } while (e[i-1].size);
3313 if (found)
3314 dcnt++;
3315 free(e);
3316 }
3317 if (dcnt < raiddisks) {
3318 if (verbose)
3319 pr_err("ddf: Not enough devices with "
3320 "space for this array (%d < %d)\n",
3321 dcnt, raiddisks);
3322 return 0;
3323 }
3324 return 1;
3325 }
3326 /* This device must be a member of the set */
3327 if (stat(dev, &stb) < 0)
3328 return 0;
3329 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3330 return 0;
3331 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3332 if (dl->major == (int)major(stb.st_rdev) &&
3333 dl->minor == (int)minor(stb.st_rdev))
3334 break;
3335 }
3336 if (!dl) {
3337 if (verbose)
3338 pr_err("ddf: %s is not in the "
3339 "same DDF set\n",
3340 dev);
3341 return 0;
3342 }
3343 e = get_extents(ddf, dl);
3344 maxsize = 0;
3345 i = 0;
3346 if (e) do {
3347 unsigned long long esize;
3348 esize = e[i].start - pos;
3349 if (esize >= maxsize)
3350 maxsize = esize;
3351 pos = e[i].start + e[i].size;
3352 i++;
3353 } while (e[i-1].size);
3354 *freesize = maxsize;
3355 // FIXME here I am
3356
3357 return 1;
3358 }
3359
3360 static int load_super_ddf_all(struct supertype *st, int fd,
3361 void **sbp, char *devname)
3362 {
3363 struct mdinfo *sra;
3364 struct ddf_super *super;
3365 struct mdinfo *sd, *best = NULL;
3366 int bestseq = 0;
3367 int seq;
3368 char nm[20];
3369 int dfd;
3370
3371 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3372 if (!sra)
3373 return 1;
3374 if (sra->array.major_version != -1 ||
3375 sra->array.minor_version != -2 ||
3376 strcmp(sra->text_version, "ddf") != 0)
3377 return 1;
3378
3379 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3380 return 1;
3381 memset(super, 0, sizeof(*super));
3382
3383 /* first, try each device, and choose the best ddf */
3384 for (sd = sra->devs ; sd ; sd = sd->next) {
3385 int rv;
3386 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3387 dfd = dev_open(nm, O_RDONLY);
3388 if (dfd < 0)
3389 return 2;
3390 rv = load_ddf_headers(dfd, super, NULL);
3391 close(dfd);
3392 if (rv == 0) {
3393 seq = __be32_to_cpu(super->active->seq);
3394 if (super->active->openflag)
3395 seq--;
3396 if (!best || seq > bestseq) {
3397 bestseq = seq;
3398 best = sd;
3399 }
3400 }
3401 }
3402 if (!best)
3403 return 1;
3404 /* OK, load this ddf */
3405 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3406 dfd = dev_open(nm, O_RDONLY);
3407 if (dfd < 0)
3408 return 1;
3409 load_ddf_headers(dfd, super, NULL);
3410 load_ddf_global(dfd, super, NULL);
3411 close(dfd);
3412 /* Now we need the device-local bits */
3413 for (sd = sra->devs ; sd ; sd = sd->next) {
3414 int rv;
3415
3416 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3417 dfd = dev_open(nm, O_RDWR);
3418 if (dfd < 0)
3419 return 2;
3420 rv = load_ddf_headers(dfd, super, NULL);
3421 if (rv == 0)
3422 rv = load_ddf_local(dfd, super, NULL, 1);
3423 if (rv)
3424 return 1;
3425 }
3426
3427 *sbp = super;
3428 if (st->ss == NULL) {
3429 st->ss = &super_ddf;
3430 st->minor_version = 0;
3431 st->max_devs = 512;
3432 }
3433 strcpy(st->container_devnm, fd2devnm(fd));
3434 return 0;
3435 }
3436
3437 static int load_container_ddf(struct supertype *st, int fd,
3438 char *devname)
3439 {
3440 return load_super_ddf_all(st, fd, &st->sb, devname);
3441 }
3442
3443 #endif /* MDASSEMBLE */
3444
3445 static int check_secondary(const struct vcl *vc)
3446 {
3447 const struct vd_config *conf = &vc->conf;
3448 int i;
3449
3450 /* The only DDF secondary RAID level md can support is
3451 * RAID 10, if the stripe sizes and Basic volume sizes
3452 * are all equal.
3453 * Other configurations could in theory be supported by exposing
3454 * the BVDs to user space and using device mapper for the secondary
3455 * mapping. So far we don't support that.
3456 */
3457
3458 __u64 sec_elements[4] = {0, 0, 0, 0};
3459 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1ULL<<((n)&63)))
3460 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1ULL<<((n)&63))) != 0)
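/* sec_elements is a 256-bit bitmap (4 x 64 bits) recording which
 * sec_elmnt_seq values have been seen, so missing BVDs can be
 * detected below. */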
3461
3462 if (vc->other_bvds == NULL) {
3463 pr_err("No BVDs for secondary RAID found\n");
3464 return -1;
3465 }
3466 if (conf->prl != DDF_RAID1) {
3467 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3468 return -1;
3469 }
3470 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3471 pr_err("Secondary RAID level %d is unsupported\n",
3472 conf->srl);
3473 return -1;
3474 }
3475 __set_sec_seen(conf->sec_elmnt_seq);
3476 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3477 const struct vd_config *bvd = vc->other_bvds[i];
3478 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3479 continue;
3480 if (bvd->srl != conf->srl) {
3481 pr_err("Inconsistent secondary RAID level across BVDs\n");
3482 return -1;
3483 }
3484 if (bvd->prl != conf->prl) {
3485 pr_err("Different RAID levels for BVDs are unsupported\n");
3486 return -1;
3487 }
3488 if (bvd->prim_elmnt_count != conf->prim_elmnt_count) {
3489 pr_err("All BVDs must have the same number of primary elements\n");
3490 return -1;
3491 }
3492 if (bvd->chunk_shift != conf->chunk_shift) {
3493 pr_err("Different strip sizes for BVDs are unsupported\n");
3494 return -1;
3495 }
3496 if (bvd->array_blocks != conf->array_blocks) {
3497 pr_err("Different BVD sizes are unsupported\n");
3498 return -1;
3499 }
3500 __set_sec_seen(bvd->sec_elmnt_seq);
3501 }
3502 for (i = 0; i < conf->sec_elmnt_count; i++) {
3503 if (!__was_sec_seen(i)) {
3504 pr_err("BVD %d is missing\n", i);
3505 return -1;
3506 }
3507 }
3508 return 0;
3509 }
3510
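/* Look up 'refnum' in this vcl's primary BVD and then in the other BVDs.
 * On success, *bvd and *idx identify the matching vd_config and slot, and
 * the return value is the overall raid-disk position: sec_elmnt_seq times
 * prim_elmnt_count plus the refnum's position within its BVD.
 * Returns DDF_NOTFOUND (with *bvd set to NULL) if the refnum is not used. */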
3511 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3512 __u32 refnum, unsigned int nmax,
3513 const struct vd_config **bvd,
3514 unsigned int *idx)
3515 {
3516 unsigned int i, j, n, sec, cnt;
3517
3518 cnt = __be16_to_cpu(vc->conf.prim_elmnt_count);
3519 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3520
3521 for (i = 0, j = 0 ; i < nmax ; i++) {
3522 /* j counts valid entries for this BVD */
3523 if (vc->conf.phys_refnum[i] != 0xffffffff)
3524 j++;
3525 if (vc->conf.phys_refnum[i] == refnum) {
3526 *bvd = &vc->conf;
3527 *idx = i;
3528 return sec * cnt + j - 1;
3529 }
3530 }
3531 if (vc->other_bvds == NULL)
3532 goto bad;
3533
3534 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3535 struct vd_config *vd = vc->other_bvds[n-1];
3536 sec = vd->sec_elmnt_seq;
3537 if (sec == DDF_UNUSED_BVD)
3538 continue;
3539 for (i = 0, j = 0 ; i < nmax ; i++) {
3540 if (vd->phys_refnum[i] != 0xffffffff)
3541 j++;
3542 if (vd->phys_refnum[i] == refnum) {
3543 *bvd = vd;
3544 *idx = i;
3545 return sec * cnt + j - 1;
3546 }
3547 }
3548 }
3549 bad:
3550 *bvd = NULL;
3551 return DDF_NOTFOUND;
3552 }
3553
3554 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3555 {
3556 /* Given a container loaded by load_super_ddf_all,
3557 * extract information about all the arrays into
3558 * an mdinfo tree.
3559 *
3560 * For each vcl in conflist: create an mdinfo, fill it in,
3561 * then look for matching devices (phys_refnum) in dlist
3562 * and create appropriate device mdinfo.
3563 */
3564 struct ddf_super *ddf = st->sb;
3565 struct mdinfo *rest = NULL;
3566 struct vcl *vc;
3567
3568 for (vc = ddf->conflist ; vc ; vc=vc->next)
3569 {
3570 unsigned int i;
3571 unsigned int j;
3572 struct mdinfo *this;
3573 char *ep;
3574 __u32 *cptr;
3575 unsigned int pd;
3576
3577 if (subarray &&
3578 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3579 *ep != '\0'))
3580 continue;
3581
3582 if (vc->conf.sec_elmnt_count > 1) {
3583 if (check_secondary(vc) != 0)
3584 continue;
3585 }
3586
3587 this = xcalloc(1, sizeof(*this));
3588 this->next = rest;
3589 rest = this;
3590
3591 if (layout_ddf2md(&vc->conf, &this->array))
3592 continue;
3593 this->array.md_minor = -1;
3594 this->array.major_version = -1;
3595 this->array.minor_version = -2;
3596 cptr = (__u32 *)(vc->conf.guid + 16);
3597 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3598 this->array.utime = DECADE +
3599 __be32_to_cpu(vc->conf.timestamp);
3600 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3601
3602 i = vc->vcnum;
3603 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3604 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3605 DDF_init_full) {
3606 this->array.state = 0;
3607 this->resync_start = 0;
3608 } else {
3609 this->array.state = 1;
3610 this->resync_start = MaxSector;
3611 }
3612 memcpy(this->name, ddf->virt->entries[i].name, 16);
3613 this->name[16]=0;
3614 for(j=0; j<16; j++)
3615 if (this->name[j] == ' ')
3616 this->name[j] = 0;
3617
3618 memset(this->uuid, 0, sizeof(this->uuid));
3619 this->component_size = __be64_to_cpu(vc->conf.blocks);
3620 this->array.size = this->component_size / 2;
3621 this->container_member = i;
3622
3623 ddf->currentconf = vc;
3624 uuid_from_super_ddf(st, this->uuid);
3625 if (!subarray)
3626 ddf->currentconf = NULL;
3627
3628 sprintf(this->text_version, "/%s/%d",
3629 st->container_devnm, this->container_member);
3630
3631 for (pd = 0; pd < __be16_to_cpu(ddf->phys->used_pdes); pd++) {
3632 struct mdinfo *dev;
3633 struct dl *d;
3634 const struct vd_config *bvd;
3635 unsigned int iphys;
3636 int stt;
3637
3638 if (ddf->phys->entries[pd].refnum == 0xFFFFFFFF)
3639 continue;
3640
3641 stt = __be16_to_cpu(ddf->phys->entries[pd].state);
3642 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3643 != DDF_Online)
3644 continue;
3645
3646 i = get_pd_index_from_refnum(
3647 vc, ddf->phys->entries[pd].refnum,
3648 ddf->mppe, &bvd, &iphys);
3649 if (i == DDF_NOTFOUND)
3650 continue;
3651
3652 this->array.working_disks++;
3653
3654 for (d = ddf->dlist; d ; d=d->next)
3655 if (d->disk.refnum ==
3656 ddf->phys->entries[pd].refnum)
3657 break;
3658 if (d == NULL)
3659 /* Haven't found that one yet, maybe there are others */
3660 continue;
3661
3662 dev = xcalloc(1, sizeof(*dev));
3663 dev->next = this->devs;
3664 this->devs = dev;
3665
3666 dev->disk.number = __be32_to_cpu(d->disk.refnum);
3667 dev->disk.major = d->major;
3668 dev->disk.minor = d->minor;
3669 dev->disk.raid_disk = i;
3670 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3671 dev->recovery_start = MaxSector;
3672
3673 dev->events = __be32_to_cpu(ddf->primary.seq);
3674 dev->data_offset =
3675 __be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3676 dev->component_size = __be64_to_cpu(bvd->blocks);
3677 if (d->devname)
3678 strcpy(dev->name, d->devname);
3679 }
3680 }
3681 return rest;
3682 }
3683
3684 static int store_super_ddf(struct supertype *st, int fd)
3685 {
3686 struct ddf_super *ddf = st->sb;
3687 unsigned long long dsize;
3688 void *buf;
3689 int rc;
3690
3691 if (!ddf)
3692 return 1;
3693
3694 if (!get_dev_size(fd, NULL, &dsize))
3695 return 1;
3696
3697 if (ddf->dlist || ddf->conflist) {
3698 struct stat sta;
3699 struct dl *dl;
3700 int ofd, ret;
3701
3702 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3703 pr_err("%s: file descriptor for invalid device\n",
3704 __func__);
3705 return 1;
3706 }
3707 for (dl = ddf->dlist; dl; dl = dl->next)
3708 if (dl->major == (int)major(sta.st_rdev) &&
3709 dl->minor == (int)minor(sta.st_rdev))
3710 break;
3711 if (!dl) {
3712 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3713 (int)major(sta.st_rdev),
3714 (int)minor(sta.st_rdev));
3715 return 1;
3716 }
3717 /*
3718 For DDF, writing to just one disk makes no sense.
3719 We would run the risk of writing inconsistent meta data
3720 to the devices. So just call __write_init_super_ddf and
3721 write to all devices, including this one.
3722 Use the fd passed to this function, just in case dl->fd
3723 is invalid.
3724 */
3725 ofd = dl->fd;
3726 dl->fd = fd;
3727 ret = __write_init_super_ddf(st);
3728 dl->fd = ofd;
3729 return ret;
3730 }
3731
3732 if (posix_memalign(&buf, 512, 512) != 0)
3733 return 1;
3734 memset(buf, 0, 512);
3735
3736 lseek64(fd, dsize-512, 0);
3737 rc = write(fd, buf, 512);
3738 free(buf);
3739 if (rc < 0)
3740 return 1;
3741 return 0;
3742 }
3743
3744 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3745 {
3746 /*
3747 * return:
3748 * 0 same, or first was empty, and second was copied
3749 * 1 second had wrong number
3750 * 2 wrong uuid
3751 * 3 wrong other info
3752 */
3753 struct ddf_super *first = st->sb;
3754 struct ddf_super *second = tst->sb;
3755 struct dl *dl1, *dl2;
3756 struct vcl *vl1, *vl2;
3757 unsigned int max_vds, max_pds, pd, vd;
3758
3759 if (!first) {
3760 st->sb = tst->sb;
3761 tst->sb = NULL;
3762 return 0;
3763 }
3764
3765 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3766 return 2;
3767
3768 if (first->anchor.seq != second->anchor.seq) {
3769 dprintf("%s: sequence number mismatch %u/%u\n", __func__,
3770 __be32_to_cpu(first->anchor.seq),
3771 __be32_to_cpu(second->anchor.seq));
3772 return 3;
3773 }
3774 if (first->max_part != second->max_part ||
3775 first->phys->used_pdes != second->phys->used_pdes ||
3776 first->virt->populated_vdes != second->virt->populated_vdes) {
3777 dprintf("%s: PD/VD number mismatch\n", __func__);
3778 return 3;
3779 }
3780
3781 max_pds = __be16_to_cpu(first->phys->used_pdes);
3782 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3783 for (pd = 0; pd < max_pds; pd++)
3784 if (first->phys->entries[pd].refnum == dl2->disk.refnum)
3785 break;
3786 if (pd == max_pds) {
3787 dprintf("%s: no match for disk %08x\n", __func__,
3788 __be32_to_cpu(dl2->disk.refnum));
3789 return 3;
3790 }
3791 }
3792
3793 max_vds = __be16_to_cpu(first->active->max_vd_entries);
3794 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3795 if (vl2->conf.magic != DDF_VD_CONF_MAGIC)
3796 continue;
3797 for (vd = 0; vd < max_vds; vd++)
3798 if (!memcmp(first->virt->entries[vd].guid,
3799 vl2->conf.guid, DDF_GUID_LEN))
3800 break;
3801 if (vd == max_vds) {
3802 dprintf("%s: no match for VD config\n", __func__);
3803 return 3;
3804 }
3805 }
3806 /* FIXME should I look at anything else? */
3807
3808 /*
3809 At this point we are fairly sure that the meta data matches.
3810 But the new disk may contain additional local data.
3811 Add it to the super block.
3812 */
3813 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3814 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3815 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3816 DDF_GUID_LEN))
3817 break;
3818 if (vl1) {
3819 if (vl1->other_bvds != NULL &&
3820 vl1->conf.sec_elmnt_seq !=
3821 vl2->conf.sec_elmnt_seq) {
3822 dprintf("%s: adding BVD %u\n", __func__,
3823 vl2->conf.sec_elmnt_seq);
3824 add_other_bvd(vl1, &vl2->conf,
3825 first->conf_rec_len*512);
3826 }
3827 continue;
3828 }
3829
3830 if (posix_memalign((void **)&vl1, 512,
3831 (first->conf_rec_len*512 +
3832 offsetof(struct vcl, conf))) != 0) {
3833 pr_err("%s could not allocate vcl buf\n",
3834 __func__);
3835 return 3;
3836 }
3837
3838 vl1->next = first->conflist;
3839 vl1->block_sizes = NULL;
3840 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3841 if (alloc_other_bvds(first, vl1) != 0) {
3842 pr_err("%s could not allocate other bvds\n",
3843 __func__);
3844 free(vl1);
3845 return 3;
3846 }
3847 for (vd = 0; vd < max_vds; vd++)
3848 if (!memcmp(first->virt->entries[vd].guid,
3849 vl1->conf.guid, DDF_GUID_LEN))
3850 break;
3851 vl1->vcnum = vd;
3852 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
3853 first->conflist = vl1;
3854 }
3855
3856 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3857 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3858 if (dl1->disk.refnum == dl2->disk.refnum)
3859 break;
3860 if (dl1)
3861 continue;
3862
3863 if (posix_memalign((void **)&dl1, 512,
3864 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
3865 != 0) {
3866 pr_err("%s could not allocate disk info buffer\n",
3867 __func__);
3868 return 3;
3869 }
3870 memcpy(dl1, dl2, sizeof(*dl1));
3871 dl1->mdupdate = NULL;
3872 dl1->next = first->dlist;
3873 dl1->fd = -1;
3874 for (pd = 0; pd < max_pds; pd++)
3875 if (first->phys->entries[pd].refnum == dl1->disk.refnum)
3876 break;
3877 dl1->pdnum = pd;
3878 if (dl2->spare) {
3879 if (posix_memalign((void **)&dl1->spare, 512,
3880 first->conf_rec_len*512) != 0) {
3881 pr_err("%s could not allocate spare info buf\n",
3882 __func__);
3883 return 3;
3884 }
3885 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
3886 }
3887 for (vd = 0 ; vd < first->max_part ; vd++) {
3888 if (!dl2->vlist[vd]) {
3889 dl1->vlist[vd] = NULL;
3890 continue;
3891 }
3892 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3893 if (!memcmp(vl1->conf.guid,
3894 dl2->vlist[vd]->conf.guid,
3895 DDF_GUID_LEN))
3896 break;
3897 /* matching config in 'first', or NULL if there is none */
3898 dl1->vlist[vd] = vl1;
3899 }
3900 first->dlist = dl1;
3901 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
3902 __be32_to_cpu(dl1->disk.refnum));
3903 }
3904
3905 return 0;
3906 }
3907
3908 #ifndef MDASSEMBLE
3909 /*
3910 * A new array 'a' has been started which claims to be instance 'inst'
3911 * within container 'c'.
3912 * We need to confirm that the array matches the metadata in 'c' so
3913 * that we don't corrupt any metadata.
3914 */
3915 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
3916 {
3917 struct ddf_super *ddf = c->sb;
3918 int n = atoi(inst);
3919 if (all_ff(ddf->virt->entries[n].guid)) {
3920 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
3921 return -ENODEV;
3922 }
3923 dprintf("ddf: open_new %d\n", n);
3924 a->info.container_member = n;
3925 return 0;
3926 }
3927
3928 /*
3929 * The array 'a' is to be marked clean in the metadata.
3930 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
3931 * clean up to that point (in sectors). If that cannot be recorded in the
3932 * metadata, then leave it as dirty.
3933 *
3934 * For DDF, we need to clear the DDF_state_inconsistent bit in the
3935 * !global! virtual_disk.virtual_entry structure.
3936 */
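/* The function also records how far initialisation has progressed:
 * resync_start == 0 maps to DDF_init_not, a completed resync to
 * DDF_init_full, and anything in between to DDF_init_quick.
 */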
3937 static int ddf_set_array_state(struct active_array *a, int consistent)
3938 {
3939 struct ddf_super *ddf = a->container->sb;
3940 int inst = a->info.container_member;
3941 int old = ddf->virt->entries[inst].state;
3942 if (consistent == 2) {
3943 /* Should check if a recovery should be started FIXME */
3944 consistent = 1;
3945 if (!is_resync_complete(&a->info))
3946 consistent = 0;
3947 }
3948 if (consistent)
3949 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
3950 else
3951 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
3952 if (old != ddf->virt->entries[inst].state)
3953 ddf_set_updates_pending(ddf);
3954
3955 old = ddf->virt->entries[inst].init_state;
3956 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
3957 if (is_resync_complete(&a->info))
3958 ddf->virt->entries[inst].init_state |= DDF_init_full;
3959 else if (a->info.resync_start == 0)
3960 ddf->virt->entries[inst].init_state |= DDF_init_not;
3961 else
3962 ddf->virt->entries[inst].init_state |= DDF_init_quick;
3963 if (old != ddf->virt->entries[inst].init_state)
3964 ddf_set_updates_pending(ddf);
3965
3966 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
3967 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
3968 consistent?"clean":"dirty",
3969 a->info.resync_start);
3970 return consistent;
3971 }
3972
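/* Compute the DDF state of a single BVD from the states of its member
 * physical disks. For example, a RAID5 BVD with prim_elmnt_count == 4
 * is optimal with 4 online members, degraded with 3 and failed with 2
 * or fewer, while a RAID6 BVD missing exactly one member is reported
 * as "partially optimal".
 */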
3973 static int get_bvd_state(const struct ddf_super *ddf,
3974 const struct vd_config *vc)
3975 {
3976 unsigned int i, n_bvd, working = 0;
3977 unsigned int n_prim = __be16_to_cpu(vc->prim_elmnt_count);
3978 int pd, st, state;
3979 for (i = 0; i < n_prim; i++) {
3980 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
3981 continue;
3982 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
3983 if (pd < 0)
3984 continue;
3985 st = __be16_to_cpu(ddf->phys->entries[pd].state);
3986 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3987 == DDF_Online)
3988 working++;
3989 }
3990
3991 state = DDF_state_degraded;
3992 if (working == n_prim)
3993 state = DDF_state_optimal;
3994 else
3995 switch (vc->prl) {
3996 case DDF_RAID0:
3997 case DDF_CONCAT:
3998 case DDF_JBOD:
3999 state = DDF_state_failed;
4000 break;
4001 case DDF_RAID1:
4002 if (working == 0)
4003 state = DDF_state_failed;
4004 else if (working >= 2)
4005 state = DDF_state_part_optimal;
4006 break;
4007 case DDF_RAID4:
4008 case DDF_RAID5:
4009 if (working < n_prim - 1)
4010 state = DDF_state_failed;
4011 break;
4012 case DDF_RAID6:
4013 if (working < n_prim - 2)
4014 state = DDF_state_failed;
4015 else if (working == n_prim - 1)
4016 state = DDF_state_part_optimal;
4017 break;
4018 }
4019 return state;
4020 }
4021
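/* Fold the state of one BVD into the accumulated state of an SVD
 * according to the secondary RAID level: with a mirrored SRL a single
 * optimal copy keeps the SVD "partially optimal", whereas with a
 * striped or concatenated SRL one failed BVD fails the whole SVD.
 */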
4022 static int secondary_state(int state, int other, int seclevel)
4023 {
4024 if (state == DDF_state_optimal && other == DDF_state_optimal)
4025 return DDF_state_optimal;
4026 if (seclevel == DDF_2MIRRORED) {
4027 if (state == DDF_state_optimal || other == DDF_state_optimal)
4028 return DDF_state_part_optimal;
4029 if (state == DDF_state_failed && other == DDF_state_failed)
4030 return DDF_state_failed;
4031 return DDF_state_degraded;
4032 } else {
4033 if (state == DDF_state_failed || other == DDF_state_failed)
4034 return DDF_state_failed;
4035 if (state == DDF_state_degraded || other == DDF_state_degraded)
4036 return DDF_state_degraded;
4037 return DDF_state_part_optimal;
4038 }
4039 }
4040
4041 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4042 {
4043 int state = get_bvd_state(ddf, &vcl->conf);
4044 unsigned int i;
4045 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4046 state = secondary_state(
4047 state,
4048 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4049 vcl->conf.srl);
4050 }
4051 return state;
4052 }
4053
4054 /*
4055 * The state of each disk is stored in the global phys_disk structure
4056 * in phys_disk.entries[n].state.
4057 * This makes various combinations awkward.
4058 * - When a device fails in any array, it must be failed in all arrays
4059 * that include a part of this device.
4060 * - When a component is rebuilding, we cannot include it officially in the
4061 * array unless this is the only array that uses the device.
4062 *
4063 * So: when transitioning:
4064 * Online -> failed: just set the failed flag; the monitor will propagate it.
4065 * spare -> online: the device might need to be added to the array.
4066 * spare -> failed: just set failed. Don't worry whether it is in an array or not.
4067 */
4068 static void ddf_set_disk(struct active_array *a, int n, int state)
4069 {
4070 struct ddf_super *ddf = a->container->sb;
4071 unsigned int inst = a->info.container_member, n_bvd;
4072 struct vcl *vcl;
4073 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4074 &n_bvd, &vcl);
4075 int pd;
4076 struct mdinfo *mdi;
4077 struct dl *dl;
4078
4079 if (vc == NULL) {
4080 dprintf("ddf: cannot find instance %d!!\n", inst);
4081 return;
4082 }
4083 /* Find the matching slot in 'info'. */
4084 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4085 if (mdi->disk.raid_disk == n)
4086 break;
4087 if (!mdi)
4088 return;
4089
4090 /* and find the 'dl' entry corresponding to that. */
4091 for (dl = ddf->dlist; dl; dl = dl->next)
4092 if (mdi->state_fd >= 0 &&
4093 mdi->disk.major == dl->major &&
4094 mdi->disk.minor == dl->minor)
4095 break;
4096 if (!dl)
4097 return;
4098
4099 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4100 if (pd < 0 || pd != dl->pdnum) {
4101 /* disk doesn't currently exist or has changed.
4102 * If it is now in_sync, insert it. */
4103 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4104 __func__, dl->pdnum, dl->major, dl->minor,
4105 __be32_to_cpu(dl->disk.refnum));
4106 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4107 __func__, inst, n_bvd, vc->phys_refnum[n_bvd], pd);
4108 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4109 pd = dl->pdnum; /* FIXME: is this really correct ? */
4110 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4111 LBA_OFFSET(ddf, vc)[n_bvd] =
4112 __cpu_to_be64(mdi->data_offset);
4113 ddf->phys->entries[pd].type &=
4114 ~__cpu_to_be16(DDF_Global_Spare);
4115 ddf->phys->entries[pd].type |=
4116 __cpu_to_be16(DDF_Active_in_VD);
4117 ddf_set_updates_pending(ddf);
4118 }
4119 } else {
4120 int old = ddf->phys->entries[pd].state;
4121 if (state & DS_FAULTY)
4122 ddf->phys->entries[pd].state |= __cpu_to_be16(DDF_Failed);
4123 if (state & DS_INSYNC) {
4124 ddf->phys->entries[pd].state |= __cpu_to_be16(DDF_Online);
4125 ddf->phys->entries[pd].state &= __cpu_to_be16(~DDF_Rebuilding);
4126 }
4127 if (old != ddf->phys->entries[pd].state)
4128 ddf_set_updates_pending(ddf);
4129 }
4130
4131 dprintf("ddf: set_disk %d to %x\n", n, state);
4132
4133 /* Now we need to check the state of the array and update
4134 * virtual_disk.entries[n].state.
4135 * It needs to be one of "optimal", "degraded", "failed".
4136 * I don't understand 'deleted' or 'missing'.
4137 */
4138 state = get_svd_state(ddf, vcl);
4139
4140 if (ddf->virt->entries[inst].state !=
4141 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4142 | state)) {
4143
4144 ddf->virt->entries[inst].state =
4145 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4146 | state;
4147 ddf_set_updates_pending(ddf);
4148 }
4149
4150 }
4151
4152 static void ddf_sync_metadata(struct supertype *st)
4153 {
4154
4155 /*
4156 * Write all data to all devices.
4157 * Later, we might be able to track whether only local changes
4158 * have been made, or whether any global data has been changed,
4159 * but ddf is sufficiently weird that it probably always
4160 * changes global data ....
4161 */
4162 struct ddf_super *ddf = st->sb;
4163 if (!ddf->updates_pending)
4164 return;
4165 ddf->updates_pending = 0;
4166 __write_init_super_ddf(st);
4167 dprintf("ddf: sync_metadata\n");
4168 }
4169
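/* Unlink every config record whose GUID matches from the conflist.
 * Walking the list through a pointer-to-pointer lets us splice entries
 * out without tracking a separate 'previous' pointer; the unlinked vcl
 * structures are not freed here. Returns 1 if anything was removed.
 */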
4170 static int del_from_conflist(struct vcl **list, const char *guid)
4171 {
4172 struct vcl **p;
4173 int found = 0;
4174 for (p = list; p && *p; p = &((*p)->next))
4175 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4176 found = 1;
4177 *p = (*p)->next;
4178 }
4179 return found;
4180 }
4181
4182 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4183 {
4184 struct dl *dl;
4185 unsigned int vdnum, i;
4186 vdnum = find_vde_by_guid(ddf, guid);
4187 if (vdnum == DDF_NOTFOUND) {
4188 pr_err("%s: could not find VD %s\n", __func__,
4189 guid_str(guid));
4190 return -1;
4191 }
4192 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4193 pr_err("%s: could not find conf %s\n", __func__,
4194 guid_str(guid));
4195 return -1;
4196 }
4197 for (dl = ddf->dlist; dl; dl = dl->next)
4198 for (i = 0; i < ddf->max_part; i++)
4199 if (dl->vlist[i] != NULL &&
4200 !memcmp(dl->vlist[i]->conf.guid, guid,
4201 DDF_GUID_LEN))
4202 dl->vlist[i] = NULL;
4203 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4204 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4205 return 0;
4206 }
4207
4208 static int kill_subarray_ddf(struct supertype *st)
4209 {
4210 struct ddf_super *ddf = st->sb;
4211 /*
4212 * currentconf is set in container_content_ddf,
4213 * called with subarray arg
4214 */
4215 struct vcl *victim = ddf->currentconf;
4216 struct vd_config *conf;
4217 ddf->currentconf = NULL;
4218 unsigned int vdnum;
4219 if (!victim) {
4220 pr_err("%s: nothing to kill\n", __func__);
4221 return -1;
4222 }
4223 conf = &victim->conf;
4224 vdnum = find_vde_by_guid(ddf, conf->guid);
4225 if (vdnum == DDF_NOTFOUND) {
4226 pr_err("%s: could not find VD %s\n", __func__,
4227 guid_str(conf->guid));
4228 return -1;
4229 }
4230 if (st->update_tail) {
4231 struct virtual_disk *vd;
4232 int len = sizeof(struct virtual_disk)
4233 + sizeof(struct virtual_entry);
4234 /* xcalloc() never returns NULL (it exits on allocation failure) */
4235 vd = xcalloc(1, len);
4241 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4242 vd->populated_vdes = 0;
4243 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4244 /* we use DDF_state_deleted as marker */
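/* ddf_process_update() recognises this marker in a VIRT_RECORDS
 * update and calls _kill_subarray_ddf() on the monitor side to
 * perform the actual deletion. */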
4245 vd->entries[0].state = DDF_state_deleted;
4246 append_metadata_update(st, vd, len);
4247 } else
4248 _kill_subarray_ddf(ddf, conf->guid);
4249 return 0;
4250 }
4251
4252 static void ddf_process_update(struct supertype *st,
4253 struct metadata_update *update)
4254 {
4255 /* Apply this update to the metadata.
4256 * The first 4 bytes are a DDF_*_MAGIC which guides
4257 * our actions.
4258 * Possible updates are:
4259 * DDF_PHYS_RECORDS_MAGIC
4260 * Add a new physical device or remove an old one.
4261 * Changes to this record only happen implicitly.
4262 * used_pdes is the device number.
4263 * DDF_VIRT_RECORDS_MAGIC
4264 * Add a new VD. Possibly also change the 'access' bits.
4265 * populated_vdes is the entry number.
4266 * DDF_VD_CONF_MAGIC
4267 * New or updated VD. The VIRT_RECORD must already
4268 * exist. For an update, phys_refnum and lba_offset
4269 * (at least) are updated, and the VD_CONF must
4270 * be written to precisely those devices listed with
4271 * a phys_refnum.
4272 * DDF_SPARE_ASSIGN_MAGIC
4273 * replacement Spare Assignment Record... but for which device?
4274 *
4275 * So, e.g.:
4276 * - to create a new array, we send a VIRT_RECORD and
4277 * a VD_CONF. Then assemble and start the array.
4278 * - to activate a spare we send a VD_CONF to add the phys_refnum
4279 * and offset. This will also mark the spare as active with
4280 * a spare-assignment record.
4281 */
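/* For example, the addition of one physical disk arrives as a buffer
 * of exactly sizeof(struct phys_disk) + sizeof(struct phys_disk_entry)
 * bytes, with used_pdes naming the target slot and entries[0]
 * describing the new disk; the length checks below rely on these
 * fixed sizes.
 */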
4282 struct ddf_super *ddf = st->sb;
4283 __u32 *magic = (__u32*)update->buf;
4284 struct phys_disk *pd;
4285 struct virtual_disk *vd;
4286 struct vd_config *vc;
4287 struct vcl *vcl;
4288 struct dl *dl;
4289 unsigned int mppe;
4290 unsigned int ent;
4291 unsigned int pdnum, pd2;
4292
4293 dprintf("Process update %x\n", *magic);
4294
4295 switch (*magic) {
4296 case DDF_PHYS_RECORDS_MAGIC:
4297
4298 if (update->len != (sizeof(struct phys_disk) +
4299 sizeof(struct phys_disk_entry)))
4300 return;
4301 pd = (struct phys_disk*)update->buf;
4302
4303 ent = __be16_to_cpu(pd->used_pdes);
4304 if (ent >= __be16_to_cpu(ddf->phys->max_pdes))
4305 return;
4306 if (pd->entries[0].state & __cpu_to_be16(DDF_Missing)) {
4307 struct dl **dlp;
4308 /* removing this disk. */
4309 ddf->phys->entries[ent].state |= __cpu_to_be16(DDF_Missing);
4310 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4311 struct dl *dl = *dlp;
4312 if (dl->pdnum == (signed)ent) {
4313 close(dl->fd);
4314 dl->fd = -1;
4315 /* FIXME this doesn't free
4316 * dl->devname */
4317 update->space = dl;
4318 *dlp = dl->next;
4319 break;
4320 }
4321 }
4322 ddf_set_updates_pending(ddf);
4323 return;
4324 }
4325 if (!all_ff(ddf->phys->entries[ent].guid))
4326 return;
4327 ddf->phys->entries[ent] = pd->entries[0];
4328 ddf->phys->used_pdes = __cpu_to_be16(1 +
4329 __be16_to_cpu(ddf->phys->used_pdes));
4330 ddf_set_updates_pending(ddf);
4331 if (ddf->add_list) {
4332 struct active_array *a;
4333 struct dl *al = ddf->add_list;
4334 ddf->add_list = al->next;
4335
4336 al->next = ddf->dlist;
4337 ddf->dlist = al;
4338
4339 /* As a device has been added, we should check
4340 * for any degraded devices that might make
4341 * use of this spare */
4342 for (a = st->arrays ; a; a=a->next)
4343 a->check_degraded = 1;
4344 }
4345 break;
4346
4347 case DDF_VIRT_RECORDS_MAGIC:
4348
4349 if (update->len != (sizeof(struct virtual_disk) +
4350 sizeof(struct virtual_entry)))
4351 return;
4352 vd = (struct virtual_disk*)update->buf;
4353
4354 if (vd->entries[0].state == DDF_state_deleted) {
4355 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4356 return;
4357 } else {
4358
4359 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4360 if (ent != DDF_NOTFOUND) {
4361 dprintf("%s: VD %s exists already in slot %d\n",
4362 __func__, guid_str(vd->entries[0].guid),
4363 ent);
4364 return;
4365 }
4366 ent = find_unused_vde(ddf);
4367 if (ent == DDF_NOTFOUND)
4368 return;
4369 ddf->virt->entries[ent] = vd->entries[0];
4370 ddf->virt->populated_vdes =
4371 __cpu_to_be16(
4372 1 + __be16_to_cpu(
4373 ddf->virt->populated_vdes));
4374 dprintf("%s: added VD %s in slot %d\n",
4375 __func__, guid_str(vd->entries[0].guid), ent);
4376 }
4377 ddf_set_updates_pending(ddf);
4378 break;
4379
4380 case DDF_VD_CONF_MAGIC:
4381 dprintf("len %d %d\n", update->len, ddf->conf_rec_len);
4382
4383 mppe = __be16_to_cpu(ddf->anchor.max_primary_element_entries);
4384 if ((unsigned)update->len != ddf->conf_rec_len * 512)
4385 return;
4386 vc = (struct vd_config*)update->buf;
4387 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4388 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4389 break;
4390 dprintf("vcl = %p\n", vcl);
4391 if (vcl) {
4392 /* An update, just copy the phys_refnum and lba_offset
4393 * fields
4394 */
4395 struct vd_config *conf = &vcl->conf;
4396 if (vcl->other_bvds != NULL &&
4397 conf->sec_elmnt_seq != vc->sec_elmnt_seq) {
4398 unsigned int i;
4399 for (i = 1; i < conf->sec_elmnt_count; i++)
4400 if (vcl->other_bvds[i-1]->sec_elmnt_seq
4401 == vc->sec_elmnt_seq)
4402 break;
4403 if (i == conf->sec_elmnt_count) {
4404 pr_err("%s/DDF_VD_CONF_MAGIC: BVD %u not found\n",
4405 __func__, vc->sec_elmnt_seq);
4406 return;
4407 }
4408 conf = vcl->other_bvds[i-1];
4409 }
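/* The 64-bit LBA offsets are stored directly behind the 32-bit
 * phys_refnum array, so a single memcpy of mppe * (4 + 8) bytes
 * refreshes both arrays at once. */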
4410 memcpy(conf->phys_refnum, vc->phys_refnum,
4411 mppe * (sizeof(__u32) + sizeof(__u64)));
4412 } else {
4413 /* A new VD_CONF */
4414 if (!update->space)
4415 return;
4416 vcl = update->space;
4417 update->space = NULL;
4418 vcl->next = ddf->conflist;
4419 memcpy(&vcl->conf, vc, update->len);
4420 ent = find_vde_by_guid(ddf, vc->guid);
4421 if (ent == DDF_NOTFOUND)
4422 return;
4423 vcl->vcnum = ent;
4424 ddf->conflist = vcl;
4425 }
4426 /* Set DDF_Transition on all Failed devices - to help
4427 * us detect those that are no longer in use
4428 */
4429 for (pdnum = 0; pdnum < __be16_to_cpu(ddf->phys->used_pdes); pdnum++)
4430 if (ddf->phys->entries[pdnum].state
4431 & __be16_to_cpu(DDF_Failed))
4432 ddf->phys->entries[pdnum].state
4433 |= __be16_to_cpu(DDF_Transition);
4434 /* Now make sure vlist is correct for each dl. */
4435 for (dl = ddf->dlist; dl; dl = dl->next) {
4436 unsigned int vn = 0;
4437 int in_degraded = 0;
4438 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4439 unsigned int dn, ibvd;
4440 const struct vd_config *conf;
4441 int vstate;
4442 dn = get_pd_index_from_refnum(vcl,
4443 dl->disk.refnum,
4444 ddf->mppe,
4445 &conf, &ibvd);
4446 if (dn == DDF_NOTFOUND)
4447 continue;
4448 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4449 dl->pdnum,
4450 __be32_to_cpu(dl->disk.refnum),
4451 guid_str(conf->guid),
4452 conf->sec_elmnt_seq, vn);
4453 /* Clear the Transition flag */
4454 if (ddf->phys->entries[dl->pdnum].state
4455 & __be16_to_cpu(DDF_Failed))
4456 ddf->phys->entries[dl->pdnum].state &=
4457 ~__be16_to_cpu(DDF_Transition);
4458 dl->vlist[vn++] = vcl;
4459 vstate = ddf->virt->entries[vcl->vcnum].state
4460 & DDF_state_mask;
4461 if (vstate == DDF_state_degraded ||
4462 vstate == DDF_state_part_optimal)
4463 in_degraded = 1;
4464 }
4465 while (vn < ddf->max_part)
4466 dl->vlist[vn++] = NULL;
4467 if (dl->vlist[0]) {
4468 ddf->phys->entries[dl->pdnum].type &=
4469 ~__cpu_to_be16(DDF_Global_Spare);
4470 if (!(ddf->phys->entries[dl->pdnum].type &
4471 __cpu_to_be16(DDF_Active_in_VD))) {
4472 ddf->phys->entries[dl->pdnum].type |=
4473 __cpu_to_be16(DDF_Active_in_VD);
4474 if (in_degraded)
4475 ddf->phys->entries[dl->pdnum].state |=
4476 __cpu_to_be16(DDF_Rebuilding);
4477 }
4478 }
4479 if (dl->spare) {
4480 ddf->phys->entries[dl->pdnum].type &=
4481 ~__cpu_to_be16(DDF_Global_Spare);
4482 ddf->phys->entries[dl->pdnum].type |=
4483 __cpu_to_be16(DDF_Spare);
4484 }
4485 if (!dl->vlist[0] && !dl->spare) {
4486 ddf->phys->entries[dl->pdnum].type |=
4487 __cpu_to_be16(DDF_Global_Spare);
4488 ddf->phys->entries[dl->pdnum].type &=
4489 ~__cpu_to_be16(DDF_Spare |
4490 DDF_Active_in_VD);
4491 }
4492 }
4493
4494 /* Now remove any 'Failed' devices that are not part
4495 * of any VD. They will have the Transition flag set.
4496 * Once done, we need to update all dl->pdnum numbers.
4497 */
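/* Two-index compaction: 'pdnum' scans every used slot while 'pd2' is
 * the next slot to keep. Surviving entries slide down, dl->pdnum
 * references are renumbered, and the vacated tail slots get all-0xff
 * GUIDs to mark them unused. */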
4498 pd2 = 0;
4499 for (pdnum = 0; pdnum < __be16_to_cpu(ddf->phys->used_pdes); pdnum++)
4500 if ((ddf->phys->entries[pdnum].state
4501 & __be16_to_cpu(DDF_Failed))
4502 && (ddf->phys->entries[pdnum].state
4503 & __be16_to_cpu(DDF_Transition)))
4504 /* skip this one */;
4505 else if (pdnum == pd2)
4506 pd2++;
4507 else {
4508 ddf->phys->entries[pd2] = ddf->phys->entries[pdnum];
4509 for (dl = ddf->dlist; dl; dl = dl->next)
4510 if (dl->pdnum == (int)pdnum)
4511 dl->pdnum = pd2;
4512 pd2++;
4513 }
4514 ddf->phys->used_pdes = __cpu_to_be16(pd2);
4515 while (pd2 < pdnum) {
4516 memset(ddf->phys->entries[pd2].guid, 0xff, DDF_GUID_LEN);
4517 pd2++;
4518 }
4519
4520 ddf_set_updates_pending(ddf);
4521 break;
4522 case DDF_SPARE_ASSIGN_MAGIC:
4523 default: break;
4524 }
4525 }
4526
4527 static void ddf_prepare_update(struct supertype *st,
4528 struct metadata_update *update)
4529 {
4530 /* This update arrived at managemon.
4531 * We are about to pass it to monitor.
4532 * If a malloc is needed, do it here.
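* The monitor should not be allocating memory itself (it may need to
* make progress while the array is blocked), so any buffer that
* ddf_process_update() will require is allocated up front here.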
4533 */
4534 struct ddf_super *ddf = st->sb;
4535 __u32 *magic = (__u32*)update->buf;
4536 if (*magic == DDF_VD_CONF_MAGIC)
4537 if (posix_memalign(&update->space, 512,
4538 offsetof(struct vcl, conf)
4539 + ddf->conf_rec_len * 512) != 0)
4540 update->space = NULL;
4541 }
4542
4543 /*
4544 * Check if the array 'a' is degraded but not failed.
4545 * If it is, find as many spares as are available and needed and
4546 * arrange for their inclusion.
4547 * We only choose devices which are not already in the array,
4548 * and prefer those with a spare-assignment to this array.
4549 * Otherwise we choose global spares, always assuming that
4550 * there is enough room.
4551 * For each spare that we assign, we return an 'mdinfo' which
4552 * describes the position for the device in the array.
4553 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4554 * the new phys_refnum and lba_offset values.
4555 *
4556 * Only worry about BVDs at the moment.
4557 */
4558 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4559 struct metadata_update **updates)
4560 {
4561 int working = 0;
4562 struct mdinfo *d;
4563 struct ddf_super *ddf = a->container->sb;
4564 int global_ok = 0;
4565 struct mdinfo *rv = NULL;
4566 struct mdinfo *di;
4567 struct metadata_update *mu;
4568 struct dl *dl;
4569 int i;
4570 struct vcl *vcl;
4571 struct vd_config *vc;
4572 unsigned int n_bvd;
4573
4574 for (d = a->info.devs ; d ; d = d->next) {
4575 if ((d->curr_state & DS_FAULTY) &&
4576 d->state_fd >= 0)
4577 /* wait for Removal to happen */
4578 return NULL;
4579 if (d->state_fd >= 0)
4580 working ++;
4581 }
4582
4583 dprintf("ddf_activate: working=%d (%d) level=%d\n", working, a->info.array.raid_disks,
4584 a->info.array.level);
4585 if (working == a->info.array.raid_disks)
4586 return NULL; /* array not degraded */
4587 switch (a->info.array.level) {
4588 case 1:
4589 if (working == 0)
4590 return NULL; /* failed */
4591 break;
4592 case 4:
4593 case 5:
4594 if (working < a->info.array.raid_disks - 1)
4595 return NULL; /* failed */
4596 break;
4597 case 6:
4598 if (working < a->info.array.raid_disks - 2)
4599 return NULL; /* failed */
4600 break;
4601 default: /* concat or stripe */
4602 return NULL; /* failed */
4603 }
4604
4605 /* For each slot, if it is not working, find a spare */
4606 dl = ddf->dlist;
4607 for (i = 0; i < a->info.array.raid_disks; i++) {
4608 for (d = a->info.devs ; d ; d = d->next)
4609 if (d->disk.raid_disk == i)
4610 break;
4611 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4612 if (d && (d->state_fd >= 0))
4613 continue;
4614
4615 /* OK, this device needs recovery. Find a spare */
4616 again:
4617 for ( ; dl ; dl = dl->next) {
4618 unsigned long long esize;
4619 unsigned long long pos;
4620 struct mdinfo *d2;
4621 int is_global = 0;
4622 int is_dedicated = 0;
4623 struct extent *ex;
4624 unsigned int j;
4625 /* If in this array, skip */
4626 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4627 if (d2->state_fd >= 0 &&
4628 d2->disk.major == dl->major &&
4629 d2->disk.minor == dl->minor) {
4630 dprintf("%x:%x already in array\n", dl->major, dl->minor);
4631 break;
4632 }
4633 if (d2)
4634 continue;
4635 if (ddf->phys->entries[dl->pdnum].type &
4636 __cpu_to_be16(DDF_Spare)) {
4637 /* Check spare assign record */
4638 if (dl->spare) {
4639 if (dl->spare->type & DDF_spare_dedicated) {
4640 /* check spare_ents for guid */
4641 for (j = 0 ;
4642 j < __be16_to_cpu(dl->spare->populated);
4643 j++) {
4644 if (memcmp(dl->spare->spare_ents[j].guid,
4645 ddf->virt->entries[a->info.container_member].guid,
4646 DDF_GUID_LEN) == 0)
4647 is_dedicated = 1;
4648 }
4649 } else
4650 is_global = 1;
4651 }
4652 } else if (ddf->phys->entries[dl->pdnum].type &
4653 __cpu_to_be16(DDF_Global_Spare)) {
4654 is_global = 1;
4655 } else if (!(ddf->phys->entries[dl->pdnum].state &
4656 __cpu_to_be16(DDF_Failed))) {
4657 /* we can possibly use some of this */
4658 is_global = 1;
4659 }
4660 if ( ! (is_dedicated ||
4661 (is_global && global_ok))) {
4662 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4663 is_dedicated, is_global);
4664 continue;
4665 }
4666
4667 /* We are allowed to use this device - is there space?
4668 * We need a->info.component_size sectors */
4669 ex = get_extents(ddf, dl);
4670 if (!ex) {
4671 dprintf("cannot get extents\n");
4672 continue;
4673 }
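/* Scan for the first gap between allocated extents that is big enough
 * for one component. The loop relies on get_extents() returning the
 * extents sorted by start and terminated by a zero-size entry; 'pos'
 * tracks the end of the previous extent. */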
4674 j = 0; pos = 0;
4675 esize = 0;
4676
4677 do {
4678 esize = ex[j].start - pos;
4679 if (esize >= a->info.component_size)
4680 break;
4681 pos = ex[j].start + ex[j].size;
4682 j++;
4683 } while (ex[j-1].size);
4684
4685 free(ex);
4686 if (esize < a->info.component_size) {
4687 dprintf("%x:%x has no room: %llu %llu\n",
4688 dl->major, dl->minor,
4689 esize, a->info.component_size);
4690 /* No room */
4691 continue;
4692 }
4693
4694 /* Cool, we have a device with some space at pos */
4695 di = xcalloc(1, sizeof(*di));
4696 di->disk.number = i;
4697 di->disk.raid_disk = i;
4698 di->disk.major = dl->major;
4699 di->disk.minor = dl->minor;
4700 di->disk.state = 0;
4701 di->recovery_start = 0;
4702 di->data_offset = pos;
4703 di->component_size = a->info.component_size;
4704 di->container_member = dl->pdnum;
4705 di->next = rv;
4706 rv = di;
4707 dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
4708 i, pos);
4709
4710 break;
4711 }
4712 if (!dl && ! global_ok) {
4713 /* not enough dedicated spares, try global */
4714 global_ok = 1;
4715 dl = ddf->dlist;
4716 goto again;
4717 }
4718 }
4719
4720 if (!rv)
4721 /* No spares found */
4722 return rv;
4723 /* Now 'rv' has a list of devices to return.
4724 * Create a metadata_update record to update the
4725 * phys_refnum and lba_offset values
4726 */
4727 mu = xmalloc(sizeof(*mu));
4728 /* ->space is deliberately left NULL here: ddf_prepare_update()
4729 * allocates the aligned buffer needed for a DDF_VD_CONF_MAGIC
4730 * update before it is handed to the monitor.
4731 */
4732 mu->buf = xmalloc(ddf->conf_rec_len * 512);
4733 mu->len = ddf->conf_rec_len * 512;
4734 mu->space = NULL;
4735 mu->space_list = NULL;
4736 mu->next = *updates;
4737 vc = find_vdcr(ddf, a->info.container_member, di->disk.raid_disk,
4738 &n_bvd, &vcl);
4739 memcpy(mu->buf, vc, ddf->conf_rec_len * 512);
4740
4741 vc = (struct vd_config*)mu->buf;
4742 for (di = rv ; di ; di = di->next) {
4743 vc->phys_refnum[di->disk.raid_disk] =
4744 ddf->phys->entries[di->container_member].refnum;
4745 LBA_OFFSET(ddf, vc)[di->disk.raid_disk]
4746 = __cpu_to_be64(di->data_offset);
4747 }
4748 *updates = mu;
4749 return rv;
4750 }
4751 #endif /* MDASSEMBLE */
4752
4753 static int ddf_level_to_layout(int level)
4754 {
4755 switch(level) {
4756 case 0:
4757 case 1:
4758 return 0;
4759 case 5:
4760 return ALGORITHM_LEFT_SYMMETRIC;
4761 case 6:
4762 return ALGORITHM_ROTATING_N_CONTINUE;
4763 case 10:
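/* 0x102 is the md raid10 layout word for 2 near copies and
 * 1 far copy, i.e. the common "n2" layout. */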
4764 return 0x102;
4765 default:
4766 return UnSet;
4767 }
4768 }
4769
4770 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
4771 {
4772 if (level && *level == UnSet)
4773 *level = LEVEL_CONTAINER;
4774
4775 if (level && layout && *layout == UnSet)
4776 *layout = ddf_level_to_layout(*level);
4777 }
4778
4779 struct superswitch super_ddf = {
4780 #ifndef MDASSEMBLE
4781 .examine_super = examine_super_ddf,
4782 .brief_examine_super = brief_examine_super_ddf,
4783 .brief_examine_subarrays = brief_examine_subarrays_ddf,
4784 .export_examine_super = export_examine_super_ddf,
4785 .detail_super = detail_super_ddf,
4786 .brief_detail_super = brief_detail_super_ddf,
4787 .validate_geometry = validate_geometry_ddf,
4788 .write_init_super = write_init_super_ddf,
4789 .add_to_super = add_to_super_ddf,
4790 .remove_from_super = remove_from_super_ddf,
4791 .load_container = load_container_ddf,
4792 .copy_metadata = copy_metadata_ddf,
4793 #endif
4794 .match_home = match_home_ddf,
4795 .uuid_from_super= uuid_from_super_ddf,
4796 .getinfo_super = getinfo_super_ddf,
4797 .update_super = update_super_ddf,
4798
4799 .avail_size = avail_size_ddf,
4800
4801 .compare_super = compare_super_ddf,
4802
4803 .load_super = load_super_ddf,
4804 .init_super = init_super_ddf,
4805 .store_super = store_super_ddf,
4806 .free_super = free_super_ddf,
4807 .match_metadata_desc = match_metadata_desc_ddf,
4808 .container_content = container_content_ddf,
4809 .default_geometry = default_geometry_ddf,
4810 .kill_subarray = kill_subarray_ddf,
4811
4812 .external = 1,
4813
4814 #ifndef MDASSEMBLE
4815 /* for mdmon */
4816 .open_new = ddf_open_new,
4817 .set_array_state= ddf_set_array_state,
4818 .set_disk = ddf_set_disk,
4819 .sync_metadata = ddf_sync_metadata,
4820 .process_update = ddf_process_update,
4821 .prepare_update = ddf_prepare_update,
4822 .activate_spare = ddf_activate_spare,
4823 #endif
4824 .name = "ddf",
4825 };