1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24  * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add
38  * the number of seconds from 1970 to 1980 to convert to Linux timestamps:
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
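/* Editor's sketch (hypothetical helper, not part of the original source):
 * DECADE works out to (365*10+2)*86400 = 315532800 seconds, so a DDF
 * timestamp that has already been converted from bigendian (e.g. with
 * __be32_to_cpu()) maps to a Unix time_t like this:
 */
static inline time_t ddf_time_to_unix(__u32 ddf_seconds)
{
	/* seconds since 1980-01-01 -> seconds since 1970-01-01 */
	return (time_t)ddf_seconds + DECADE;
}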
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* The DDF metadata handling.
51 * DDF metadata lives at the end of the device.
52 * The last 512 byte block provides an 'anchor' which is used to locate
53  * the rest of the metadata, which usually lives just before the anchor.
54 *
55 * Note:
56 * - all multibyte numeric fields are bigendian.
57 * - all strings are space padded.
58 *
59 */
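/* Editor's note (illustrative, not in the original source): for a device of
 * dsize bytes the anchor is therefore the final sector, read with
 * lseek64(fd, dsize-512, 0) followed by a 512-byte read -- see
 * load_ddf_headers() below.
 */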
60
61 typedef struct __be16 {
62 __u16 _v16;
63 } be16;
64 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
65 #define be16_and(x, y) ((x)._v16 & (y)._v16)
66 #define be16_or(x, y) ((x)._v16 | (y)._v16)
67 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
68 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
69
70 typedef struct __be32 {
71 __u32 _v32;
72 } be32;
73 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
74
75 typedef struct __be64 {
76 __u64 _v64;
77 } be64;
78 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
79
80 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
81 static inline be16 cpu_to_be16(__u16 x)
82 {
83 be16 be = { ._v16 = __cpu_to_be16(x) };
84 return be;
85 }
86
87 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
88 static inline be32 cpu_to_be32(__u32 x)
89 {
90 be32 be = { ._v32 = __cpu_to_be32(x) };
91 return be;
92 }
93
94 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
95 static inline be64 cpu_to_be64(__u64 x)
96 {
97 be64 be = { ._v64 = __cpu_to_be64(x) };
98 return be;
99 }
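/* Editor's sketch (hypothetical helper, not part of the original source):
 * because be16/be32/be64 are single-member structs rather than bare
 * integers, on-disk values cannot be mixed with CPU-order integers without
 * going through the helpers above; such mistakes become compile errors.
 * A round trip looks like this:
 */
static inline int be32_roundtrip_ok(__u32 x)
{
	be32 ondisk = cpu_to_be32(x);		/* CPU order -> disk order */
	return be32_to_cpu(ondisk) == x;	/* disk order -> CPU order */
}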
100
101 /* Primary Raid Level (PRL) */
102 #define DDF_RAID0 0x00
103 #define DDF_RAID1 0x01
104 #define DDF_RAID3 0x03
105 #define DDF_RAID4 0x04
106 #define DDF_RAID5 0x05
107 #define DDF_RAID1E 0x11
108 #define DDF_JBOD 0x0f
109 #define DDF_CONCAT 0x1f
110 #define DDF_RAID5E 0x15
111 #define DDF_RAID5EE 0x25
112 #define DDF_RAID6 0x06
113
114 /* Raid Level Qualifier (RLQ) */
115 #define DDF_RAID0_SIMPLE 0x00
116 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
117 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
118 #define DDF_RAID3_0 0x00 /* parity in first extent */
119 #define DDF_RAID3_N 0x01 /* parity in last extent */
120 #define DDF_RAID4_0 0x00 /* parity in first extent */
121 #define DDF_RAID4_N 0x01 /* parity in last extent */
122 /* these apply to raid5e and raid5ee as well */
123 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
124 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
125 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
126 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
127
128 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
129 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
130
131 /* Secondary RAID Level (SRL) */
132 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
133 #define DDF_2MIRRORED 0x01
134 #define DDF_2CONCAT 0x02
135 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
136
137 /* Magic numbers */
138 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
139 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
140 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
141 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
142 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
143 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
144 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
145 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
146 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
147 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
148
149 #define DDF_GUID_LEN 24
150 #define DDF_REVISION_0 "01.00.00"
151 #define DDF_REVISION_2 "01.02.00"
152
153 struct ddf_header {
154 be32 magic; /* DDF_HEADER_MAGIC */
155 be32 crc;
156 char guid[DDF_GUID_LEN];
157 char revision[8]; /* 01.02.00 */
158 be32 seq; /* starts at '1' */
159 be32 timestamp;
160 __u8 openflag;
161 __u8 foreignflag;
162 __u8 enforcegroups;
163 __u8 pad0; /* 0xff */
164 __u8 pad1[12]; /* 12 * 0xff */
165 /* 64 bytes so far */
166 __u8 header_ext[32]; /* reserved: fill with 0xff */
167 be64 primary_lba;
168 be64 secondary_lba;
169 __u8 type;
170 __u8 pad2[3]; /* 0xff */
171 be32 workspace_len; /* sectors for vendor space -
172 * at least 32768(sectors) */
173 be64 workspace_lba;
174 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
175 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
176 be16 max_partitions; /* i.e. max num of configuration
177 record entries per disk */
178 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
179 *12/512) */
180 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
181 __u8 pad3[54]; /* 0xff */
182 /* 192 bytes so far */
183 be32 controller_section_offset;
184 be32 controller_section_length;
185 be32 phys_section_offset;
186 be32 phys_section_length;
187 be32 virt_section_offset;
188 be32 virt_section_length;
189 be32 config_section_offset;
190 be32 config_section_length;
191 be32 data_section_offset;
192 be32 data_section_length;
193 be32 bbm_section_offset;
194 be32 bbm_section_length;
195 be32 diag_space_offset;
196 be32 diag_space_length;
197 be32 vendor_offset;
198 be32 vendor_length;
199 /* 256 bytes so far */
200 __u8 pad4[256]; /* 0xff */
201 };
202
203 /* type field */
204 #define DDF_HEADER_ANCHOR 0x00
205 #define DDF_HEADER_PRIMARY 0x01
206 #define DDF_HEADER_SECONDARY 0x02
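/* Editor's sketch (hypothetical helper; assumption based on load_section()
 * below): the *_section_offset fields are sector counts relative to the LBA
 * of the header that describes them, so for the primary header the absolute
 * start sector of a section is:
 */
static inline unsigned long long
ddf_primary_section_lba(const struct ddf_header *hdr, be32 section_offset)
{
	return be64_to_cpu(hdr->primary_lba) + be32_to_cpu(section_offset);
}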
207
208 /* The content of the 'controller section' - global scope */
209 struct ddf_controller_data {
210 be32 magic; /* DDF_CONTROLLER_MAGIC */
211 be32 crc;
212 char guid[DDF_GUID_LEN];
213 struct controller_type {
214 be16 vendor_id;
215 be16 device_id;
216 be16 sub_vendor_id;
217 be16 sub_device_id;
218 } type;
219 char product_id[16];
220 __u8 pad[8]; /* 0xff */
221 __u8 vendor_data[448];
222 };
223
224 /* The content of phys_section - global scope */
225 struct phys_disk {
226 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
227 be32 crc;
228 be16 used_pdes;
229 be16 max_pdes;
230 __u8 pad[52];
231 struct phys_disk_entry {
232 char guid[DDF_GUID_LEN];
233 be32 refnum;
234 be16 type;
235 be16 state;
236 be64 config_size; /* DDF structures must be after here */
237 char path[18]; /* another horrible structure really */
238 __u8 pad[6];
239 } entries[0];
240 };
241
242 /* phys_disk_entry.type is a bitmap - bigendian remember */
243 #define DDF_Forced_PD_GUID 1
244 #define DDF_Active_in_VD 2
245 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
246 #define DDF_Spare 8 /* overrides Global_spare */
247 #define DDF_Foreign 16
248 #define DDF_Legacy 32 /* no DDF on this device */
249
250 #define DDF_Interface_mask 0xf00
251 #define DDF_Interface_SCSI 0x100
252 #define DDF_Interface_SAS 0x200
253 #define DDF_Interface_SATA 0x300
254 #define DDF_Interface_FC 0x400
255
256 /* phys_disk_entry.state is a bigendian bitmap */
257 #define DDF_Online 1
258 #define DDF_Failed 2 /* overrides 1,4,8 */
259 #define DDF_Rebuilding 4
260 #define DDF_Transition 8
261 #define DDF_SMART 16
262 #define DDF_ReadErrors 32
263 #define DDF_Missing 64
264
265 /* The content of the virt_section global scope */
266 struct virtual_disk {
267 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
268 be32 crc;
269 be16 populated_vdes;
270 be16 max_vdes;
271 __u8 pad[52];
272 struct virtual_entry {
273 char guid[DDF_GUID_LEN];
274 be16 unit;
275 __u16 pad0; /* 0xffff */
276 be16 guid_crc;
277 be16 type;
278 __u8 state;
279 __u8 init_state;
280 __u8 pad1[14];
281 char name[16];
282 } entries[0];
283 };
284
285 /* virtual_entry.type is a bitmap - bigendian */
286 #define DDF_Shared 1
287 #define DDF_Enforce_Groups 2
288 #define DDF_Unicode 4
289 #define DDF_Owner_Valid 8
290
291 /* virtual_entry.state is a bigendian bitmap */
292 #define DDF_state_mask 0x7
293 #define DDF_state_optimal 0x0
294 #define DDF_state_degraded 0x1
295 #define DDF_state_deleted 0x2
296 #define DDF_state_missing 0x3
297 #define DDF_state_failed 0x4
298 #define DDF_state_part_optimal 0x5
299
300 #define DDF_state_morphing 0x8
301 #define DDF_state_inconsistent 0x10
302
303 /* virtual_entry.init_state is a bigendian bitmap */
304 #define DDF_initstate_mask 0x03
305 #define DDF_init_not 0x00
306 #define DDF_init_quick 0x01 /* initialisation in progress.
307 * i.e. 'state_inconsistent' */
308 #define DDF_init_full 0x02
309
310 #define DDF_access_mask 0xc0
311 #define DDF_access_rw 0x00
312 #define DDF_access_ro 0x80
313 #define DDF_access_blocked 0xc0
314
315 /* The content of the config_section - local scope
316 * It has multiple records each config_record_len sectors
317 * They can be vd_config or spare_assign
318 */
319
320 struct vd_config {
321 be32 magic; /* DDF_VD_CONF_MAGIC */
322 be32 crc;
323 char guid[DDF_GUID_LEN];
324 be32 timestamp;
325 be32 seqnum;
326 __u8 pad0[24];
327 be16 prim_elmnt_count;
328 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
329 __u8 prl;
330 __u8 rlq;
331 __u8 sec_elmnt_count;
332 __u8 sec_elmnt_seq;
333 __u8 srl;
334 be64 blocks; /* blocks per component could be different
335 * on different component devices...(only
336 * for concat I hope) */
337 be64 array_blocks; /* blocks in array */
338 __u8 pad1[8];
339 be32 spare_refs[8];
340 __u8 cache_pol[8];
341 __u8 bg_rate;
342 __u8 pad2[3];
343 __u8 pad3[52];
344 __u8 pad4[192];
345 __u8 v0[32]; /* reserved- 0xff */
346 __u8 v1[32]; /* reserved- 0xff */
347 __u8 v2[16]; /* reserved- 0xff */
348 __u8 v3[16]; /* reserved- 0xff */
349 __u8 vendor[32];
350 be32 phys_refnum[0]; /* refnum of each disk in sequence */
351 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
352 bvd are always the same size */
353 };
354 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
355
356 /* vd_config.cache_pol[7] is a bitmap */
357 #define DDF_cache_writeback 1 /* else writethrough */
358 #define DDF_cache_wadaptive 2 /* only applies if writeback */
359 #define DDF_cache_readahead 4
360 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
361 #define DDF_cache_ifnobatt 16 /* write-cache even if battery is poor */
362 #define DDF_cache_wallowed 32 /* enable write caching */
363 #define DDF_cache_rallowed 64 /* enable read caching */
364
365 struct spare_assign {
366 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
367 be32 crc;
368 be32 timestamp;
369 __u8 reserved[7];
370 __u8 type;
371 be16 populated; /* SAEs used */
372 be16 max; /* max SAEs */
373 __u8 pad[8];
374 struct spare_assign_entry {
375 char guid[DDF_GUID_LEN];
376 be16 secondary_element;
377 __u8 pad[6];
378 } spare_ents[0];
379 };
380 /* spare_assign.type is a bitmap */
381 #define DDF_spare_dedicated 0x1 /* else global */
382 #define DDF_spare_revertible 0x2 /* else committable */
383 #define DDF_spare_active 0x4 /* else not active */
384 #define DDF_spare_affinity 0x8 /* enclosure affinity */
385
386 /* The data_section contents - local scope */
387 struct disk_data {
388 be32 magic; /* DDF_PHYS_DATA_MAGIC */
389 be32 crc;
390 char guid[DDF_GUID_LEN];
391 be32 refnum; /* crc of some magic drive data ... */
392 __u8 forced_ref; /* set when above was not result of magic */
393 __u8 forced_guid; /* set if guid was forced rather than magic */
394 __u8 vendor[32];
395 __u8 pad[442];
396 };
397
398 /* bbm_section content */
399 struct bad_block_log {
400 be32 magic;
401 be32 crc;
402 be16 entry_count;
403 be32 spare_count;
404 __u8 pad[10];
405 be64 first_spare;
406 struct mapped_block {
407 be64 defective_start;
408 be32 replacement_start;
409 be16 remap_count;
410 __u8 pad[2];
411 } entries[0];
412 };
413
414 /* Struct for internally holding ddf structures */
415 /* The DDF structure stored on each device is potentially
416 * quite different, as some data is global and some is local.
417 * The global data is:
418 * - ddf header
419 * - controller_data
420 * - Physical disk records
421 * - Virtual disk records
422 * The local data is:
423 * - Configuration records
424 * - Physical Disk data section
425 * ( and Bad block and vendor which I don't care about yet).
426 *
427 * The local data is parsed into separate lists as it is read
428 * and reconstructed for writing. This means that we only need
429 * to make config changes once and they are automatically
430 * propagated to all devices.
431  * Note that the ddf_super has space for the conf and disk data
432 * for this disk and also for a list of all such data.
433 * The list is only used for the superblock that is being
434 * built in Create or Assemble to describe the whole array.
435 */
436 struct ddf_super {
437 struct ddf_header anchor, primary, secondary;
438 struct ddf_controller_data controller;
439 struct ddf_header *active;
440 struct phys_disk *phys;
441 struct virtual_disk *virt;
442 char *conf;
443 int pdsize, vdsize;
444 unsigned int max_part, mppe, conf_rec_len;
445 int currentdev;
446 int updates_pending;
447 struct vcl {
448 union {
449 char space[512];
450 struct {
451 struct vcl *next;
452 unsigned int vcnum; /* index into ->virt */
453 struct vd_config **other_bvds;
454 __u64 *block_sizes; /* NULL if all the same */
455 };
456 };
457 struct vd_config conf;
458 } *conflist, *currentconf;
459 struct dl {
460 union {
461 char space[512];
462 struct {
463 struct dl *next;
464 int major, minor;
465 char *devname;
466 int fd;
467 unsigned long long size; /* sectors */
468 be64 primary_lba; /* sectors */
469 be64 secondary_lba; /* sectors */
470 be64 workspace_lba; /* sectors */
471 int pdnum; /* index in ->phys */
472 struct spare_assign *spare;
473 void *mdupdate; /* hold metadata update */
474
475 /* These fields used by auto-layout */
476 int raiddisk; /* slot to fill in autolayout */
477 __u64 esize;
478 };
479 };
480 struct disk_data disk;
481 struct vcl *vlist[0]; /* max_part in size */
482 } *dlist, *add_list;
483 };
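/* Editor's sketch (hypothetical helper, not part of the original source):
 * with the structures above, the data extents of a BVD are addressed via
 * LBA_OFFSET(): phys_refnum[] holds mppe refnums and the matching array of
 * starting LBAs (in sectors) follows immediately after it, so the start of
 * the extent used by a given slot is:
 */
static inline unsigned long long
ddf_extent_start(struct ddf_super *ddf, struct vd_config *vd,
		 unsigned int slot)
{
	return be64_to_cpu(LBA_OFFSET(ddf, vd)[slot]);
}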
484
485 #ifndef offsetof
486 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
487 #endif
488
489 #if DEBUG
490 static int all_ff(const char *guid);
491 static void pr_state(struct ddf_super *ddf, const char *msg)
492 {
493 unsigned int i;
494 dprintf("%s/%s: ", __func__, msg);
495 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
496 if (all_ff(ddf->virt->entries[i].guid))
497 continue;
498 dprintf("%u(s=%02x i=%02x) ", i,
499 ddf->virt->entries[i].state,
500 ddf->virt->entries[i].init_state);
501 }
502 dprintf("\n");
503 }
504 #else
505 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
506 #endif
507
508 static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
509 {
510 ddf->updates_pending = 1;
511 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
512 pr_state(ddf, func);
513 }
514
515 #define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)
516
517 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
518 be32 refnum, unsigned int nmax,
519 const struct vd_config **bvd,
520 unsigned int *idx);
521
522 static be32 calc_crc(void *buf, int len)
523 {
524 /* crcs are always at the same place as in the ddf_header */
525 struct ddf_header *ddf = buf;
526 be32 oldcrc = ddf->crc;
527 __u32 newcrc;
528 ddf->crc = cpu_to_be32(0xffffffff);
529
530 newcrc = crc32(0, buf, len);
531 ddf->crc = oldcrc;
532         /* The crc is stored (like everything) bigendian, so convert
533 * here for simplicity
534 */
535 return cpu_to_be32(newcrc);
536 }
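/* Editor's sketch (hypothetical helper, not in the original source):
 * because calc_crc() substitutes 0xffffffff for the crc field while
 * computing, verifying any DDF block is just a comparison of the stored
 * and recomputed values:
 */
static inline int ddf_block_crc_ok(void *block, int len)
{
	struct ddf_header *h = block;	/* crc sits at offset 4 in all blocks */
	return be32_eq(calc_crc(block, len), h->crc);
}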
537
538 #define DDF_INVALID_LEVEL 0xff
539 #define DDF_NO_SECONDARY 0xff
540 static int err_bad_md_layout(const mdu_array_info_t *array)
541 {
542 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
543 array->level, array->layout, array->raid_disks);
544 return -1;
545 }
546
547 static int layout_md2ddf(const mdu_array_info_t *array,
548 struct vd_config *conf)
549 {
550 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
551 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
552 __u8 sec_elmnt_count = 1;
553 __u8 srl = DDF_NO_SECONDARY;
554
555 switch (array->level) {
556 case LEVEL_LINEAR:
557 prl = DDF_CONCAT;
558 break;
559 case 0:
560 rlq = DDF_RAID0_SIMPLE;
561 prl = DDF_RAID0;
562 break;
563 case 1:
564 switch (array->raid_disks) {
565 case 2:
566 rlq = DDF_RAID1_SIMPLE;
567 break;
568 case 3:
569 rlq = DDF_RAID1_MULTI;
570 break;
571 default:
572 return err_bad_md_layout(array);
573 }
574 prl = DDF_RAID1;
575 break;
576 case 4:
577 if (array->layout != 0)
578 return err_bad_md_layout(array);
579 rlq = DDF_RAID4_N;
580 prl = DDF_RAID4;
581 break;
582 case 5:
583 switch (array->layout) {
584 case ALGORITHM_LEFT_ASYMMETRIC:
585 rlq = DDF_RAID5_N_RESTART;
586 break;
587 case ALGORITHM_RIGHT_ASYMMETRIC:
588 rlq = DDF_RAID5_0_RESTART;
589 break;
590 case ALGORITHM_LEFT_SYMMETRIC:
591 rlq = DDF_RAID5_N_CONTINUE;
592 break;
593 case ALGORITHM_RIGHT_SYMMETRIC:
594 /* not mentioned in standard */
595 default:
596 return err_bad_md_layout(array);
597 }
598 prl = DDF_RAID5;
599 break;
600 case 6:
601 switch (array->layout) {
602 case ALGORITHM_ROTATING_N_RESTART:
603 rlq = DDF_RAID5_N_RESTART;
604 break;
605 case ALGORITHM_ROTATING_ZERO_RESTART:
606 rlq = DDF_RAID6_0_RESTART;
607 break;
608 case ALGORITHM_ROTATING_N_CONTINUE:
609 rlq = DDF_RAID5_N_CONTINUE;
610 break;
611 default:
612 return err_bad_md_layout(array);
613 }
614 prl = DDF_RAID6;
615 break;
616 case 10:
617 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
618 rlq = DDF_RAID1_SIMPLE;
619 prim_elmnt_count = cpu_to_be16(2);
620 sec_elmnt_count = array->raid_disks / 2;
621 } else if (array->raid_disks % 3 == 0
622 && array->layout == 0x103) {
623 rlq = DDF_RAID1_MULTI;
624 prim_elmnt_count = cpu_to_be16(3);
625 sec_elmnt_count = array->raid_disks / 3;
626 } else
627 return err_bad_md_layout(array);
628 srl = DDF_2SPANNED;
629 prl = DDF_RAID1;
630 break;
631 default:
632 return err_bad_md_layout(array);
633 }
634 conf->prl = prl;
635 conf->prim_elmnt_count = prim_elmnt_count;
636 conf->rlq = rlq;
637 conf->srl = srl;
638 conf->sec_elmnt_count = sec_elmnt_count;
639 return 0;
640 }
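/* Editor's example (hypothetical helper, not in the original source): an md
 * RAID10 array with 4 disks and the default near-2 layout (0x102) maps to
 * DDF PRL=RAID1, RLQ=SIMPLE, prim_elmnt_count=2, sec_elmnt_count=2 and
 * SRL=spanned; layout_ddf2md() below performs the inverse mapping.
 */
static inline int example_raid10_n2_to_ddf(struct vd_config *conf)
{
	mdu_array_info_t array = {
		.level = 10,
		.raid_disks = 4,
		.layout = 0x102,	/* md 'n2' near-copies layout */
	};
	return layout_md2ddf(&array, conf);	/* 0 on success */
}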
641
642 static int err_bad_ddf_layout(const struct vd_config *conf)
643 {
644 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
645 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
646 return -1;
647 }
648
649 static int layout_ddf2md(const struct vd_config *conf,
650 mdu_array_info_t *array)
651 {
652 int level = LEVEL_UNSUPPORTED;
653 int layout = 0;
654 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
655
656 if (conf->sec_elmnt_count > 1) {
657 /* see also check_secondary() */
658 if (conf->prl != DDF_RAID1 ||
659 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
660 pr_err("Unsupported secondary RAID level %u/%u\n",
661 conf->prl, conf->srl);
662 return -1;
663 }
664 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
665 layout = 0x102;
666 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
667 layout = 0x103;
668 else
669 return err_bad_ddf_layout(conf);
670 raiddisks *= conf->sec_elmnt_count;
671 level = 10;
672 goto good;
673 }
674
675 switch (conf->prl) {
676 case DDF_CONCAT:
677 level = LEVEL_LINEAR;
678 break;
679 case DDF_RAID0:
680 if (conf->rlq != DDF_RAID0_SIMPLE)
681 return err_bad_ddf_layout(conf);
682 level = 0;
683 break;
684 case DDF_RAID1:
685 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
686 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
687 return err_bad_ddf_layout(conf);
688 level = 1;
689 break;
690 case DDF_RAID4:
691 if (conf->rlq != DDF_RAID4_N)
692 return err_bad_ddf_layout(conf);
693 level = 4;
694 break;
695 case DDF_RAID5:
696 switch (conf->rlq) {
697 case DDF_RAID5_N_RESTART:
698 layout = ALGORITHM_LEFT_ASYMMETRIC;
699 break;
700 case DDF_RAID5_0_RESTART:
701 layout = ALGORITHM_RIGHT_ASYMMETRIC;
702 break;
703 case DDF_RAID5_N_CONTINUE:
704 layout = ALGORITHM_LEFT_SYMMETRIC;
705 break;
706 default:
707 return err_bad_ddf_layout(conf);
708 }
709 level = 5;
710 break;
711 case DDF_RAID6:
712 switch (conf->rlq) {
713 case DDF_RAID5_N_RESTART:
714 layout = ALGORITHM_ROTATING_N_RESTART;
715 break;
716 case DDF_RAID6_0_RESTART:
717 layout = ALGORITHM_ROTATING_ZERO_RESTART;
718 break;
719 case DDF_RAID5_N_CONTINUE:
720 layout = ALGORITHM_ROTATING_N_CONTINUE;
721 break;
722 default:
723 return err_bad_ddf_layout(conf);
724 }
725 level = 6;
726 break;
727 default:
728 return err_bad_ddf_layout(conf);
729 };
730
731 good:
732 array->level = level;
733 array->layout = layout;
734 array->raid_disks = raiddisks;
735 return 0;
736 }
737
738 static int load_ddf_header(int fd, unsigned long long lba,
739 unsigned long long size,
740 int type,
741 struct ddf_header *hdr, struct ddf_header *anchor)
742 {
743 /* read a ddf header (primary or secondary) from fd/lba
744 * and check that it is consistent with anchor
745 * Need to check:
746          *  magic, crc, guid, rev, and the LBAs; header_type, and
747 * everything after header_type must be the same
748 */
749 if (lba >= size-1)
750 return 0;
751
752 if (lseek64(fd, lba<<9, 0) < 0)
753 return 0;
754
755 if (read(fd, hdr, 512) != 512)
756 return 0;
757
758 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
759 pr_err("%s: bad header magic\n", __func__);
760 return 0;
761 }
762 if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
763 pr_err("%s: bad CRC\n", __func__);
764 return 0;
765 }
766 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
767 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
768 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
769 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
770 hdr->type != type ||
771 memcmp(anchor->pad2, hdr->pad2, 512 -
772 offsetof(struct ddf_header, pad2)) != 0) {
773 pr_err("%s: header mismatch\n", __func__);
774 return 0;
775 }
776
777 /* Looks good enough to me... */
778 return 1;
779 }
780
781 static void *load_section(int fd, struct ddf_super *super, void *buf,
782 be32 offset_be, be32 len_be, int check)
783 {
784 unsigned long long offset = be32_to_cpu(offset_be);
785 unsigned long long len = be32_to_cpu(len_be);
786 int dofree = (buf == NULL);
787
788 if (check)
789 if (len != 2 && len != 8 && len != 32
790 && len != 128 && len != 512)
791 return NULL;
792
793 if (len > 1024)
794 return NULL;
795 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
796 buf = NULL;
797
798 if (!buf)
799 return NULL;
800
801 if (super->active->type == 1)
802 offset += be64_to_cpu(super->active->primary_lba);
803 else
804 offset += be64_to_cpu(super->active->secondary_lba);
805
806 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
807 if (dofree)
808 free(buf);
809 return NULL;
810 }
811 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
812 if (dofree)
813 free(buf);
814 return NULL;
815 }
816 return buf;
817 }
818
819 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
820 {
821 unsigned long long dsize;
822
823 get_dev_size(fd, NULL, &dsize);
824
825 if (lseek64(fd, dsize-512, 0) < 0) {
826 if (devname)
827 pr_err("Cannot seek to anchor block on %s: %s\n",
828 devname, strerror(errno));
829 return 1;
830 }
831 if (read(fd, &super->anchor, 512) != 512) {
832 if (devname)
833 pr_err("Cannot read anchor block on %s: %s\n",
834 devname, strerror(errno));
835 return 1;
836 }
837 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
838 if (devname)
839 pr_err("no DDF anchor found on %s\n",
840 devname);
841 return 2;
842 }
843 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
844 if (devname)
845 pr_err("bad CRC on anchor on %s\n",
846 devname);
847 return 2;
848 }
849 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
850 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
851 if (devname)
852 pr_err("can only support super revision"
853 " %.8s and earlier, not %.8s on %s\n",
854 DDF_REVISION_2, super->anchor.revision,devname);
855 return 2;
856 }
857 super->active = NULL;
858 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
859 dsize >> 9, 1,
860 &super->primary, &super->anchor) == 0) {
861 if (devname)
862 pr_err("Failed to load primary DDF header "
863 "on %s\n", devname);
864 } else
865 super->active = &super->primary;
866
867 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
868 dsize >> 9, 2,
869 &super->secondary, &super->anchor)) {
870 if (super->active == NULL
871 || (be32_to_cpu(super->primary.seq)
872 < be32_to_cpu(super->secondary.seq) &&
873 !super->secondary.openflag)
874 || (be32_to_cpu(super->primary.seq)
875 == be32_to_cpu(super->secondary.seq) &&
876 super->primary.openflag && !super->secondary.openflag)
877 )
878 super->active = &super->secondary;
879 } else if (devname)
880 pr_err("Failed to load secondary DDF header on %s\n",
881 devname);
882 if (super->active == NULL)
883 return 2;
884 return 0;
885 }
886
887 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
888 {
889 void *ok;
890 ok = load_section(fd, super, &super->controller,
891 super->active->controller_section_offset,
892 super->active->controller_section_length,
893 0);
894 super->phys = load_section(fd, super, NULL,
895 super->active->phys_section_offset,
896 super->active->phys_section_length,
897 1);
898 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
899
900 super->virt = load_section(fd, super, NULL,
901 super->active->virt_section_offset,
902 super->active->virt_section_length,
903 1);
904 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
905 if (!ok ||
906 !super->phys ||
907 !super->virt) {
908 free(super->phys);
909 free(super->virt);
910 super->phys = NULL;
911 super->virt = NULL;
912 return 2;
913 }
914 super->conflist = NULL;
915 super->dlist = NULL;
916
917 super->max_part = be16_to_cpu(super->active->max_partitions);
918 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
919 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
920 return 0;
921 }
922
923 #define DDF_UNUSED_BVD 0xff
924 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
925 {
926 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
927 unsigned int i, vdsize;
928 void *p;
929 if (n_vds == 0) {
930 vcl->other_bvds = NULL;
931 return 0;
932 }
933 vdsize = ddf->conf_rec_len * 512;
934 if (posix_memalign(&p, 512, n_vds *
935 (vdsize + sizeof(struct vd_config *))) != 0)
936 return -1;
937 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
938 for (i = 0; i < n_vds; i++) {
939 vcl->other_bvds[i] = p + i * vdsize;
940 memset(vcl->other_bvds[i], 0, vdsize);
941 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
942 }
943 return 0;
944 }
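/* Editor's note (illustrative, not in the original source): the single
 * allocation made above is laid out as
 *	[bvd 0][bvd 1]...[bvd n-1][pointer array]
 * so other_bvds points into the tail of the block while other_bvds[0]
 * points at its start, which is why free_super_ddf() releases everything
 * with a single free(v->other_bvds[0]).
 */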
945
946 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
947 unsigned int len)
948 {
949 int i;
950 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
951 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
952 break;
953
954 if (i < vcl->conf.sec_elmnt_count-1) {
955 if (be32_to_cpu(vd->seqnum) <=
956 be32_to_cpu(vcl->other_bvds[i]->seqnum))
957 return;
958 } else {
959 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
960 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
961 break;
962 if (i == vcl->conf.sec_elmnt_count-1) {
963 pr_err("no space for sec level config %u, count is %u\n",
964 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
965 return;
966 }
967 }
968 memcpy(vcl->other_bvds[i], vd, len);
969 }
970
971 static int load_ddf_local(int fd, struct ddf_super *super,
972 char *devname, int keep)
973 {
974 struct dl *dl;
975 struct stat stb;
976 char *conf;
977 unsigned int i;
978 unsigned int confsec;
979 int vnum;
980 unsigned int max_virt_disks = be16_to_cpu
981 (super->active->max_vd_entries);
982 unsigned long long dsize;
983
984 /* First the local disk info */
985 if (posix_memalign((void**)&dl, 512,
986 sizeof(*dl) +
987 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
988 pr_err("%s could not allocate disk info buffer\n",
989 __func__);
990 return 1;
991 }
992
993 load_section(fd, super, &dl->disk,
994 super->active->data_section_offset,
995 super->active->data_section_length,
996 0);
997 dl->devname = devname ? xstrdup(devname) : NULL;
998
999 fstat(fd, &stb);
1000 dl->major = major(stb.st_rdev);
1001 dl->minor = minor(stb.st_rdev);
1002 dl->next = super->dlist;
1003 dl->fd = keep ? fd : -1;
1004
1005 dl->size = 0;
1006 if (get_dev_size(fd, devname, &dsize))
1007 dl->size = dsize >> 9;
1008 /* If the disks have different sizes, the LBAs will differ
1009 * between phys disks.
1010 * At this point here, the values in super->active must be valid
1011 * for this phys disk. */
1012 dl->primary_lba = super->active->primary_lba;
1013 dl->secondary_lba = super->active->secondary_lba;
1014 dl->workspace_lba = super->active->workspace_lba;
1015 dl->spare = NULL;
1016 for (i = 0 ; i < super->max_part ; i++)
1017 dl->vlist[i] = NULL;
1018 super->dlist = dl;
1019 dl->pdnum = -1;
1020 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1021 if (memcmp(super->phys->entries[i].guid,
1022 dl->disk.guid, DDF_GUID_LEN) == 0)
1023 dl->pdnum = i;
1024
1025 /* Now the config list. */
1026 /* 'conf' is an array of config entries, some of which are
1027 * probably invalid. Those which are good need to be copied into
1028 * the conflist
1029 */
1030
1031 conf = load_section(fd, super, super->conf,
1032 super->active->config_section_offset,
1033 super->active->config_section_length,
1034 0);
1035 super->conf = conf;
1036 vnum = 0;
1037 for (confsec = 0;
1038 confsec < be32_to_cpu(super->active->config_section_length);
1039 confsec += super->conf_rec_len) {
1040 struct vd_config *vd =
1041 (struct vd_config *)((char*)conf + confsec*512);
1042 struct vcl *vcl;
1043
1044 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1045 if (dl->spare)
1046 continue;
1047 if (posix_memalign((void**)&dl->spare, 512,
1048 super->conf_rec_len*512) != 0) {
1049 pr_err("%s could not allocate spare info buf\n",
1050 __func__);
1051 return 1;
1052 }
1053
1054 memcpy(dl->spare, vd, super->conf_rec_len*512);
1055 continue;
1056 }
1057 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1058 continue;
1059 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1060 if (memcmp(vcl->conf.guid,
1061 vd->guid, DDF_GUID_LEN) == 0)
1062 break;
1063 }
1064
1065 if (vcl) {
1066 dl->vlist[vnum++] = vcl;
1067 if (vcl->other_bvds != NULL &&
1068 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1069 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1070 continue;
1071 }
1072 if (be32_to_cpu(vd->seqnum) <=
1073 be32_to_cpu(vcl->conf.seqnum))
1074 continue;
1075 } else {
1076 if (posix_memalign((void**)&vcl, 512,
1077 (super->conf_rec_len*512 +
1078 offsetof(struct vcl, conf))) != 0) {
1079 pr_err("%s could not allocate vcl buf\n",
1080 __func__);
1081 return 1;
1082 }
1083 vcl->next = super->conflist;
1084 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1085 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1086 if (alloc_other_bvds(super, vcl) != 0) {
1087 pr_err("%s could not allocate other bvds\n",
1088 __func__);
1089 free(vcl);
1090 return 1;
1091 };
1092 super->conflist = vcl;
1093 dl->vlist[vnum++] = vcl;
1094 }
1095 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1096 for (i=0; i < max_virt_disks ; i++)
1097 if (memcmp(super->virt->entries[i].guid,
1098 vcl->conf.guid, DDF_GUID_LEN)==0)
1099 break;
1100 if (i < max_virt_disks)
1101 vcl->vcnum = i;
1102 }
1103
1104 return 0;
1105 }
1106
1107 #ifndef MDASSEMBLE
1108 static int load_super_ddf_all(struct supertype *st, int fd,
1109 void **sbp, char *devname);
1110 #endif
1111
1112 static void free_super_ddf(struct supertype *st);
1113
1114 static int load_super_ddf(struct supertype *st, int fd,
1115 char *devname)
1116 {
1117 unsigned long long dsize;
1118 struct ddf_super *super;
1119 int rv;
1120
1121 if (get_dev_size(fd, devname, &dsize) == 0)
1122 return 1;
1123
1124 if (!st->ignore_hw_compat && test_partition(fd))
1125 /* DDF is not allowed on partitions */
1126 return 1;
1127
1128 /* 32M is a lower bound */
1129 if (dsize <= 32*1024*1024) {
1130 if (devname)
1131 pr_err("%s is too small for ddf: "
1132 "size is %llu sectors.\n",
1133 devname, dsize>>9);
1134 return 1;
1135 }
1136 if (dsize & 511) {
1137 if (devname)
1138 pr_err("%s is an odd size for ddf: "
1139 "size is %llu bytes.\n",
1140 devname, dsize);
1141 return 1;
1142 }
1143
1144 free_super_ddf(st);
1145
1146 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1147 pr_err("malloc of %zu failed.\n",
1148 sizeof(*super));
1149 return 1;
1150 }
1151 memset(super, 0, sizeof(*super));
1152
1153 rv = load_ddf_headers(fd, super, devname);
1154 if (rv) {
1155 free(super);
1156 return rv;
1157 }
1158
1159         /* Have valid headers and have chosen the best. Let's read in the rest */
1160
1161 rv = load_ddf_global(fd, super, devname);
1162
1163 if (rv) {
1164 if (devname)
1165 pr_err("Failed to load all information "
1166 "sections on %s\n", devname);
1167 free(super);
1168 return rv;
1169 }
1170
1171 rv = load_ddf_local(fd, super, devname, 0);
1172
1173 if (rv) {
1174 if (devname)
1175 pr_err("Failed to load all information "
1176 "sections on %s\n", devname);
1177 free(super);
1178 return rv;
1179 }
1180
1181 /* Should possibly check the sections .... */
1182
1183 st->sb = super;
1184 if (st->ss == NULL) {
1185 st->ss = &super_ddf;
1186 st->minor_version = 0;
1187 st->max_devs = 512;
1188 }
1189 return 0;
1190
1191 }
1192
1193 static void free_super_ddf(struct supertype *st)
1194 {
1195 struct ddf_super *ddf = st->sb;
1196 if (ddf == NULL)
1197 return;
1198 free(ddf->phys);
1199 free(ddf->virt);
1200 free(ddf->conf);
1201 while (ddf->conflist) {
1202 struct vcl *v = ddf->conflist;
1203 ddf->conflist = v->next;
1204 if (v->block_sizes)
1205 free(v->block_sizes);
1206 if (v->other_bvds)
1207 /*
1208 v->other_bvds[0] points to beginning of buffer,
1209 see alloc_other_bvds()
1210 */
1211 free(v->other_bvds[0]);
1212 free(v);
1213 }
1214 while (ddf->dlist) {
1215 struct dl *d = ddf->dlist;
1216 ddf->dlist = d->next;
1217 if (d->fd >= 0)
1218 close(d->fd);
1219 if (d->spare)
1220 free(d->spare);
1221 free(d);
1222 }
1223 while (ddf->add_list) {
1224 struct dl *d = ddf->add_list;
1225 ddf->add_list = d->next;
1226 if (d->fd >= 0)
1227 close(d->fd);
1228 if (d->spare)
1229 free(d->spare);
1230 free(d);
1231 }
1232 free(ddf);
1233 st->sb = NULL;
1234 }
1235
1236 static struct supertype *match_metadata_desc_ddf(char *arg)
1237 {
1238         /* 'ddf' only supports containers */
1239 struct supertype *st;
1240 if (strcmp(arg, "ddf") != 0 &&
1241 strcmp(arg, "default") != 0
1242 )
1243 return NULL;
1244
1245 st = xcalloc(1, sizeof(*st));
1246 st->ss = &super_ddf;
1247 st->max_devs = 512;
1248 st->minor_version = 0;
1249 st->sb = NULL;
1250 return st;
1251 }
1252
1253 #ifndef MDASSEMBLE
1254
1255 static mapping_t ddf_state[] = {
1256 { "Optimal", 0},
1257 { "Degraded", 1},
1258 { "Deleted", 2},
1259 { "Missing", 3},
1260 { "Failed", 4},
1261 { "Partially Optimal", 5},
1262 { "-reserved-", 6},
1263 { "-reserved-", 7},
1264 { NULL, 0}
1265 };
1266
1267 static mapping_t ddf_init_state[] = {
1268 { "Not Initialised", 0},
1269 { "QuickInit in Progress", 1},
1270 { "Fully Initialised", 2},
1271 { "*UNKNOWN*", 3},
1272 { NULL, 0}
1273 };
1274 static mapping_t ddf_access[] = {
1275 { "Read/Write", 0},
1276 { "Reserved", 1},
1277 { "Read Only", 2},
1278 { "Blocked (no access)", 3},
1279 { NULL ,0}
1280 };
1281
1282 static mapping_t ddf_level[] = {
1283 { "RAID0", DDF_RAID0},
1284 { "RAID1", DDF_RAID1},
1285 { "RAID3", DDF_RAID3},
1286 { "RAID4", DDF_RAID4},
1287 { "RAID5", DDF_RAID5},
1288 { "RAID1E",DDF_RAID1E},
1289 { "JBOD", DDF_JBOD},
1290 { "CONCAT",DDF_CONCAT},
1291 { "RAID5E",DDF_RAID5E},
1292 { "RAID5EE",DDF_RAID5EE},
1293 { "RAID6", DDF_RAID6},
1294 { NULL, 0}
1295 };
1296 static mapping_t ddf_sec_level[] = {
1297 { "Striped", DDF_2STRIPED},
1298 { "Mirrored", DDF_2MIRRORED},
1299 { "Concat", DDF_2CONCAT},
1300 { "Spanned", DDF_2SPANNED},
1301 { NULL, 0}
1302 };
1303 #endif
1304
1305 static int all_ff(const char *guid)
1306 {
1307 int i;
1308 for (i = 0; i < DDF_GUID_LEN; i++)
1309 if (guid[i] != (char)0xff)
1310 return 0;
1311 return 1;
1312 }
1313
1314 static const char *guid_str(const char *guid)
1315 {
1316 static char buf[DDF_GUID_LEN*2+1];
1317 int i;
1318 char *p = buf;
1319 for (i = 0; i < DDF_GUID_LEN; i++) {
1320 unsigned char c = guid[i];
1321 if (c >= 32 && c < 127)
1322 p += sprintf(p, "%c", c);
1323 else
1324 p += sprintf(p, "%02x", c);
1325 }
1326 *p = '\0';
1327 return (const char *) buf;
1328 }
1329
1330 #ifndef MDASSEMBLE
1331 static void print_guid(char *guid, int tstamp)
1332 {
1333         /* DDF GUIDs are part (or all) ASCII and part binary.
1334 * They tend to be space padded.
1335 * We print the GUID in HEX, then in parentheses add
1336 * any initial ASCII sequence, and a possible
1337 * time stamp from bytes 16-19
1338 */
1339 int l = DDF_GUID_LEN;
1340 int i;
1341
1342 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1343 if ((i&3)==0 && i != 0) printf(":");
1344 printf("%02X", guid[i]&255);
1345 }
1346
1347 printf("\n (");
1348 while (l && guid[l-1] == ' ')
1349 l--;
1350 for (i=0 ; i<l ; i++) {
1351 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1352 fputc(guid[i], stdout);
1353 else
1354 break;
1355 }
1356 if (tstamp) {
1357 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1358 char tbuf[100];
1359 struct tm *tm;
1360 tm = localtime(&then);
1361 strftime(tbuf, 100, " %D %T",tm);
1362 fputs(tbuf, stdout);
1363 }
1364 printf(")");
1365 }
1366
1367 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1368 {
1369 int crl = sb->conf_rec_len;
1370 struct vcl *vcl;
1371
1372 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1373 unsigned int i;
1374 struct vd_config *vc = &vcl->conf;
1375
1376 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1377 continue;
1378 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1379 continue;
1380
1381 /* Ok, we know about this VD, let's give more details */
1382 printf(" Raid Devices[%d] : %d (", n,
1383 be16_to_cpu(vc->prim_elmnt_count));
1384 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1385 int j;
1386 int cnt = be16_to_cpu(sb->phys->used_pdes);
1387 for (j=0; j<cnt; j++)
1388 if (be32_eq(vc->phys_refnum[i],
1389 sb->phys->entries[j].refnum))
1390 break;
1391 if (i) printf(" ");
1392 if (j < cnt)
1393 printf("%d", j);
1394 else
1395 printf("--");
1396 }
1397 printf(")\n");
1398 if (vc->chunk_shift != 255)
1399 printf(" Chunk Size[%d] : %d sectors\n", n,
1400 1 << vc->chunk_shift);
1401 printf(" Raid Level[%d] : %s\n", n,
1402 map_num(ddf_level, vc->prl)?:"-unknown-");
1403 if (vc->sec_elmnt_count != 1) {
1404 printf(" Secondary Position[%d] : %d of %d\n", n,
1405 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1406 printf(" Secondary Level[%d] : %s\n", n,
1407 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1408 }
1409 printf(" Device Size[%d] : %llu\n", n,
1410 be64_to_cpu(vc->blocks)/2);
1411 printf(" Array Size[%d] : %llu\n", n,
1412 be64_to_cpu(vc->array_blocks)/2);
1413 }
1414 }
1415
1416 static void examine_vds(struct ddf_super *sb)
1417 {
1418 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1419 unsigned int i;
1420 printf(" Virtual Disks : %d\n", cnt);
1421
1422 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1423 struct virtual_entry *ve = &sb->virt->entries[i];
1424 if (all_ff(ve->guid))
1425 continue;
1426 printf("\n");
1427 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1428 printf("\n");
1429 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1430 printf(" state[%d] : %s, %s%s\n", i,
1431 map_num(ddf_state, ve->state & 7),
1432 (ve->state & DDF_state_morphing) ? "Morphing, ": "",
1433 (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
1434 printf(" init state[%d] : %s\n", i,
1435 map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
1436 printf(" access[%d] : %s\n", i,
1437 map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
1438 printf(" Name[%d] : %.16s\n", i, ve->name);
1439 examine_vd(i, sb, ve->guid);
1440 }
1441 if (cnt) printf("\n");
1442 }
1443
1444 static void examine_pds(struct ddf_super *sb)
1445 {
1446 int cnt = be16_to_cpu(sb->phys->used_pdes);
1447 int i;
1448 struct dl *dl;
1449 printf(" Physical Disks : %d\n", cnt);
1450 printf(" Number RefNo Size Device Type/State\n");
1451
1452 for (i=0 ; i<cnt ; i++) {
1453 struct phys_disk_entry *pd = &sb->phys->entries[i];
1454 int type = be16_to_cpu(pd->type);
1455 int state = be16_to_cpu(pd->state);
1456
1457 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1458 //printf("\n");
1459 printf(" %3d %08x ", i,
1460 be32_to_cpu(pd->refnum));
1461 printf("%8lluK ",
1462 be64_to_cpu(pd->config_size)>>1);
1463 for (dl = sb->dlist; dl ; dl = dl->next) {
1464 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1465 char *dv = map_dev(dl->major, dl->minor, 0);
1466 if (dv) {
1467 printf("%-15s", dv);
1468 break;
1469 }
1470 }
1471 }
1472 if (!dl)
1473 printf("%15s","");
1474 printf(" %s%s%s%s%s",
1475 (type&2) ? "active":"",
1476 (type&4) ? "Global-Spare":"",
1477 (type&8) ? "spare" : "",
1478 (type&16)? ", foreign" : "",
1479 (type&32)? "pass-through" : "");
1480 if (state & DDF_Failed)
1481 /* This over-rides these three */
1482 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1483 printf("/%s%s%s%s%s%s%s",
1484 (state&1)? "Online": "Offline",
1485 (state&2)? ", Failed": "",
1486 (state&4)? ", Rebuilding": "",
1487 (state&8)? ", in-transition": "",
1488 (state&16)? ", SMART-errors": "",
1489 (state&32)? ", Unrecovered-Read-Errors": "",
1490 (state&64)? ", Missing" : "");
1491 printf("\n");
1492 }
1493 }
1494
1495 static void examine_super_ddf(struct supertype *st, char *homehost)
1496 {
1497 struct ddf_super *sb = st->sb;
1498
1499 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1500 printf(" Version : %.8s\n", sb->anchor.revision);
1501 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1502 printf("\n");
1503 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1504 printf("\n");
1505 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1506 printf(" Redundant hdr : %s\n", be32_eq(sb->secondary.magic,
1507 DDF_HEADER_MAGIC)
1508 ?"yes" : "no");
1509 examine_vds(sb);
1510 examine_pds(sb);
1511 }
1512
1513 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1514
1515 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1516 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1517
1518 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1519 {
1520 /*
1521 * Figure out the VD number for this supertype.
1522 * Returns DDF_CONTAINER for the container itself,
1523 * and DDF_NOTFOUND on error.
1524 */
1525 struct ddf_super *ddf = st->sb;
1526 struct mdinfo *sra;
1527 char *sub, *end;
1528 unsigned int vcnum;
1529
1530 if (*st->container_devnm == '\0')
1531 return DDF_CONTAINER;
1532
1533 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1534 if (!sra || sra->array.major_version != -1 ||
1535 sra->array.minor_version != -2 ||
1536 !is_subarray(sra->text_version))
1537 return DDF_NOTFOUND;
1538
1539 sub = strchr(sra->text_version + 1, '/');
1540 if (sub != NULL)
1541 vcnum = strtoul(sub + 1, &end, 10);
1542 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1543 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1544 return DDF_NOTFOUND;
1545
1546 return vcnum;
1547 }
1548
1549 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1550 {
1551 /* We just write a generic DDF ARRAY entry
1552 */
1553 struct mdinfo info;
1554 char nbuf[64];
1555 getinfo_super_ddf(st, &info, NULL);
1556 fname_from_uuid(st, &info, nbuf, ':');
1557
1558 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1559 }
1560
1561 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1562 {
1563 /* We just write a generic DDF ARRAY entry
1564 */
1565 struct ddf_super *ddf = st->sb;
1566 struct mdinfo info;
1567 unsigned int i;
1568 char nbuf[64];
1569 getinfo_super_ddf(st, &info, NULL);
1570 fname_from_uuid(st, &info, nbuf, ':');
1571
1572 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1573 struct virtual_entry *ve = &ddf->virt->entries[i];
1574 struct vcl vcl;
1575 char nbuf1[64];
1576 if (all_ff(ve->guid))
1577 continue;
1578 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1579 ddf->currentconf =&vcl;
1580 uuid_from_super_ddf(st, info.uuid);
1581 fname_from_uuid(st, &info, nbuf1, ':');
1582 printf("ARRAY container=%s member=%d UUID=%s\n",
1583 nbuf+5, i, nbuf1+5);
1584 }
1585 }
1586
1587 static void export_examine_super_ddf(struct supertype *st)
1588 {
1589 struct mdinfo info;
1590 char nbuf[64];
1591 getinfo_super_ddf(st, &info, NULL);
1592 fname_from_uuid(st, &info, nbuf, ':');
1593 printf("MD_METADATA=ddf\n");
1594 printf("MD_LEVEL=container\n");
1595 printf("MD_UUID=%s\n", nbuf+5);
1596 }
1597
1598 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1599 {
1600 void *buf;
1601 unsigned long long dsize, offset;
1602 int bytes;
1603 struct ddf_header *ddf;
1604 int written = 0;
1605
1606 /* The meta consists of an anchor, a primary, and a secondary.
1607 * This all lives at the end of the device.
1608 * So it is easiest to find the earliest of primary and
1609 * secondary, and copy everything from there.
1610 *
1611          * The anchor is 512 bytes from the end. It contains primary_lba and
1612          * secondary_lba; we choose one of those.
1613 */
1614
1615 if (posix_memalign(&buf, 4096, 4096) != 0)
1616 return 1;
1617
1618 if (!get_dev_size(from, NULL, &dsize))
1619 goto err;
1620
1621 if (lseek64(from, dsize-512, 0) < 0)
1622 goto err;
1623 if (read(from, buf, 512) != 512)
1624 goto err;
1625 ddf = buf;
1626 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1627 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1628 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1629 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1630 goto err;
1631
1632 offset = dsize - 512;
1633 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1634 offset = be64_to_cpu(ddf->primary_lba) << 9;
1635 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1636 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1637
1638 bytes = dsize - offset;
1639
1640 if (lseek64(from, offset, 0) < 0 ||
1641 lseek64(to, offset, 0) < 0)
1642 goto err;
1643 while (written < bytes) {
1644 int n = bytes - written;
1645 if (n > 4096)
1646 n = 4096;
1647 if (read(from, buf, n) != n)
1648 goto err;
1649 if (write(to, buf, n) != n)
1650 goto err;
1651 written += n;
1652 }
1653 free(buf);
1654 return 0;
1655 err:
1656 free(buf);
1657 return 1;
1658 }
1659
1660 static void detail_super_ddf(struct supertype *st, char *homehost)
1661 {
1662 /* FIXME later
1663 * Could print DDF GUID
1664 * Need to find which array
1665 * If whole, briefly list all arrays
1666 * If one, give name
1667 */
1668 }
1669
1670 static void brief_detail_super_ddf(struct supertype *st)
1671 {
1672 struct mdinfo info;
1673 char nbuf[64];
1674 struct ddf_super *ddf = st->sb;
1675 unsigned int vcnum = get_vd_num_of_subarray(st);
1676 if (vcnum == DDF_CONTAINER)
1677 uuid_from_super_ddf(st, info.uuid);
1678 else if (vcnum == DDF_NOTFOUND)
1679 return;
1680 else
1681 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, info.uuid);
1682 fname_from_uuid(st, &info, nbuf,':');
1683 printf(" UUID=%s", nbuf + 5);
1684 }
1685 #endif
1686
1687 static int match_home_ddf(struct supertype *st, char *homehost)
1688 {
1689 /* It matches 'this' host if the controller is a
1690 * Linux-MD controller with vendor_data matching
1691 * the hostname
1692 */
1693 struct ddf_super *ddf = st->sb;
1694 unsigned int len;
1695
1696 if (!homehost)
1697 return 0;
1698 len = strlen(homehost);
1699
1700 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1701 len < sizeof(ddf->controller.vendor_data) &&
1702 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1703 ddf->controller.vendor_data[len] == 0);
1704 }
1705
1706 #ifndef MDASSEMBLE
1707 static int find_index_in_bvd(const struct ddf_super *ddf,
1708 const struct vd_config *conf, unsigned int n,
1709 unsigned int *n_bvd)
1710 {
1711 /*
1712 * Find the index of the n-th valid physical disk in this BVD
1713 */
1714 unsigned int i, j;
1715 for (i = 0, j = 0; i < ddf->mppe &&
1716 j < be16_to_cpu(conf->prim_elmnt_count); i++) {
1717 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1718 if (n == j) {
1719 *n_bvd = i;
1720 return 1;
1721 }
1722 j++;
1723 }
1724 }
1725 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1726 __func__, n, be16_to_cpu(conf->prim_elmnt_count));
1727 return 0;
1728 }
1729
1730 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1731 unsigned int n,
1732 unsigned int *n_bvd, struct vcl **vcl)
1733 {
1734 struct vcl *v;
1735
1736 for (v = ddf->conflist; v; v = v->next) {
1737 unsigned int nsec, ibvd = 0;
1738 struct vd_config *conf;
1739 if (inst != v->vcnum)
1740 continue;
1741 conf = &v->conf;
1742 if (conf->sec_elmnt_count == 1) {
1743 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1744 *vcl = v;
1745 return conf;
1746 } else
1747 goto bad;
1748 }
1749 if (v->other_bvds == NULL) {
1750 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1751 __func__, conf->sec_elmnt_count);
1752 goto bad;
1753 }
1754 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1755 if (conf->sec_elmnt_seq != nsec) {
1756 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1757 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1758 == nsec)
1759 break;
1760 }
1761 if (ibvd == conf->sec_elmnt_count)
1762 goto bad;
1763 conf = v->other_bvds[ibvd-1];
1764 }
1765 if (!find_index_in_bvd(ddf, conf,
1766 n - nsec*conf->sec_elmnt_count, n_bvd))
1767 goto bad;
1768 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1769 , __func__, n, *n_bvd, ibvd, inst);
1770 *vcl = v;
1771 return conf;
1772 }
1773 bad:
1774         pr_err("%s: Couldn't find disk %d in array %u\n", __func__, n, inst);
1775 return NULL;
1776 }
1777 #endif
1778
1779 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1780 {
1781 /* Find the entry in phys_disk which has the given refnum
1782          * and return its index
1783 */
1784 unsigned int i;
1785 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1786 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1787 return i;
1788 return -1;
1789 }
1790
1791 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1792 {
1793 char buf[20];
1794 struct sha1_ctx ctx;
1795 sha1_init_ctx(&ctx);
1796 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1797 sha1_finish_ctx(&ctx, buf);
1798 memcpy(uuid, buf, 4*4);
1799 }
1800
1801 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1802 {
1803 /* The uuid returned here is used for:
1804 * uuid to put into bitmap file (Create, Grow)
1805 * uuid for backup header when saving critical section (Grow)
1806 * comparing uuids when re-adding a device into an array
1807 * In these cases the uuid required is that of the data-array,
1808 * not the device-set.
1809 * uuid to recognise same set when adding a missing device back
1810 * to an array. This is a uuid for the device-set.
1811 *
1812 * For each of these we can make do with a truncated
1813 * or hashed uuid rather than the original, as long as
1814 * everyone agrees.
1815 * In the case of SVD we assume the BVD is of interest,
1816          * though that might not be the case if a bitmap were made for
1817 * a mirrored SVD - worry about that later.
1818 * So we need to find the VD configuration record for the
1819 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1820 * The first 16 bytes of the sha1 of these is used.
1821 */
1822 struct ddf_super *ddf = st->sb;
1823 struct vcl *vcl = ddf->currentconf;
1824 char *guid;
1825
1826 if (vcl)
1827 guid = vcl->conf.guid;
1828 else
1829 guid = ddf->anchor.guid;
1830 uuid_from_ddf_guid(guid, uuid);
1831 }
1832
1833 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1834
1835 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1836 {
1837 struct ddf_super *ddf = st->sb;
1838 int map_disks = info->array.raid_disks;
1839 __u32 *cptr;
1840
1841 if (ddf->currentconf) {
1842 getinfo_super_ddf_bvd(st, info, map);
1843 return;
1844 }
1845 memset(info, 0, sizeof(*info));
1846
1847 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1848 info->array.level = LEVEL_CONTAINER;
1849 info->array.layout = 0;
1850 info->array.md_minor = -1;
1851 cptr = (__u32 *)(ddf->anchor.guid + 16);
1852 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1853
1854 info->array.utime = 0;
1855 info->array.chunk_size = 0;
1856 info->container_enough = 1;
1857
1858 info->disk.major = 0;
1859 info->disk.minor = 0;
1860 if (ddf->dlist) {
1861 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1862 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1863
1864 info->data_offset = be64_to_cpu(ddf->phys->
1865 entries[info->disk.raid_disk].
1866 config_size);
1867 info->component_size = ddf->dlist->size - info->data_offset;
1868 } else {
1869 info->disk.number = -1;
1870 info->disk.raid_disk = -1;
1871 // info->disk.raid_disk = find refnum in the table and use index;
1872 }
1873 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1874
1875 info->recovery_start = MaxSector;
1876 info->reshape_active = 0;
1877 info->recovery_blocked = 0;
1878 info->name[0] = 0;
1879
1880 info->array.major_version = -1;
1881 info->array.minor_version = -2;
1882 strcpy(info->text_version, "ddf");
1883 info->safe_mode_delay = 0;
1884
1885 uuid_from_super_ddf(st, info->uuid);
1886
1887 if (map) {
1888 int i;
1889 for (i = 0 ; i < map_disks; i++) {
1890 if (i < info->array.raid_disks &&
1891 (be16_to_cpu(ddf->phys->entries[i].state)
1892 & DDF_Online) &&
1893 !(be16_to_cpu(ddf->phys->entries[i].state)
1894 & DDF_Failed))
1895 map[i] = 1;
1896 else
1897 map[i] = 0;
1898 }
1899 }
1900 }
1901
1902 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1903 {
1904 struct ddf_super *ddf = st->sb;
1905 struct vcl *vc = ddf->currentconf;
1906 int cd = ddf->currentdev;
1907 int n_prim;
1908 int j;
1909 struct dl *dl;
1910 int map_disks = info->array.raid_disks;
1911 __u32 *cptr;
1912 struct vd_config *conf;
1913
1914 memset(info, 0, sizeof(*info));
1915 if (layout_ddf2md(&vc->conf, &info->array) == -1)
1916 return;
1917 info->array.md_minor = -1;
1918 cptr = (__u32 *)(vc->conf.guid + 16);
1919 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1920 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
1921 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1922 info->custom_array_size = 0;
1923
1924 conf = &vc->conf;
1925 n_prim = be16_to_cpu(conf->prim_elmnt_count);
1926 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
1927 int ibvd = cd / n_prim - 1;
1928 cd %= n_prim;
1929 conf = vc->other_bvds[ibvd];
1930 }
1931
1932 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1933 info->data_offset =
1934 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
1935 if (vc->block_sizes)
1936 info->component_size = vc->block_sizes[cd];
1937 else
1938 info->component_size = be64_to_cpu(conf->blocks);
1939 }
1940
1941 for (dl = ddf->dlist; dl ; dl = dl->next)
1942 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
1943 break;
1944
1945 info->disk.major = 0;
1946 info->disk.minor = 0;
1947 info->disk.state = 0;
1948 if (dl) {
1949 info->disk.major = dl->major;
1950 info->disk.minor = dl->minor;
1951 info->disk.raid_disk = cd + conf->sec_elmnt_seq
1952 * be16_to_cpu(conf->prim_elmnt_count);
1953 info->disk.number = dl->pdnum;
1954 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
1955 }
1956
1957 info->container_member = ddf->currentconf->vcnum;
1958
1959 info->recovery_start = MaxSector;
1960 info->resync_start = 0;
1961 info->reshape_active = 0;
1962 info->recovery_blocked = 0;
1963 if (!(ddf->virt->entries[info->container_member].state
1964 & DDF_state_inconsistent) &&
1965 (ddf->virt->entries[info->container_member].init_state
1966 & DDF_initstate_mask)
1967 == DDF_init_full)
1968 info->resync_start = MaxSector;
1969
1970 uuid_from_super_ddf(st, info->uuid);
1971
1972 info->array.major_version = -1;
1973 info->array.minor_version = -2;
1974 sprintf(info->text_version, "/%s/%d",
1975 st->container_devnm,
1976 info->container_member);
1977 info->safe_mode_delay = 200;
1978
1979 memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
1980 info->name[16]=0;
1981 for(j=0; j<16; j++)
1982 if (info->name[j] == ' ')
1983 info->name[j] = 0;
1984
1985 if (map)
1986 for (j = 0; j < map_disks; j++) {
1987 map[j] = 0;
1988 if (j < info->array.raid_disks) {
1989 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
1990 if (i >= 0 &&
1991 (be16_to_cpu(ddf->phys->entries[i].state)
1992 & DDF_Online) &&
1993 !(be16_to_cpu(ddf->phys->entries[i].state)
1994 & DDF_Failed))
1895 					map[j] = 1;
1996 }
1997 }
1998 }
1999
2000 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
2001 char *update,
2002 char *devname, int verbose,
2003 int uuid_set, char *homehost)
2004 {
2005 /* For 'assemble' and 'force' we need to return non-zero if any
2006 * change was made. For others, the return value is ignored.
2007 * Update options are:
2008 * force-one : This device looks a bit old but needs to be included,
2009 * update age info appropriately.
2010 * assemble: clear any 'faulty' flag to allow this device to
2011 * be assembled.
2012 * force-array: Array is degraded but being forced, mark it clean
2013 * if that will be needed to assemble it.
2014 *
2015 * newdev: not used ????
2016 * grow: Array has gained a new device - this is currently for
2017 * linear only
2018 * resync: mark as dirty so a resync will happen.
2019 * uuid: Change the uuid of the array to match what is given
2020 * homehost: update the recorded homehost
2021 * name: update the name - preserving the homehost
2022 * _reshape_progress: record new reshape_progress position.
2023 *
2024 * Following are not relevant for this version:
2025 	 *  sparc2.2 : update from old dodgy metadata
2026 * super-minor: change the preferred_minor number
2027 * summaries: update redundant counters.
2028 */
2029 int rv = 0;
2030 // struct ddf_super *ddf = st->sb;
2031 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
2032 // struct virtual_entry *ve = find_ve(ddf);
2033
2034 /* we don't need to handle "force-*" or "assemble" as
2035 	 * there is no need to 'trick' the kernel. Once the metadata is
2036 	 * updated to activate the array, all the implied modifications
2037 * will just happen.
2038 */
2039
2040 if (strcmp(update, "grow") == 0) {
2041 /* FIXME */
2042 } else if (strcmp(update, "resync") == 0) {
2043 // info->resync_checkpoint = 0;
2044 } else if (strcmp(update, "homehost") == 0) {
2045 /* homehost is stored in controller->vendor_data,
2046 		 * - at least it is when we are the vendor
2047 */
2048 // if (info->vendor_is_local)
2049 // strcpy(ddf->controller.vendor_data, homehost);
2050 rv = -1;
2051 } else if (strcmp(update, "name") == 0) {
2052 /* name is stored in virtual_entry->name */
2053 // memset(ve->name, ' ', 16);
2054 // strncpy(ve->name, info->name, 16);
2055 rv = -1;
2056 } else if (strcmp(update, "_reshape_progress") == 0) {
2057 /* We don't support reshape yet */
2058 } else if (strcmp(update, "assemble") == 0 ) {
2059 /* Do nothing, just succeed */
2060 rv = 0;
2061 } else
2062 rv = -1;
2063
2064 // update_all_csum(ddf);
2065
2066 return rv;
2067 }
2068
2069 static void make_header_guid(char *guid)
2070 {
2071 be32 stamp;
2072 /* Create a DDF Header of Virtual Disk GUID */
2073
2074 /* 24 bytes of fiction required.
2075 * first 8 are a 'vendor-id' - "Linux-MD"
2076 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2077 * Remaining 8 random number plus timestamp
2078 */
2079 memcpy(guid, T10, sizeof(T10));
2080 stamp = cpu_to_be32(0xdeadbeef);
2081 memcpy(guid+8, &stamp, 4);
2082 stamp = cpu_to_be32(0);
2083 memcpy(guid+12, &stamp, 4);
2084 stamp = cpu_to_be32(time(0) - DECADE);
2085 memcpy(guid+16, &stamp, 4);
2086 stamp._v32 = random32();
2087 memcpy(guid+20, &stamp, 4);
2088 }
2089
2090 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2091 {
2092 unsigned int i;
2093 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2094 if (all_ff(ddf->virt->entries[i].guid))
2095 return i;
2096 }
2097 return DDF_NOTFOUND;
2098 }
2099
2100 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2101 const char *name)
2102 {
2103 unsigned int i;
2104 if (name == NULL)
2105 return DDF_NOTFOUND;
2106 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2107 if (all_ff(ddf->virt->entries[i].guid))
2108 continue;
2109 if (!strncmp(name, ddf->virt->entries[i].name,
2110 sizeof(ddf->virt->entries[i].name)))
2111 return i;
2112 }
2113 return DDF_NOTFOUND;
2114 }
2115
2116 #ifndef MDASSEMBLE
2117 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2118 const char *guid)
2119 {
2120 unsigned int i;
2121 if (guid == NULL || all_ff(guid))
2122 return DDF_NOTFOUND;
2123 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2124 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2125 return i;
2126 return DDF_NOTFOUND;
2127 }
2128 #endif
2129
2130 static int init_super_ddf_bvd(struct supertype *st,
2131 mdu_array_info_t *info,
2132 unsigned long long size,
2133 char *name, char *homehost,
2134 int *uuid, unsigned long long data_offset);
2135
2136 static int init_super_ddf(struct supertype *st,
2137 mdu_array_info_t *info,
2138 unsigned long long size, char *name, char *homehost,
2139 int *uuid, unsigned long long data_offset)
2140 {
2141 /* This is primarily called by Create when creating a new array.
2142 * We will then get add_to_super called for each component, and then
2143 * write_init_super called to write it out to each device.
2144 * For DDF, Create can create on fresh devices or on a pre-existing
2145 * array.
2146 * To create on a pre-existing array a different method will be called.
2147 * This one is just for fresh drives.
2148 *
2149 * We need to create the entire 'ddf' structure which includes:
2150 * DDF headers - these are easy.
2151 	 *  Controller data - a sector describing this controller... not that
2152 	 *  this is exactly a controller.
2153 * Physical Disk Record - one entry per device, so
2154 * leave plenty of space.
2155 * Virtual Disk Records - again, just leave plenty of space.
2156 * This just lists VDs, doesn't give details
2157 * Config records - describes the VDs that use this disk
2158 * DiskData - describes 'this' device.
2159 * BadBlockManagement - empty
2160 * Diag Space - empty
2161 * Vendor Logs - Could we put bitmaps here?
2162 *
2163 */
2164 struct ddf_super *ddf;
2165 char hostname[17];
2166 int hostlen;
2167 int max_phys_disks, max_virt_disks;
2168 unsigned long long sector;
2169 int clen;
2170 int i;
2171 int pdsize, vdsize;
2172 struct phys_disk *pd;
2173 struct virtual_disk *vd;
2174
2175 if (data_offset != INVALID_SECTORS) {
2176 pr_err("data-offset not supported by DDF\n");
2177 return 0;
2178 }
2179
2180 if (st->sb)
2181 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2182 data_offset);
2183
2184 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2185 pr_err("%s could not allocate superblock\n", __func__);
2186 return 0;
2187 }
2188 memset(ddf, 0, sizeof(*ddf));
2189 ddf->dlist = NULL; /* no physical disks yet */
2190 ddf->conflist = NULL; /* No virtual disks yet */
2191 st->sb = ddf;
2192
2193 if (info == NULL) {
2194 /* zeroing superblock */
2195 return 0;
2196 }
2197
2198 /* At least 32MB *must* be reserved for the ddf. So let's just
2199 * start 32MB from the end, and put the primary header there.
2200 * Don't do secondary for now.
2201 * We don't know exactly where that will be yet as it could be
2202 	 * different on each device. So just set up the lengths for now.
2203 *
2204 */
2205
2206 ddf->anchor.magic = DDF_HEADER_MAGIC;
2207 make_header_guid(ddf->anchor.guid);
2208
2209 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2210 ddf->anchor.seq = cpu_to_be32(1);
2211 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2212 ddf->anchor.openflag = 0xFF;
2213 ddf->anchor.foreignflag = 0;
2214 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2215 ddf->anchor.pad0 = 0xff;
2216 memset(ddf->anchor.pad1, 0xff, 12);
2217 memset(ddf->anchor.header_ext, 0xff, 32);
2218 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2219 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2220 ddf->anchor.type = DDF_HEADER_ANCHOR;
2221 memset(ddf->anchor.pad2, 0xff, 3);
2222 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2223 /* Put this at bottom of 32M reserved.. */
2224 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2225 max_phys_disks = 1023; /* Should be enough */
2226 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2227 max_virt_disks = 255;
2228 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks); /* ?? */
2229 ddf->anchor.max_partitions = cpu_to_be16(64); /* ?? */
2230 ddf->max_part = 64;
2231 ddf->mppe = 256;
2232 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
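	/* Worked example with the values chosen above: mppe == 256, so each
	 * config record needs 256 * (4-byte phys_refnum + 8-byte LBA offset)
	 * = 3072 bytes = 6 sectors of payload, plus one sector for the fixed
	 * part of the vd_config, giving conf_rec_len == 7 sectors.
	 */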
2233 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2234 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2235 memset(ddf->anchor.pad3, 0xff, 54);
2236 	/* the controller section is one sector long, immediately
2237 * after the ddf header */
2238 sector = 1;
2239 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2240 ddf->anchor.controller_section_length = cpu_to_be32(1);
2241 sector += 1;
2242
2243 /* phys is 8 sectors after that */
2244 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2245 sizeof(struct phys_disk_entry)*max_phys_disks,
2246 512);
2247 switch(pdsize/512) {
2248 case 2: case 8: case 32: case 128: case 512: break;
2249 default: abort();
2250 }
2251 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2252 ddf->anchor.phys_section_length =
2253 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2254 sector += pdsize/512;
2255
2256 /* virt is another 32 sectors */
2257 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2258 sizeof(struct virtual_entry) * max_virt_disks,
2259 512);
2260 switch(vdsize/512) {
2261 case 2: case 8: case 32: case 128: case 512: break;
2262 default: abort();
2263 }
2264 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2265 ddf->anchor.virt_section_length =
2266 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2267 sector += vdsize/512;
2268
2269 clen = ddf->conf_rec_len * (ddf->max_part+1);
2270 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2271 ddf->anchor.config_section_length = cpu_to_be32(clen);
2272 sector += clen;
2273
2274 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2275 ddf->anchor.data_section_length = cpu_to_be32(1);
2276 sector += 1;
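	/* Rough picture of the layout built above (offsets are in sectors,
	 * relative to the header they are read from; the absolute location,
	 * primary_lba/secondary_lba, is only chosen in _write_super_to_disk):
	 *   +0     ddf_header
	 *   +1     controller data        (1 sector)
	 *   +2     phys disk records      (pdsize/512 sectors)
	 *   ...    virtual disk records   (vdsize/512 sectors)
	 *   ...    config records         (clen sectors)
	 *   ...    disk data              (1 sector)
	 */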
2277
2278 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2279 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2280 ddf->anchor.diag_space_length = cpu_to_be32(0);
2281 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2282 ddf->anchor.vendor_length = cpu_to_be32(0);
2283 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2284
2285 memset(ddf->anchor.pad4, 0xff, 256);
2286
2287 memcpy(&ddf->primary, &ddf->anchor, 512);
2288 memcpy(&ddf->secondary, &ddf->anchor, 512);
2289
2290 ddf->primary.openflag = 1; /* I guess.. */
2291 ddf->primary.type = DDF_HEADER_PRIMARY;
2292
2293 ddf->secondary.openflag = 1; /* I guess.. */
2294 ddf->secondary.type = DDF_HEADER_SECONDARY;
2295
2296 ddf->active = &ddf->primary;
2297
2298 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2299
2300 /* 24 more bytes of fiction required.
2301 * first 8 are a 'vendor-id' - "Linux-MD"
2302 * Remaining 16 are serial number.... maybe a hostname would do?
2303 */
2304 memcpy(ddf->controller.guid, T10, sizeof(T10));
2305 gethostname(hostname, sizeof(hostname));
2306 hostname[sizeof(hostname) - 1] = 0;
2307 hostlen = strlen(hostname);
2308 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2309 for (i = strlen(T10) ; i+hostlen < 24; i++)
2310 ddf->controller.guid[i] = ' ';
2311
2312 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2313 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2314 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2315 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2316 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2317 memset(ddf->controller.pad, 0xff, 8);
2318 memset(ddf->controller.vendor_data, 0xff, 448);
2319 if (homehost && strlen(homehost) < 440)
2320 strcpy((char*)ddf->controller.vendor_data, homehost);
2321
2322 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2323 pr_err("%s could not allocate pd\n", __func__);
2324 return 0;
2325 }
2326 ddf->phys = pd;
2327 ddf->pdsize = pdsize;
2328
2329 memset(pd, 0xff, pdsize);
2330 memset(pd, 0, sizeof(*pd));
2331 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2332 pd->used_pdes = cpu_to_be16(0);
2333 pd->max_pdes = cpu_to_be16(max_phys_disks);
2334 memset(pd->pad, 0xff, 52);
2335 for (i = 0; i < max_phys_disks; i++)
2336 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2337
2338 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2339 pr_err("%s could not allocate vd\n", __func__);
2340 return 0;
2341 }
2342 ddf->virt = vd;
2343 ddf->vdsize = vdsize;
2344 memset(vd, 0, vdsize);
2345 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2346 vd->populated_vdes = cpu_to_be16(0);
2347 vd->max_vdes = cpu_to_be16(max_virt_disks);
2348 memset(vd->pad, 0xff, 52);
2349
2350 for (i=0; i<max_virt_disks; i++)
2351 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2352
2353 st->sb = ddf;
2354 ddf_set_updates_pending(ddf);
2355 return 1;
2356 }
2357
2358 static int chunk_to_shift(int chunksize)
2359 {
2360 return ffs(chunksize/512)-1;
2361 }
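/* e.g. chunk_to_shift(65536) == ffs(128) - 1 == 7, which round-trips as
 * 512 << 7 == 64K; a 512-byte chunk maps to shift 0.
 */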
2362
2363 #ifndef MDASSEMBLE
2364 struct extent {
2365 unsigned long long start, size;
2366 };
2367 static int cmp_extent(const void *av, const void *bv)
2368 {
2369 const struct extent *a = av;
2370 const struct extent *b = bv;
2371 if (a->start < b->start)
2372 return -1;
2373 if (a->start > b->start)
2374 return 1;
2375 return 0;
2376 }
2377
2378 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2379 {
2380 	/* find a list of used extents on the given physical device
2381 	 * (dl) of the given ddf.
2382 * Return a malloced array of 'struct extent'
2383
2384 * FIXME ignore DDF_Legacy devices?
2385
2386 */
2387 struct extent *rv;
2388 int n = 0;
2389 unsigned int i;
2390 __u16 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2391
2392 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2393 return NULL;
2394
2395 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2396
2397 for (i = 0; i < ddf->max_part; i++) {
2398 const struct vd_config *bvd;
2399 unsigned int ibvd;
2400 struct vcl *v = dl->vlist[i];
2401 if (v == NULL ||
2402 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2403 &bvd, &ibvd) == DDF_NOTFOUND)
2404 continue;
2405 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2406 rv[n].size = be64_to_cpu(bvd->blocks);
2407 n++;
2408 }
2409 qsort(rv, n, sizeof(*rv), cmp_extent);
2410
2411 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2412 rv[n].size = 0;
2413 return rv;
2414 }
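/* The returned array is terminated by a sentinel whose .size is 0 and whose
 * .start is the disk's config_size (the end of usable data space).  Typical
 * use by the callers below (sketch):
 *
 *	struct extent *e = get_extents(ddf, dl);
 *	unsigned long long pos = 0;
 *	int i = 0;
 *	if (e) do {
 *		unsigned long long gap = e[i].start - pos;
 *		// 'gap' sectors are free between pos and e[i].start
 *		pos = e[i].start + e[i].size;
 *		i++;
 *	} while (e[i-1].size);
 *	free(e);
 */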
2415 #endif
2416
2417 static int init_super_ddf_bvd(struct supertype *st,
2418 mdu_array_info_t *info,
2419 unsigned long long size,
2420 char *name, char *homehost,
2421 int *uuid, unsigned long long data_offset)
2422 {
2423 	/* We are creating a BVD inside a pre-existing container,
2424 * so st->sb is already set.
2425 * We need to create a new vd_config and a new virtual_entry
2426 */
2427 struct ddf_super *ddf = st->sb;
2428 unsigned int venum, i;
2429 struct virtual_entry *ve;
2430 struct vcl *vcl;
2431 struct vd_config *vc;
2432
2433 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2434 pr_err("This ddf already has an array called %s\n", name);
2435 return 0;
2436 }
2437 venum = find_unused_vde(ddf);
2438 if (venum == DDF_NOTFOUND) {
2439 pr_err("Cannot find spare slot for virtual disk\n");
2440 return 0;
2441 }
2442 ve = &ddf->virt->entries[venum];
2443
2444 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2445 * timestamp, random number
2446 */
2447 make_header_guid(ve->guid);
2448 ve->unit = cpu_to_be16(info->md_minor);
2449 ve->pad0 = 0xFFFF;
2450 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2451 DDF_GUID_LEN);
2452 ve->type = cpu_to_be16(0);
2453 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2454 if (info->state & 1) /* clean */
2455 ve->init_state = DDF_init_full;
2456 else
2457 ve->init_state = DDF_init_not;
2458
2459 memset(ve->pad1, 0xff, 14);
2460 memset(ve->name, ' ', 16);
2461 if (name)
2462 strncpy(ve->name, name, 16);
2463 ddf->virt->populated_vdes =
2464 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2465
2466 /* Now create a new vd_config */
2467 if (posix_memalign((void**)&vcl, 512,
2468 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2469 pr_err("%s could not allocate vd_config\n", __func__);
2470 return 0;
2471 }
2472 vcl->vcnum = venum;
2473 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2474 vc = &vcl->conf;
2475
2476 vc->magic = DDF_VD_CONF_MAGIC;
2477 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2478 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2479 vc->seqnum = cpu_to_be32(1);
2480 memset(vc->pad0, 0xff, 24);
2481 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2482 if (layout_md2ddf(info, vc) == -1 ||
2483 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2484 pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
2485 __func__, info->level, info->layout, info->raid_disks);
2486 free(vcl);
2487 return 0;
2488 }
2489 vc->sec_elmnt_seq = 0;
2490 if (alloc_other_bvds(ddf, vcl) != 0) {
2491 pr_err("%s could not allocate other bvds\n",
2492 __func__);
2493 free(vcl);
2494 return 0;
2495 }
2496 vc->blocks = cpu_to_be64(info->size * 2);
2497 vc->array_blocks = cpu_to_be64(
2498 calc_array_size(info->level, info->raid_disks, info->layout,
2499 info->chunk_size, info->size*2));
2500 memset(vc->pad1, 0xff, 8);
2501 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2502 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2503 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2504 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2505 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2506 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2507 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2508 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2509 memset(vc->cache_pol, 0, 8);
2510 vc->bg_rate = 0x80;
2511 memset(vc->pad2, 0xff, 3);
2512 memset(vc->pad3, 0xff, 52);
2513 memset(vc->pad4, 0xff, 192);
2514 memset(vc->v0, 0xff, 32);
2515 memset(vc->v1, 0xff, 32);
2516 memset(vc->v2, 0xff, 16);
2517 memset(vc->v3, 0xff, 16);
2518 memset(vc->vendor, 0xff, 32);
2519
2520 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2521 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
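	/* The config record payload is mppe 32-bit phys_refnum slots
	 * (0xffffffff == unused) immediately followed by mppe 64-bit starting
	 * LBAs; the LBA half is reached elsewhere via LBA_OFFSET(ddf, vc).
	 */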
2522
2523 for (i = 1; i < vc->sec_elmnt_count; i++) {
2524 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2525 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2526 }
2527
2528 vcl->next = ddf->conflist;
2529 ddf->conflist = vcl;
2530 ddf->currentconf = vcl;
2531 ddf_set_updates_pending(ddf);
2532 return 1;
2533 }
2534
2535
2536 #ifndef MDASSEMBLE
2537 static int get_svd_state(const struct ddf_super *, const struct vcl *);
2538
2539 static void add_to_super_ddf_bvd(struct supertype *st,
2540 mdu_disk_info_t *dk, int fd, char *devname)
2541 {
2542 /* fd and devname identify a device with-in the ddf container (st).
2543 * dk identifies a location in the new BVD.
2544 * We need to find suitable free space in that device and update
2545 * the phys_refnum and lba_offset for the newly created vd_config.
2546 * We might also want to update the type in the phys_disk
2547 * section.
2548 *
2549 * Alternately: fd == -1 and we have already chosen which device to
2550 	 * use and recorded it in dl->raiddisk.
2551 */
2552 struct dl *dl;
2553 struct ddf_super *ddf = st->sb;
2554 struct vd_config *vc;
2555 unsigned int i;
2556 unsigned long long blocks, pos, esize;
2557 struct extent *ex;
2558 unsigned int raid_disk = dk->raid_disk;
2559
2560 if (fd == -1) {
2561 for (dl = ddf->dlist; dl ; dl = dl->next)
2562 if (dl->raiddisk == dk->raid_disk)
2563 break;
2564 } else {
2565 for (dl = ddf->dlist; dl ; dl = dl->next)
2566 if (dl->major == dk->major &&
2567 dl->minor == dk->minor)
2568 break;
2569 }
2570 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
2571 return;
2572
2573 vc = &ddf->currentconf->conf;
2574 if (vc->sec_elmnt_count > 1) {
2575 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2576 if (raid_disk >= n)
2577 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2578 raid_disk %= n;
2579 }
2580
2581 ex = get_extents(ddf, dl);
2582 if (!ex)
2583 return;
2584
2585 i = 0; pos = 0;
2586 blocks = be64_to_cpu(vc->blocks);
2587 if (ddf->currentconf->block_sizes)
2588 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2589
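	/* First-fit: walk the gaps between used extents (the list from
	 * get_extents() ends with a zero-size sentinel at config_size) until
	 * one is big enough for 'blocks'.
	 */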
2590 do {
2591 esize = ex[i].start - pos;
2592 if (esize >= blocks)
2593 break;
2594 pos = ex[i].start + ex[i].size;
2595 i++;
2596 } while (ex[i-1].size);
2597
2598 free(ex);
2599 if (esize < blocks)
2600 return;
2601
2602 ddf->currentdev = dk->raid_disk;
2603 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2604 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2605
2606 for (i = 0; i < ddf->max_part ; i++)
2607 if (dl->vlist[i] == NULL)
2608 break;
2609 if (i == ddf->max_part)
2610 return;
2611 dl->vlist[i] = ddf->currentconf;
2612
2613 if (fd >= 0)
2614 dl->fd = fd;
2615 if (devname)
2616 dl->devname = devname;
2617
2618 /* Check if we can mark array as optimal yet */
2619 i = ddf->currentconf->vcnum;
2620 ddf->virt->entries[i].state =
2621 (ddf->virt->entries[i].state & ~DDF_state_mask)
2622 | get_svd_state(ddf, ddf->currentconf);
2623 be16_clear(ddf->phys->entries[dl->pdnum].type,
2624 cpu_to_be16(DDF_Global_Spare));
2625 be16_set(ddf->phys->entries[dl->pdnum].type,
2626 cpu_to_be16(DDF_Active_in_VD));
2627 dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
2628 __func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
2629 ddf->currentconf->vcnum, guid_str(vc->guid),
2630 dk->raid_disk);
2631 ddf_set_updates_pending(ddf);
2632 }
2633
2634 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2635 {
2636 unsigned int i;
2637 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2638 if (all_ff(ddf->phys->entries[i].guid))
2639 return i;
2640 }
2641 return DDF_NOTFOUND;
2642 }
2643
2644 /* add a device to a container, either while creating it or while
2645 * expanding a pre-existing container
2646 */
2647 static int add_to_super_ddf(struct supertype *st,
2648 mdu_disk_info_t *dk, int fd, char *devname,
2649 unsigned long long data_offset)
2650 {
2651 struct ddf_super *ddf = st->sb;
2652 struct dl *dd;
2653 time_t now;
2654 struct tm *tm;
2655 unsigned long long size;
2656 struct phys_disk_entry *pde;
2657 unsigned int n, i;
2658 struct stat stb;
2659 __u32 *tptr;
2660
2661 if (ddf->currentconf) {
2662 add_to_super_ddf_bvd(st, dk, fd, devname);
2663 return 0;
2664 }
2665
2666 /* This is device numbered dk->number. We need to create
2667 * a phys_disk entry and a more detailed disk_data entry.
2668 */
2669 fstat(fd, &stb);
2670 n = find_unused_pde(ddf);
2671 if (n == DDF_NOTFOUND) {
2672 pr_err("%s: No free slot in array, cannot add disk\n",
2673 __func__);
2674 return 1;
2675 }
2676 pde = &ddf->phys->entries[n];
2677 get_dev_size(fd, NULL, &size);
2678 if (size <= 32*1024*1024) {
2679 pr_err("%s: device size must be at least 32MB\n",
2680 __func__);
2681 return 1;
2682 }
2683 size >>= 9;
2684
2685 if (posix_memalign((void**)&dd, 512,
2686 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2687 pr_err("%s could allocate buffer for new disk, aborting\n",
2688 __func__);
2689 return 1;
2690 }
2691 dd->major = major(stb.st_rdev);
2692 dd->minor = minor(stb.st_rdev);
2693 dd->devname = devname;
2694 dd->fd = fd;
2695 dd->spare = NULL;
2696
2697 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2698 now = time(0);
2699 tm = localtime(&now);
2700 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2701 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2702 tptr = (__u32 *)(dd->disk.guid + 16);
2703 *tptr++ = random32();
2704 *tptr = random32();
2705
2706 do {
2707 		/* Cannot be bothered finding a CRC of some irrelevant details */
2708 dd->disk.refnum._v32 = random32();
2709 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2710 i > 0; i--)
2711 if (be32_eq(ddf->phys->entries[i-1].refnum,
2712 dd->disk.refnum))
2713 break;
2714 } while (i > 0);
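	/* The loop retries until the random refnum does not collide with any
	 * refnum already present in the phys disk table (i reaches 0 only
	 * when no entry matched).
	 */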
2715
2716 dd->disk.forced_ref = 1;
2717 dd->disk.forced_guid = 1;
2718 memset(dd->disk.vendor, ' ', 32);
2719 memcpy(dd->disk.vendor, "Linux", 5);
2720 memset(dd->disk.pad, 0xff, 442);
2721 for (i = 0; i < ddf->max_part ; i++)
2722 dd->vlist[i] = NULL;
2723
2724 dd->pdnum = n;
2725
2726 if (st->update_tail) {
2727 int len = (sizeof(struct phys_disk) +
2728 sizeof(struct phys_disk_entry));
2729 struct phys_disk *pd;
2730
2731 pd = xmalloc(len);
2732 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2733 pd->used_pdes = cpu_to_be16(n);
2734 pde = &pd->entries[0];
2735 dd->mdupdate = pd;
2736 } else
2737 ddf->phys->used_pdes = cpu_to_be16(
2738 1 + be16_to_cpu(ddf->phys->used_pdes));
2739
2740 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2741 pde->refnum = dd->disk.refnum;
2742 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2743 pde->state = cpu_to_be16(DDF_Online);
2744 dd->size = size;
2745 /*
2746 * If there is already a device in dlist, try to reserve the same
2747 * amount of workspace. Otherwise, use 32MB.
2748 * We checked disk size above already.
2749 */
2750 #define __calc_lba(new, old, lba, mb) do { \
2751 unsigned long long dif; \
2752 if ((old) != NULL) \
2753 dif = (old)->size - be64_to_cpu((old)->lba); \
2754 else \
2755 dif = (new)->size; \
2756 if ((new)->size > dif) \
2757 (new)->lba = cpu_to_be64((new)->size - dif); \
2758 else \
2759 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2760 } while (0)
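	/* Sketch of the defaults this produces: for the first disk added
	 * (ddf->dlist == NULL) dif equals the new disk's size, so the 'mb'
	 * fallback is used and each LBA lands mb MiB from the end of the
	 * device (mb*1024*2 sectors): workspace and secondary 32MiB from the
	 * end, primary 16MiB from the end.  Later disks copy the
	 * distance-from-end of the disk at the head of dlist when it fits.
	 */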
2761 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2762 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2763 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2764 pde->config_size = dd->workspace_lba;
2765
2766 	sprintf(pde->path, "%17.17s", "Information: nil");
2767 memset(pde->pad, 0xff, 6);
2768
2769 if (st->update_tail) {
2770 dd->next = ddf->add_list;
2771 ddf->add_list = dd;
2772 } else {
2773 dd->next = ddf->dlist;
2774 ddf->dlist = dd;
2775 ddf_set_updates_pending(ddf);
2776 }
2777
2778 return 0;
2779 }
2780
2781 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2782 {
2783 struct ddf_super *ddf = st->sb;
2784 struct dl *dl;
2785
2786 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2787 * disappeared from the container.
2788 * We need to arrange that it disappears from the metadata and
2789 * internal data structures too.
2790 * Most of the work is done by ddf_process_update which edits
2791 * the metadata and closes the file handle and attaches the memory
2792 * where free_updates will free it.
2793 */
2794 for (dl = ddf->dlist; dl ; dl = dl->next)
2795 if (dl->major == dk->major &&
2796 dl->minor == dk->minor)
2797 break;
2798 if (!dl)
2799 return -1;
2800
2801 if (st->update_tail) {
2802 int len = (sizeof(struct phys_disk) +
2803 sizeof(struct phys_disk_entry));
2804 struct phys_disk *pd;
2805
2806 pd = xmalloc(len);
2807 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2808 pd->used_pdes = cpu_to_be16(dl->pdnum);
2809 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2810 append_metadata_update(st, pd, len);
2811 }
2812 return 0;
2813 }
2814 #endif
2815
2816 /*
2817 * This is the write_init_super method for a ddf container. It is
2818 * called when creating a container or adding another device to a
2819 * container.
2820 */
2821 #define NULL_CONF_SZ 4096
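/* __write_ddf_structure() writes one copy of the metadata (primary or
 * secondary) in the order fixed by the section offsets set up in
 * init_super_ddf(): header, controller data, phys disk records, virtual
 * disk records, one config record per partition slot plus a spare-record
 * slot, and finally this disk's disk-data sector.  The header is written
 * twice: first with openflag set, then again with openflag clear and a
 * fresh CRC once the body has been written.
 */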
2822
2823 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
2824 {
2825 unsigned long long sector;
2826 struct ddf_header *header;
2827 int fd, i, n_config, conf_size, buf_size;
2828 int ret = 0;
2829 char *conf;
2830
2831 fd = d->fd;
2832
2833 switch (type) {
2834 case DDF_HEADER_PRIMARY:
2835 header = &ddf->primary;
2836 sector = be64_to_cpu(header->primary_lba);
2837 break;
2838 case DDF_HEADER_SECONDARY:
2839 header = &ddf->secondary;
2840 sector = be64_to_cpu(header->secondary_lba);
2841 break;
2842 default:
2843 return 0;
2844 }
2845
2846 header->type = type;
2847 header->openflag = 1;
2848 header->crc = calc_crc(header, 512);
2849
2850 lseek64(fd, sector<<9, 0);
2851 if (write(fd, header, 512) < 0)
2852 goto out;
2853
2854 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2855 if (write(fd, &ddf->controller, 512) < 0)
2856 goto out;
2857
2858 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2859 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2860 goto out;
2861 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2862 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2863 goto out;
2864
2865 /* Now write lots of config records. */
2866 n_config = ddf->max_part;
2867 conf_size = ddf->conf_rec_len * 512;
2868 conf = ddf->conf;
2869 buf_size = conf_size * (n_config + 1);
2870 if (!conf) {
2871 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
2872 goto out;
2873 ddf->conf = conf;
2874 }
2875 for (i = 0 ; i <= n_config ; i++) {
2876 struct vcl *c;
2877 struct vd_config *vdc = NULL;
2878 if (i == n_config) {
2879 c = (struct vcl *)d->spare;
2880 if (c)
2881 vdc = &c->conf;
2882 } else {
2883 unsigned int dummy;
2884 c = d->vlist[i];
2885 if (c)
2886 get_pd_index_from_refnum(
2887 c, d->disk.refnum,
2888 ddf->mppe,
2889 (const struct vd_config **)&vdc,
2890 &dummy);
2891 }
2892 if (c) {
2893 dprintf("writing conf record %i on disk %08x for %s/%u\n",
2894 i, be32_to_cpu(d->disk.refnum),
2895 guid_str(vdc->guid),
2896 vdc->sec_elmnt_seq);
2897 vdc->seqnum = header->seq;
2898 vdc->crc = calc_crc(vdc, conf_size);
2899 memcpy(conf + i*conf_size, vdc, conf_size);
2900 } else
2901 memset(conf + i*conf_size, 0xff, conf_size);
2902 }
2903 if (write(fd, conf, buf_size) != buf_size)
2904 goto out;
2905
2906 d->disk.crc = calc_crc(&d->disk, 512);
2907 if (write(fd, &d->disk, 512) < 0)
2908 goto out;
2909
2910 ret = 1;
2911 out:
2912 header->openflag = 0;
2913 header->crc = calc_crc(header, 512);
2914
2915 lseek64(fd, sector<<9, 0);
2916 if (write(fd, header, 512) < 0)
2917 ret = 0;
2918
2919 return ret;
2920 }
2921
2922 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
2923 {
2924 unsigned long long size;
2925 int fd = d->fd;
2926 if (fd < 0)
2927 return 0;
2928
2929 /* We need to fill in the primary, (secondary) and workspace
2930 	 * LBAs in the headers and set their checksums.
2931 * Also checksum phys, virt....
2932 *
2933 * Then write everything out, finally the anchor is written.
2934 */
2935 get_dev_size(fd, NULL, &size);
2936 size /= 512;
2937 if (be64_to_cpu(d->workspace_lba) != 0ULL)
2938 ddf->anchor.workspace_lba = d->workspace_lba;
2939 else
2940 ddf->anchor.workspace_lba =
2941 cpu_to_be64(size - 32*1024*2);
2942 if (be64_to_cpu(d->primary_lba) != 0ULL)
2943 ddf->anchor.primary_lba = d->primary_lba;
2944 else
2945 ddf->anchor.primary_lba =
2946 cpu_to_be64(size - 16*1024*2);
2947 if (be64_to_cpu(d->secondary_lba) != 0ULL)
2948 ddf->anchor.secondary_lba = d->secondary_lba;
2949 else
2950 ddf->anchor.secondary_lba =
2951 cpu_to_be64(size - 32*1024*2);
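	/* The fallbacks above mirror the defaults chosen in add_to_super_ddf:
	 * workspace and secondary header 32MiB from the end of the device,
	 * primary header 16MiB from the end.  The anchor itself always goes
	 * in the very last sector (size - 1), written at the end of this
	 * function.
	 */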
2952 ddf->anchor.seq = ddf->active->seq;
2953 memcpy(&ddf->primary, &ddf->anchor, 512);
2954 memcpy(&ddf->secondary, &ddf->anchor, 512);
2955
2956 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
2957 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
2958 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
2959
2960 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
2961 return 0;
2962
2963 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
2964 return 0;
2965
2966 lseek64(fd, (size-1)*512, SEEK_SET);
2967 if (write(fd, &ddf->anchor, 512) < 0)
2968 return 0;
2969
2970 return 1;
2971 }
2972
2973 #ifndef MDASSEMBLE
2974 static int __write_init_super_ddf(struct supertype *st)
2975 {
2976 struct ddf_super *ddf = st->sb;
2977 struct dl *d;
2978 int attempts = 0;
2979 int successes = 0;
2980
2981 pr_state(ddf, __func__);
2982
2983 /* try to write updated metadata,
2984 * if we catch a failure move on to the next disk
2985 */
2986 for (d = ddf->dlist; d; d=d->next) {
2987 attempts++;
2988 successes += _write_super_to_disk(ddf, d);
2989 }
2990
2991 return attempts != successes;
2992 }
2993
2994 static int write_init_super_ddf(struct supertype *st)
2995 {
2996 struct ddf_super *ddf = st->sb;
2997 struct vcl *currentconf = ddf->currentconf;
2998
2999 	/* we are done with currentconf; reset it so st points at the container */
3000 ddf->currentconf = NULL;
3001
3002 if (st->update_tail) {
3003 /* queue the virtual_disk and vd_config as metadata updates */
3004 struct virtual_disk *vd;
3005 struct vd_config *vc;
3006 int len, tlen;
3007 unsigned int i;
3008
3009 if (!currentconf) {
3010 int len = (sizeof(struct phys_disk) +
3011 sizeof(struct phys_disk_entry));
3012
3013 /* adding a disk to the container. */
3014 if (!ddf->add_list)
3015 return 0;
3016
3017 append_metadata_update(st, ddf->add_list->mdupdate, len);
3018 ddf->add_list->mdupdate = NULL;
3019 return 0;
3020 }
3021
3022 /* Newly created VD */
3023
3024 /* First the virtual disk. We have a slightly fake header */
3025 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3026 vd = xmalloc(len);
3027 *vd = *ddf->virt;
3028 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3029 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3030 append_metadata_update(st, vd, len);
3031
3032 /* Then the vd_config */
3033 len = ddf->conf_rec_len * 512;
3034 tlen = len * currentconf->conf.sec_elmnt_count;
3035 vc = xmalloc(tlen);
3036 memcpy(vc, &currentconf->conf, len);
3037 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3038 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3039 len);
3040 append_metadata_update(st, vc, tlen);
3041
3042 /* FIXME I need to close the fds! */
3043 return 0;
3044 } else {
3045 struct dl *d;
3046 if (!currentconf)
3047 for (d = ddf->dlist; d; d=d->next)
3048 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3049 return __write_init_super_ddf(st);
3050 }
3051 }
3052
3053 #endif
3054
3055 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3056 unsigned long long data_offset)
3057 {
3058 /* We must reserve the last 32Meg */
3059 if (devsize <= 32*1024*2)
3060 return 0;
3061 return devsize - 32*1024*2;
3062 }
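/* 32*1024*2 == 65536 sectors == 32MiB.  For example a 100GiB device
 * (209715200 sectors) reports 209649664 usable sectors.
 */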
3063
3064 #ifndef MDASSEMBLE
3065
3066 static int reserve_space(struct supertype *st, int raiddisks,
3067 unsigned long long size, int chunk,
3068 unsigned long long *freesize)
3069 {
3070 /* Find 'raiddisks' spare extents at least 'size' big (but
3071 * only caring about multiples of 'chunk') and remember
3072 * them.
3073 	 * If they cannot be found, fail.
3074 */
3075 struct dl *dl;
3076 struct ddf_super *ddf = st->sb;
3077 int cnt = 0;
3078
3079 for (dl = ddf->dlist; dl ; dl=dl->next) {
3080 dl->raiddisk = -1;
3081 dl->esize = 0;
3082 }
3083 /* Now find largest extent on each device */
3084 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3085 struct extent *e = get_extents(ddf, dl);
3086 unsigned long long pos = 0;
3087 int i = 0;
3088 int found = 0;
3089 unsigned long long minsize = size;
3090
3091 if (size == 0)
3092 minsize = chunk;
3093
3094 if (!e)
3095 continue;
3096 do {
3097 unsigned long long esize;
3098 esize = e[i].start - pos;
3099 if (esize >= minsize) {
3100 found = 1;
3101 minsize = esize;
3102 }
3103 pos = e[i].start + e[i].size;
3104 i++;
3105 } while (e[i-1].size);
3106 if (found) {
3107 cnt++;
3108 dl->esize = minsize;
3109 }
3110 free(e);
3111 }
3112 if (cnt < raiddisks) {
3113 pr_err("not enough devices with space to create array.\n");
3114 		return 0; /* Not enough free extents large enough */
3115 }
3116 if (size == 0) {
3117 		/* choose the largest size of which there are at least 'raiddisks' */
3118 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3119 struct dl *dl2;
3120 if (dl->esize <= size)
3121 continue;
3122 /* This is bigger than 'size', see if there are enough */
3123 cnt = 0;
3124 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3125 if (dl2->esize >= dl->esize)
3126 cnt++;
3127 if (cnt >= raiddisks)
3128 size = dl->esize;
3129 }
3130 if (chunk) {
3131 size = size / chunk;
3132 size *= chunk;
3133 }
3134 *freesize = size;
3135 if (size < 32) {
3136 pr_err("not enough spare devices to create array.\n");
3137 return 0;
3138 }
3139 }
3140 	/* We now have a 'size' for which enough devices have free space.
3141 	 * Simply do a first-fit allocation. */
3142 cnt = 0;
3143 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3144 if (dl->esize < size)
3145 continue;
3146
3147 dl->raiddisk = cnt;
3148 cnt++;
3149 }
3150 return 1;
3151 }
3152
3153 static int
3154 validate_geometry_ddf_container(struct supertype *st,
3155 int level, int layout, int raiddisks,
3156 int chunk, unsigned long long size,
3157 unsigned long long data_offset,
3158 char *dev, unsigned long long *freesize,
3159 int verbose);
3160
3161 static int validate_geometry_ddf_bvd(struct supertype *st,
3162 int level, int layout, int raiddisks,
3163 int *chunk, unsigned long long size,
3164 unsigned long long data_offset,
3165 char *dev, unsigned long long *freesize,
3166 int verbose);
3167
3168 static int validate_geometry_ddf(struct supertype *st,
3169 int level, int layout, int raiddisks,
3170 int *chunk, unsigned long long size,
3171 unsigned long long data_offset,
3172 char *dev, unsigned long long *freesize,
3173 int verbose)
3174 {
3175 int fd;
3176 struct mdinfo *sra;
3177 int cfd;
3178
3179 /* ddf potentially supports lots of things, but it depends on
3180 * what devices are offered (and maybe kernel version?)
3181 * If given unused devices, we will make a container.
3182 * If given devices in a container, we will make a BVD.
3183 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3184 */
3185
3186 if (*chunk == UnSet)
3187 *chunk = DEFAULT_CHUNK;
3188
3189 if (level == -1000000) level = LEVEL_CONTAINER;
3190 if (level == LEVEL_CONTAINER) {
3191 /* Must be a fresh device to add to a container */
3192 return validate_geometry_ddf_container(st, level, layout,
3193 raiddisks, *chunk,
3194 size, data_offset, dev,
3195 freesize,
3196 verbose);
3197 }
3198
3199 if (!dev) {
3200 mdu_array_info_t array = {
3201 .level = level, .layout = layout,
3202 .raid_disks = raiddisks
3203 };
3204 struct vd_config conf;
3205 if (layout_md2ddf(&array, &conf) == -1) {
3206 if (verbose)
3207 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3208 level, layout, raiddisks);
3209 return 0;
3210 }
3211 /* Should check layout? etc */
3212
3213 if (st->sb && freesize) {
3214 /* --create was given a container to create in.
3215 * So we need to check that there are enough
3216 * free spaces and return the amount of space.
3217 * We may as well remember which drives were
3218 * chosen so that add_to_super/getinfo_super
3219 * can return them.
3220 */
3221 return reserve_space(st, raiddisks, size, *chunk, freesize);
3222 }
3223 return 1;
3224 }
3225
3226 if (st->sb) {
3227 /* A container has already been opened, so we are
3228 * creating in there. Maybe a BVD, maybe an SVD.
3229 * Should make a distinction one day.
3230 */
3231 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3232 chunk, size, data_offset, dev,
3233 freesize,
3234 verbose);
3235 }
3236 /* This is the first device for the array.
3237 * If it is a container, we read it in and do automagic allocations,
3238 * no other devices should be given.
3239 * Otherwise it must be a member device of a container, and we
3240 * do manual allocation.
3241 * Later we should check for a BVD and make an SVD.
3242 */
3243 fd = open(dev, O_RDONLY|O_EXCL, 0);
3244 if (fd >= 0) {
3245 sra = sysfs_read(fd, NULL, GET_VERSION);
3246 close(fd);
3247 if (sra && sra->array.major_version == -1 &&
3248 strcmp(sra->text_version, "ddf") == 0) {
3249
3250 /* load super */
3251 /* find space for 'n' devices. */
3252 /* remember the devices */
3253 /* Somehow return the fact that we have enough */
3254 }
3255
3256 if (verbose)
3257 pr_err("ddf: Cannot create this array "
3258 "on device %s - a container is required.\n",
3259 dev);
3260 return 0;
3261 }
3262 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3263 if (verbose)
3264 pr_err("ddf: Cannot open %s: %s\n",
3265 dev, strerror(errno));
3266 return 0;
3267 }
3268 /* Well, it is in use by someone, maybe a 'ddf' container. */
3269 cfd = open_container(fd);
3270 if (cfd < 0) {
3271 close(fd);
3272 if (verbose)
3273 pr_err("ddf: Cannot use %s: %s\n",
3274 dev, strerror(EBUSY));
3275 return 0;
3276 }
3277 sra = sysfs_read(cfd, NULL, GET_VERSION);
3278 close(fd);
3279 if (sra && sra->array.major_version == -1 &&
3280 strcmp(sra->text_version, "ddf") == 0) {
3281 /* This is a member of a ddf container. Load the container
3282 * and try to create a bvd
3283 */
3284 struct ddf_super *ddf;
3285 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3286 st->sb = ddf;
3287 strcpy(st->container_devnm, fd2devnm(cfd));
3288 close(cfd);
3289 return validate_geometry_ddf_bvd(st, level, layout,
3290 raiddisks, chunk, size,
3291 data_offset,
3292 dev, freesize,
3293 verbose);
3294 }
3295 close(cfd);
3296 } else /* device may belong to a different container */
3297 return 0;
3298
3299 return 1;
3300 }
3301
3302 static int
3303 validate_geometry_ddf_container(struct supertype *st,
3304 int level, int layout, int raiddisks,
3305 int chunk, unsigned long long size,
3306 unsigned long long data_offset,
3307 char *dev, unsigned long long *freesize,
3308 int verbose)
3309 {
3310 int fd;
3311 unsigned long long ldsize;
3312
3313 if (level != LEVEL_CONTAINER)
3314 return 0;
3315 if (!dev)
3316 return 1;
3317
3318 fd = open(dev, O_RDONLY|O_EXCL, 0);
3319 if (fd < 0) {
3320 if (verbose)
3321 pr_err("ddf: Cannot open %s: %s\n",
3322 dev, strerror(errno));
3323 return 0;
3324 }
3325 if (!get_dev_size(fd, dev, &ldsize)) {
3326 close(fd);
3327 return 0;
3328 }
3329 close(fd);
3330
3331 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3332 if (*freesize == 0)
3333 return 0;
3334
3335 return 1;
3336 }
3337
3338 static int validate_geometry_ddf_bvd(struct supertype *st,
3339 int level, int layout, int raiddisks,
3340 int *chunk, unsigned long long size,
3341 unsigned long long data_offset,
3342 char *dev, unsigned long long *freesize,
3343 int verbose)
3344 {
3345 struct stat stb;
3346 struct ddf_super *ddf = st->sb;
3347 struct dl *dl;
3348 unsigned long long pos = 0;
3349 unsigned long long maxsize;
3350 struct extent *e;
3351 int i;
3352 /* ddf/bvd supports lots of things, but not containers */
3353 if (level == LEVEL_CONTAINER) {
3354 if (verbose)
3355 pr_err("DDF cannot create a container within an container\n");
3356 return 0;
3357 }
3358 /* We must have the container info already read in. */
3359 if (!ddf)
3360 return 0;
3361
3362 if (!dev) {
3363 /* General test: make sure there is space for
3364 * 'raiddisks' device extents of size 'size'.
3365 */
3366 unsigned long long minsize = size;
3367 int dcnt = 0;
3368 if (minsize == 0)
3369 minsize = 8;
3370 for (dl = ddf->dlist; dl ; dl = dl->next)
3371 {
3372 int found = 0;
3373 pos = 0;
3374
3375 i = 0;
3376 e = get_extents(ddf, dl);
3377 if (!e) continue;
3378 do {
3379 unsigned long long esize;
3380 esize = e[i].start - pos;
3381 if (esize >= minsize)
3382 found = 1;
3383 pos = e[i].start + e[i].size;
3384 i++;
3385 } while (e[i-1].size);
3386 if (found)
3387 dcnt++;
3388 free(e);
3389 }
3390 if (dcnt < raiddisks) {
3391 if (verbose)
3392 pr_err("ddf: Not enough devices with "
3393 "space for this array (%d < %d)\n",
3394 dcnt, raiddisks);
3395 return 0;
3396 }
3397 return 1;
3398 }
3399 /* This device must be a member of the set */
3400 if (stat(dev, &stb) < 0)
3401 return 0;
3402 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3403 return 0;
3404 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3405 if (dl->major == (int)major(stb.st_rdev) &&
3406 dl->minor == (int)minor(stb.st_rdev))
3407 break;
3408 }
3409 if (!dl) {
3410 if (verbose)
3411 pr_err("ddf: %s is not in the "
3412 "same DDF set\n",
3413 dev);
3414 return 0;
3415 }
3416 e = get_extents(ddf, dl);
3417 maxsize = 0;
3418 i = 0;
3419 if (e) do {
3420 unsigned long long esize;
3421 esize = e[i].start - pos;
3422 if (esize >= maxsize)
3423 maxsize = esize;
3424 pos = e[i].start + e[i].size;
3425 i++;
3426 } while (e[i-1].size);
3427 *freesize = maxsize;
3428 // FIXME here I am
3429
3430 return 1;
3431 }
3432
3433 static int load_super_ddf_all(struct supertype *st, int fd,
3434 void **sbp, char *devname)
3435 {
3436 struct mdinfo *sra;
3437 struct ddf_super *super;
3438 struct mdinfo *sd, *best = NULL;
3439 int bestseq = 0;
3440 int seq;
3441 char nm[20];
3442 int dfd;
3443
3444 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3445 if (!sra)
3446 return 1;
3447 if (sra->array.major_version != -1 ||
3448 sra->array.minor_version != -2 ||
3449 strcmp(sra->text_version, "ddf") != 0)
3450 return 1;
3451
3452 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3453 return 1;
3454 memset(super, 0, sizeof(*super));
3455
3456 /* first, try each device, and choose the best ddf */
3457 for (sd = sra->devs ; sd ; sd = sd->next) {
3458 int rv;
3459 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3460 dfd = dev_open(nm, O_RDONLY);
3461 if (dfd < 0)
3462 return 2;
3463 rv = load_ddf_headers(dfd, super, NULL);
3464 close(dfd);
3465 if (rv == 0) {
3466 seq = be32_to_cpu(super->active->seq);
3467 if (super->active->openflag)
3468 seq--;
3469 if (!best || seq > bestseq) {
3470 bestseq = seq;
3471 best = sd;
3472 }
3473 }
3474 }
3475 if (!best)
3476 return 1;
3477 /* OK, load this ddf */
3478 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3479 dfd = dev_open(nm, O_RDONLY);
3480 if (dfd < 0)
3481 return 1;
3482 load_ddf_headers(dfd, super, NULL);
3483 load_ddf_global(dfd, super, NULL);
3484 close(dfd);
3485 /* Now we need the device-local bits */
3486 for (sd = sra->devs ; sd ; sd = sd->next) {
3487 int rv;
3488
3489 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3490 dfd = dev_open(nm, O_RDWR);
3491 if (dfd < 0)
3492 return 2;
3493 rv = load_ddf_headers(dfd, super, NULL);
3494 if (rv == 0)
3495 rv = load_ddf_local(dfd, super, NULL, 1);
3496 if (rv)
3497 return 1;
3498 }
3499
3500 *sbp = super;
3501 if (st->ss == NULL) {
3502 st->ss = &super_ddf;
3503 st->minor_version = 0;
3504 st->max_devs = 512;
3505 }
3506 strcpy(st->container_devnm, fd2devnm(fd));
3507 return 0;
3508 }
3509
3510 static int load_container_ddf(struct supertype *st, int fd,
3511 char *devname)
3512 {
3513 return load_super_ddf_all(st, fd, &st->sb, devname);
3514 }
3515
3516 #endif /* MDASSEMBLE */
3517
3518 static int check_secondary(const struct vcl *vc)
3519 {
3520 const struct vd_config *conf = &vc->conf;
3521 int i;
3522
3523 /* The only DDF secondary RAID level md can support is
3524 * RAID 10, if the stripe sizes and Basic volume sizes
3525 * are all equal.
3526 * Other configurations could in theory be supported by exposing
3527 * the BVDs to user space and using device mapper for the secondary
3528 * mapping. So far we don't support that.
3529 */
3530
3531 __u64 sec_elements[4] = {0, 0, 0, 0};
3532 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1ULL<<((n)&63)))
3533 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1ULL<<((n)&63))) != 0)
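	/* sec_elements is used as a 256-bit bitmap of the BVD sequence
	 * numbers seen, so that every secondary element
	 * 0 .. sec_elmnt_count-1 can be checked for presence below.
	 */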
3534
3535 if (vc->other_bvds == NULL) {
3536 pr_err("No BVDs for secondary RAID found\n");
3537 return -1;
3538 }
3539 if (conf->prl != DDF_RAID1) {
3540 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3541 return -1;
3542 }
3543 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3544 pr_err("Secondary RAID level %d is unsupported\n",
3545 conf->srl);
3546 return -1;
3547 }
3548 __set_sec_seen(conf->sec_elmnt_seq);
3549 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3550 const struct vd_config *bvd = vc->other_bvds[i];
3551 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3552 continue;
3553 if (bvd->srl != conf->srl) {
3554 pr_err("Inconsistent secondary RAID level across BVDs\n");
3555 return -1;
3556 }
3557 if (bvd->prl != conf->prl) {
3558 pr_err("Different RAID levels for BVDs are unsupported\n");
3559 return -1;
3560 }
3561 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3562 pr_err("All BVDs must have the same number of primary elements\n");
3563 return -1;
3564 }
3565 if (bvd->chunk_shift != conf->chunk_shift) {
3566 pr_err("Different strip sizes for BVDs are unsupported\n");
3567 return -1;
3568 }
3569 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3570 pr_err("Different BVD sizes are unsupported\n");
3571 return -1;
3572 }
3573 __set_sec_seen(bvd->sec_elmnt_seq);
3574 }
3575 for (i = 0; i < conf->sec_elmnt_count; i++) {
3576 if (!__was_sec_seen(i)) {
3577 pr_err("BVD %d is missing\n", i);
3578 return -1;
3579 }
3580 }
3581 return 0;
3582 }
3583
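/* Map a physical disk reference number to its position in the exported md
 * array.  The return value is sec_elmnt_seq * prim_elmnt_count plus the
 * index of refnum among the valid phys_refnum slots of its BVD; *bvd and
 * *idx report which BVD and which slot matched.  DDF_NOTFOUND means the
 * refnum is not part of this virtual disk.
 */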
3584 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3585 be32 refnum, unsigned int nmax,
3586 const struct vd_config **bvd,
3587 unsigned int *idx)
3588 {
3589 unsigned int i, j, n, sec, cnt;
3590
3591 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3592 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3593
3594 for (i = 0, j = 0 ; i < nmax ; i++) {
3595 /* j counts valid entries for this BVD */
3596 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3597 j++;
3598 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3599 *bvd = &vc->conf;
3600 *idx = i;
3601 return sec * cnt + j - 1;
3602 }
3603 }
3604 if (vc->other_bvds == NULL)
3605 goto bad;
3606
3607 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3608 struct vd_config *vd = vc->other_bvds[n-1];
3609 sec = vd->sec_elmnt_seq;
3610 if (sec == DDF_UNUSED_BVD)
3611 continue;
3612 for (i = 0, j = 0 ; i < nmax ; i++) {
3613 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3614 j++;
3615 if (be32_eq(vd->phys_refnum[i], refnum)) {
3616 *bvd = vd;
3617 *idx = i;
3618 return sec * cnt + j - 1;
3619 }
3620 }
3621 }
3622 bad:
3623 *bvd = NULL;
3624 return DDF_NOTFOUND;
3625 }
3626
3627 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3628 {
3629 /* Given a container loaded by load_super_ddf_all,
3630 * extract information about all the arrays into
3631 * an mdinfo tree.
3632 *
3633 * For each vcl in conflist: create an mdinfo, fill it in,
3634 * then look for matching devices (phys_refnum) in dlist
3635 * and create appropriate device mdinfo.
3636 */
3637 struct ddf_super *ddf = st->sb;
3638 struct mdinfo *rest = NULL;
3639 struct vcl *vc;
3640
3641 for (vc = ddf->conflist ; vc ; vc=vc->next)
3642 {
3643 unsigned int i;
3644 unsigned int j;
3645 struct mdinfo *this;
3646 char *ep;
3647 __u32 *cptr;
3648 unsigned int pd;
3649
3650 if (subarray &&
3651 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3652 *ep != '\0'))
3653 continue;
3654
3655 if (vc->conf.sec_elmnt_count > 1) {
3656 if (check_secondary(vc) != 0)
3657 continue;
3658 }
3659
3660 this = xcalloc(1, sizeof(*this));
3661 this->next = rest;
3662 rest = this;
3663
3664 if (layout_ddf2md(&vc->conf, &this->array))
3665 continue;
3666 this->array.md_minor = -1;
3667 this->array.major_version = -1;
3668 this->array.minor_version = -2;
3669 cptr = (__u32 *)(vc->conf.guid + 16);
3670 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3671 this->array.utime = DECADE +
3672 be32_to_cpu(vc->conf.timestamp);
3673 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3674
3675 i = vc->vcnum;
3676 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3677 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3678 DDF_init_full) {
3679 this->array.state = 0;
3680 this->resync_start = 0;
3681 } else {
3682 this->array.state = 1;
3683 this->resync_start = MaxSector;
3684 }
3685 memcpy(this->name, ddf->virt->entries[i].name, 16);
3686 this->name[16]=0;
3687 for(j=0; j<16; j++)
3688 if (this->name[j] == ' ')
3689 this->name[j] = 0;
3690
3691 memset(this->uuid, 0, sizeof(this->uuid));
3692 this->component_size = be64_to_cpu(vc->conf.blocks);
3693 this->array.size = this->component_size / 2;
3694 this->container_member = i;
3695
3696 ddf->currentconf = vc;
3697 uuid_from_super_ddf(st, this->uuid);
3698 if (!subarray)
3699 ddf->currentconf = NULL;
3700
3701 sprintf(this->text_version, "/%s/%d",
3702 st->container_devnm, this->container_member);
3703
3704 for (pd = 0; pd < be16_to_cpu(ddf->phys->used_pdes); pd++) {
3705 struct mdinfo *dev;
3706 struct dl *d;
3707 const struct vd_config *bvd;
3708 unsigned int iphys;
3709 int stt;
3710
3711 if (be32_to_cpu(ddf->phys->entries[pd].refnum)
3712 == 0xFFFFFFFF)
3713 continue;
3714
3715 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3716 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3717 != DDF_Online)
3718 continue;
3719
3720 i = get_pd_index_from_refnum(
3721 vc, ddf->phys->entries[pd].refnum,
3722 ddf->mppe, &bvd, &iphys);
3723 if (i == DDF_NOTFOUND)
3724 continue;
3725
3726 this->array.working_disks++;
3727
3728 for (d = ddf->dlist; d ; d=d->next)
3729 if (be32_eq(d->disk.refnum,
3730 ddf->phys->entries[pd].refnum))
3731 break;
3732 if (d == NULL)
3733 /* Haven't found that one yet, maybe there are others */
3734 continue;
3735
3736 dev = xcalloc(1, sizeof(*dev));
3737 dev->next = this->devs;
3738 this->devs = dev;
3739
3740 dev->disk.number = be32_to_cpu(d->disk.refnum);
3741 dev->disk.major = d->major;
3742 dev->disk.minor = d->minor;
3743 dev->disk.raid_disk = i;
3744 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3745 dev->recovery_start = MaxSector;
3746
3747 dev->events = be32_to_cpu(ddf->primary.seq);
3748 dev->data_offset =
3749 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3750 dev->component_size = be64_to_cpu(bvd->blocks);
3751 if (d->devname)
3752 strcpy(dev->name, d->devname);
3753 }
3754 }
3755 return rest;
3756 }
3757
3758 static int store_super_ddf(struct supertype *st, int fd)
3759 {
3760 struct ddf_super *ddf = st->sb;
3761 unsigned long long dsize;
3762 void *buf;
3763 int rc;
3764
3765 if (!ddf)
3766 return 1;
3767
3768 if (!get_dev_size(fd, NULL, &dsize))
3769 return 1;
3770
3771 if (ddf->dlist || ddf->conflist) {
3772 struct stat sta;
3773 struct dl *dl;
3774 int ofd, ret;
3775
3776 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3777 pr_err("%s: file descriptor for invalid device\n",
3778 __func__);
3779 return 1;
3780 }
3781 for (dl = ddf->dlist; dl; dl = dl->next)
3782 if (dl->major == (int)major(sta.st_rdev) &&
3783 dl->minor == (int)minor(sta.st_rdev))
3784 break;
3785 if (!dl) {
3786 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3787 (int)major(sta.st_rdev),
3788 (int)minor(sta.st_rdev));
3789 return 1;
3790 }
3791 ofd = dl->fd;
3792 dl->fd = fd;
3793 ret = (_write_super_to_disk(ddf, dl) != 1);
3794 dl->fd = ofd;
3795 return ret;
3796 }
3797
3798 if (posix_memalign(&buf, 512, 512) != 0)
3799 return 1;
3800 memset(buf, 0, 512);
3801
3802 lseek64(fd, dsize-512, 0);
3803 rc = write(fd, buf, 512);
3804 free(buf);
3805 if (rc < 0)
3806 return 1;
3807 return 0;
3808 }
3809
3810 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3811 {
3812 /*
3813 * return:
3814 * 0 same, or first was empty, and second was copied
3815 * 1 second had wrong number
3816 * 2 wrong uuid
3817 * 3 wrong other info
3818 */
3819 struct ddf_super *first = st->sb;
3820 struct ddf_super *second = tst->sb;
3821 struct dl *dl1, *dl2;
3822 struct vcl *vl1, *vl2;
3823 unsigned int max_vds, max_pds, pd, vd;
3824
3825 if (!first) {
3826 st->sb = tst->sb;
3827 tst->sb = NULL;
3828 return 0;
3829 }
3830
3831 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3832 return 2;
3833
3834 if (!be32_eq(first->anchor.seq, second->anchor.seq)) {
3835 dprintf("%s: sequence number mismatch %u/%u\n", __func__,
3836 be32_to_cpu(first->anchor.seq),
3837 be32_to_cpu(second->anchor.seq));
3838 return 3;
3839 }
3840 if (first->max_part != second->max_part ||
3841 !be16_eq(first->phys->used_pdes, second->phys->used_pdes) ||
3842 !be16_eq(first->virt->populated_vdes,
3843 second->virt->populated_vdes)) {
3844 dprintf("%s: PD/VD number mismatch\n", __func__);
3845 return 3;
3846 }
3847
3848 max_pds = be16_to_cpu(first->phys->used_pdes);
3849 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3850 for (pd = 0; pd < max_pds; pd++)
3851 if (be32_eq(first->phys->entries[pd].refnum,
3852 dl2->disk.refnum))
3853 break;
3854 if (pd == max_pds) {
3855 dprintf("%s: no match for disk %08x\n", __func__,
3856 be32_to_cpu(dl2->disk.refnum));
3857 return 3;
3858 }
3859 }
3860
3861 max_vds = be16_to_cpu(first->active->max_vd_entries);
3862 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3863 if (!be32_eq(vl2->conf.magic, DDF_VD_CONF_MAGIC))
3864 continue;
3865 for (vd = 0; vd < max_vds; vd++)
3866 if (!memcmp(first->virt->entries[vd].guid,
3867 vl2->conf.guid, DDF_GUID_LEN))
3868 break;
3869 if (vd == max_vds) {
3870 dprintf("%s: no match for VD config\n", __func__);
3871 return 3;
3872 }
3873 }
3874 /* FIXME should I look at anything else? */
3875
3876 /*
3877 At this point we are fairly sure that the metadata matches.
3878 But the new disk may contain additional local data.
3879 Add it to the super block.
3880 */
3881 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3882 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3883 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3884 DDF_GUID_LEN))
3885 break;
3886 if (vl1) {
3887 if (vl1->other_bvds != NULL &&
3888 vl1->conf.sec_elmnt_seq !=
3889 vl2->conf.sec_elmnt_seq) {
3890 dprintf("%s: adding BVD %u\n", __func__,
3891 vl2->conf.sec_elmnt_seq);
3892 add_other_bvd(vl1, &vl2->conf,
3893 first->conf_rec_len*512);
3894 }
3895 continue;
3896 }
3897
3898 if (posix_memalign((void **)&vl1, 512,
3899 (first->conf_rec_len*512 +
3900 offsetof(struct vcl, conf))) != 0) {
3901 pr_err("%s could not allocate vcl buf\n",
3902 __func__);
3903 return 3;
3904 }
3905
3906 vl1->next = first->conflist;
3907 vl1->block_sizes = NULL;
3908 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3909 if (alloc_other_bvds(first, vl1) != 0) {
3910 pr_err("%s could not allocate other bvds\n",
3911 __func__);
3912 free(vl1);
3913 return 3;
3914 }
3915 for (vd = 0; vd < max_vds; vd++)
3916 if (!memcmp(first->virt->entries[vd].guid,
3917 vl1->conf.guid, DDF_GUID_LEN))
3918 break;
3919 vl1->vcnum = vd;
3920 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
3921 first->conflist = vl1;
3922 }
3923
3924 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3925 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3926 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
3927 break;
3928 if (dl1)
3929 continue;
3930
3931 if (posix_memalign((void **)&dl1, 512,
3932 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
3933 != 0) {
3934 pr_err("%s could not allocate disk info buffer\n",
3935 __func__);
3936 return 3;
3937 }
3938 memcpy(dl1, dl2, sizeof(*dl1));
3939 dl1->mdupdate = NULL;
3940 dl1->next = first->dlist;
3941 dl1->fd = -1;
3942 for (pd = 0; pd < max_pds; pd++)
3943 if (be32_eq(first->phys->entries[pd].refnum,
3944 dl1->disk.refnum))
3945 break;
3946 dl1->pdnum = pd;
3947 if (dl2->spare) {
3948 if (posix_memalign((void **)&dl1->spare, 512,
3949 first->conf_rec_len*512) != 0) {
3950 pr_err("%s could not allocate spare info buf\n",
3951 __func__);
3952 return 3;
3953 }
3954 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
3955 }
3956 for (vd = 0 ; vd < first->max_part ; vd++) {
3957 if (!dl2->vlist[vd]) {
3958 dl1->vlist[vd] = NULL;
3959 continue;
3960 }
3961 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
3962 if (!memcmp(vl1->conf.guid,
3963 dl2->vlist[vd]->conf.guid,
3964 DDF_GUID_LEN))
3965 break;
3966 }
3967 dl1->vlist[vd] = vl1;
3968 }
3969 first->dlist = dl1;
3970 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
3971 be32_to_cpu(dl1->disk.refnum));
3972 }
3973
3974 return 0;
3975 }
3976
3977 #ifndef MDASSEMBLE
3978 /*
3979 * A new array 'a' has been started which claims to be instance 'inst'
3980 * within container 'c'.
3981 * We need to confirm that the array matches the metadata in 'c' so
3982 * that we don't corrupt any metadata.
3983 */
3984 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
3985 {
3986 struct ddf_super *ddf = c->sb;
3987 int n = atoi(inst);
3988 struct mdinfo *dev;
3989 struct dl *dl;
3990 static const char faulty[] = "faulty";
3991
3992 if (all_ff(ddf->virt->entries[n].guid)) {
3993 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
3994 return -ENODEV;
3995 }
3996 dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
3997 guid_str(ddf->virt->entries[n].guid));
3998 for (dev = a->info.devs; dev; dev = dev->next) {
3999 for (dl = ddf->dlist; dl; dl = dl->next)
4000 if (dl->major == dev->disk.major &&
4001 dl->minor == dev->disk.minor)
4002 break;
4003 if (!dl) {
4004 pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
4005 __func__, dev->disk.major, dev->disk.minor, n);
4006 return -1;
4007 }
4008 if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
4009 (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
4010 pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
4011 __func__, n, dl->major, dl->minor,
4012 be16_to_cpu(
4013 ddf->phys->entries[dl->pdnum].state));
4014 if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
4015 sizeof(faulty) - 1)
4016 pr_err("Write to state_fd failed\n");
4017 dev->curr_state = DS_FAULTY;
4018 }
4019 }
4020 a->info.container_member = n;
4021 return 0;
4022 }
4023
4024 /*
4025 * The array 'a' is to be marked clean in the metadata.
4026 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4027 * clean up to that point (in sectors). If that cannot be recorded in the
4028 * metadata, then leave it as dirty.
4029 *
4030 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4031 * !global! virtual_disk.virtual_entry structure.
4032 */
4033 static int ddf_set_array_state(struct active_array *a, int consistent)
4034 {
4035 struct ddf_super *ddf = a->container->sb;
4036 int inst = a->info.container_member;
4037 int old = ddf->virt->entries[inst].state;
4038 if (consistent == 2) {
4039 /* Should check if a recovery should be started FIXME */
4040 consistent = 1;
4041 if (!is_resync_complete(&a->info))
4042 consistent = 0;
4043 }
4044 if (consistent)
4045 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4046 else
4047 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4048 if (old != ddf->virt->entries[inst].state)
4049 ddf_set_updates_pending(ddf);
4050
4051 old = ddf->virt->entries[inst].init_state;
4052 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4053 if (is_resync_complete(&a->info))
4054 ddf->virt->entries[inst].init_state |= DDF_init_full;
4055 else if (a->info.resync_start == 0)
4056 ddf->virt->entries[inst].init_state |= DDF_init_not;
4057 else
4058 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4059 if (old != ddf->virt->entries[inst].init_state)
4060 ddf_set_updates_pending(ddf);
4061
4062 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4063 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4064 consistent?"clean":"dirty",
4065 a->info.resync_start);
4066 return consistent;
4067 }
4068
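/* Work out the DDF state of a single BVD: count how many members of
 * the primary element are Online and neither Failed nor Rebuilding,
 * then map that count to optimal/part-optimal/degraded/failed
 * according to the primary RAID level (e.g. RAID5 is failed once
 * more than one member is missing, RAID6 once more than two are).
 */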
4069 static int get_bvd_state(const struct ddf_super *ddf,
4070 const struct vd_config *vc)
4071 {
4072 unsigned int i, n_bvd, working = 0;
4073 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4074 int pd, st, state;
4075 for (i = 0; i < n_prim; i++) {
4076 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4077 continue;
4078 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4079 if (pd < 0)
4080 continue;
4081 st = be16_to_cpu(ddf->phys->entries[pd].state);
4082 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4083 == DDF_Online)
4084 working++;
4085 }
4086
4087 state = DDF_state_degraded;
4088 if (working == n_prim)
4089 state = DDF_state_optimal;
4090 else
4091 switch (vc->prl) {
4092 case DDF_RAID0:
4093 case DDF_CONCAT:
4094 case DDF_JBOD:
4095 state = DDF_state_failed;
4096 break;
4097 case DDF_RAID1:
4098 if (working == 0)
4099 state = DDF_state_failed;
4100 else if (working >= 2)
4101 state = DDF_state_part_optimal;
4102 break;
4103 case DDF_RAID4:
4104 case DDF_RAID5:
4105 if (working < n_prim - 1)
4106 state = DDF_state_failed;
4107 break;
4108 case DDF_RAID6:
4109 if (working < n_prim - 2)
4110 state = DDF_state_failed;
4111 else if (working == n_prim - 1)
4112 state = DDF_state_part_optimal;
4113 break;
4114 }
4115 return state;
4116 }
4117
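/* Combine the state of one BVD with that of another BVD in the same
 * SVD. For a mirrored secondary level (DDF_2MIRRORED) one optimal
 * copy is enough for "partially optimal" and both copies must fail
 * before the SVD is failed; for any other secondary level a failed
 * BVD fails the whole SVD and a degraded BVD degrades it.
 */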
4118 static int secondary_state(int state, int other, int seclevel)
4119 {
4120 if (state == DDF_state_optimal && other == DDF_state_optimal)
4121 return DDF_state_optimal;
4122 if (seclevel == DDF_2MIRRORED) {
4123 if (state == DDF_state_optimal || other == DDF_state_optimal)
4124 return DDF_state_part_optimal;
4125 if (state == DDF_state_failed && other == DDF_state_failed)
4126 return DDF_state_failed;
4127 return DDF_state_degraded;
4128 } else {
4129 if (state == DDF_state_failed || other == DDF_state_failed)
4130 return DDF_state_failed;
4131 if (state == DDF_state_degraded || other == DDF_state_degraded)
4132 return DDF_state_degraded;
4133 return DDF_state_part_optimal;
4134 }
4135 }
4136
4137 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4138 {
4139 int state = get_bvd_state(ddf, &vcl->conf);
4140 unsigned int i;
4141 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4142 state = secondary_state(
4143 state,
4144 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4145 vcl->conf.srl);
4146 }
4147 return state;
4148 }
4149
4150 /*
4151 * The state of each disk is stored in the global phys_disk structure
4152 * in phys_disk.entries[n].state.
4153 * This makes various combinations awkward.
4154 * - When a device fails in any array, it must be failed in all arrays
4155 * that include a part of this device.
4156 * - When a component is rebuilding, we cannot include it officially in the
4157 * array unless this is the only array that uses the device.
4158 *
4159 * So: when transitioning:
4160 * Online -> failed, just set failed flag. monitor will propagate
4161 * spare -> online, the device might need to be added to the array.
4162 * spare -> failed, just set failed. Don't worry if in array or not.
4163 */
4164 static void ddf_set_disk(struct active_array *a, int n, int state)
4165 {
4166 struct ddf_super *ddf = a->container->sb;
4167 unsigned int inst = a->info.container_member, n_bvd;
4168 struct vcl *vcl;
4169 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4170 &n_bvd, &vcl);
4171 int pd;
4172 struct mdinfo *mdi;
4173 struct dl *dl;
4174
4175 dprintf("%s: %d to %x\n", __func__, n, state);
4176 if (vc == NULL) {
4177 dprintf("ddf: cannot find instance %d!!\n", inst);
4178 return;
4179 }
4180 /* Find the matching slot in 'info'. */
4181 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4182 if (mdi->disk.raid_disk == n)
4183 break;
4184 if (!mdi) {
4185 pr_err("%s: cannot find raid disk %d\n",
4186 __func__, n);
4187 return;
4188 }
4189
4190 /* and find the 'dl' entry corresponding to that. */
4191 for (dl = ddf->dlist; dl; dl = dl->next)
4192 if (mdi->state_fd >= 0 &&
4193 mdi->disk.major == dl->major &&
4194 mdi->disk.minor == dl->minor)
4195 break;
4196 if (!dl) {
4197 pr_err("%s: cannot find raid disk %d (%d/%d)\n",
4198 __func__, n,
4199 mdi->disk.major, mdi->disk.minor);
4200 return;
4201 }
4202
4203 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4204 if (pd < 0 || pd != dl->pdnum) {
4205 /* disk doesn't currently exist or has changed.
4206 * If it is now in_sync, insert it. */
4207 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4208 __func__, dl->pdnum, dl->major, dl->minor,
4209 be32_to_cpu(dl->disk.refnum));
4210 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4211 __func__, inst, n_bvd,
4212 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4213 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4214 pd = dl->pdnum; /* FIXME: is this really correct ? */
4215 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4216 LBA_OFFSET(ddf, vc)[n_bvd] =
4217 cpu_to_be64(mdi->data_offset);
4218 be16_clear(ddf->phys->entries[pd].type,
4219 cpu_to_be16(DDF_Global_Spare));
4220 be16_set(ddf->phys->entries[pd].type,
4221 cpu_to_be16(DDF_Active_in_VD));
4222 ddf_set_updates_pending(ddf);
4223 }
4224 } else {
4225 be16 old = ddf->phys->entries[pd].state;
4226 if (state & DS_FAULTY)
4227 be16_set(ddf->phys->entries[pd].state,
4228 cpu_to_be16(DDF_Failed));
4229 if (state & DS_INSYNC) {
4230 be16_set(ddf->phys->entries[pd].state,
4231 cpu_to_be16(DDF_Online));
4232 be16_clear(ddf->phys->entries[pd].state,
4233 cpu_to_be16(DDF_Rebuilding));
4234 }
4235 if (!be16_eq(old, ddf->phys->entries[pd].state))
4236 ddf_set_updates_pending(ddf);
4237 }
4238
4239 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4240 be32_to_cpu(dl->disk.refnum), state,
4241 be16_to_cpu(ddf->phys->entries[pd].state));
4242
4243 /* Now we need to check the state of the array and update
4244 * virtual_disk.entries[n].state.
4245 * It needs to be one of "optimal", "degraded", "failed".
4246 * I don't understand 'deleted' or 'missing'.
4247 */
4248 state = get_svd_state(ddf, vcl);
4249
4250 if (ddf->virt->entries[inst].state !=
4251 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4252 | state)) {
4253
4254 ddf->virt->entries[inst].state =
4255 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4256 | state;
4257 ddf_set_updates_pending(ddf);
4258 }
4259
4260 }
4261
4262 static void ddf_sync_metadata(struct supertype *st)
4263 {
4264
4265 /*
4266 * Write all data to all devices.
4267 * Later, we might be able to track whether only local changes
4268 * have been made, or whether any global data has been changed,
4269 * but ddf is sufficiently weird that it probably always
4270 * changes global data ....
4271 */
4272 struct ddf_super *ddf = st->sb;
4273 if (!ddf->updates_pending)
4274 return;
4275 ddf->updates_pending = 0;
4276 __write_init_super_ddf(st);
4277 dprintf("ddf: sync_metadata\n");
4278 }
4279
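/* Unlink the config record(s) matching 'guid' from the list.
 * Returns 1 if something was removed, 0 otherwise. The unlinked
 * records are not freed here.
 */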
4280 static int del_from_conflist(struct vcl **list, const char *guid)
4281 {
4282 struct vcl **p;
4283 int found = 0;
4284 for (p = list; p && *p; p = &((*p)->next))
4285 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4286 found = 1;
4287 *p = (*p)->next;
4288 }
4289 return found;
4290 }
4291
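/* Remove the subarray identified by 'guid' from the in-memory
 * metadata: drop its config record from the conflist, clear any
 * references to it in each disk's vlist, and invalidate the virtual
 * disk entry by setting its GUID to all 0xff, the same "unused"
 * marker that all_ff() tests for elsewhere.
 */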
4292 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4293 {
4294 struct dl *dl;
4295 unsigned int vdnum, i;
4296 vdnum = find_vde_by_guid(ddf, guid);
4297 if (vdnum == DDF_NOTFOUND) {
4298 pr_err("%s: could not find VD %s\n", __func__,
4299 guid_str(guid));
4300 return -1;
4301 }
4302 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4303 pr_err("%s: could not find conf %s\n", __func__,
4304 guid_str(guid));
4305 return -1;
4306 }
4307 for (dl = ddf->dlist; dl; dl = dl->next)
4308 for (i = 0; i < ddf->max_part; i++)
4309 if (dl->vlist[i] != NULL &&
4310 !memcmp(dl->vlist[i]->conf.guid, guid,
4311 DDF_GUID_LEN))
4312 dl->vlist[i] = NULL;
4313 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4314 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4315 return 0;
4316 }
4317
4318 static int kill_subarray_ddf(struct supertype *st)
4319 {
4320 struct ddf_super *ddf = st->sb;
4321 /*
4322 * currentconf is set in container_content_ddf,
4323 * called with subarray arg
4324 */
4325 struct vcl *victim = ddf->currentconf;
4326 struct vd_config *conf;
4327 unsigned int vdnum;
4328 ddf->currentconf = NULL;
4329 if (!victim) {
4330 pr_err("%s: nothing to kill\n", __func__);
4331 return -1;
4332 }
4333 conf = &victim->conf;
4334 vdnum = find_vde_by_guid(ddf, conf->guid);
4335 if (vdnum == DDF_NOTFOUND) {
4336 pr_err("%s: could not find VD %s\n", __func__,
4337 guid_str(conf->guid));
4338 return -1;
4339 }
4340 if (st->update_tail) {
4341 struct virtual_disk *vd;
4342 int len = sizeof(struct virtual_disk)
4343 + sizeof(struct virtual_entry);
4344 vd = xmalloc(len);
4345 if (vd == NULL) {
4346 pr_err("%s: failed to allocate %d bytes\n", __func__,
4347 len);
4348 return -1;
4349 }
4350 memset(vd, 0 , len);
4351 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4352 vd->populated_vdes = cpu_to_be16(0);
4353 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4354 /* we use DDF_state_deleted as marker */
4355 vd->entries[0].state = DDF_state_deleted;
4356 append_metadata_update(st, vd, len);
4357 } else {
4358 _kill_subarray_ddf(ddf, conf->guid);
4359 ddf_set_updates_pending(ddf);
4360 ddf_sync_metadata(st);
4361 }
4362 return 0;
4363 }
4364
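/* Find the BVD in the update buffer whose sec_elmnt_seq matches
 * 'conf' and copy its mapping tables across: the phys_refnum array
 * and, it seems from the length used, the LBA offset array that
 * follows it (mppe __u32 refnums plus mppe __u64 offsets).
 */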
4365 static void copy_matching_bvd(struct ddf_super *ddf,
4366 struct vd_config *conf,
4367 const struct metadata_update *update)
4368 {
4369 unsigned int mppe =
4370 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4371 unsigned int len = ddf->conf_rec_len * 512;
4372 char *p;
4373 struct vd_config *vc;
4374 for (p = update->buf; p < update->buf + update->len; p += len) {
4375 vc = (struct vd_config *) p;
4376 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4377 memcpy(conf->phys_refnum, vc->phys_refnum,
4378 mppe * (sizeof(__u32) + sizeof(__u64)));
4379 return;
4380 }
4381 }
4382 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4383 conf->sec_elmnt_seq, guid_str(conf->guid));
4384 }
4385
4386 static void ddf_process_update(struct supertype *st,
4387 struct metadata_update *update)
4388 {
4389 /* Apply this update to the metadata.
4390 * The first 4 bytes are a DDF_*_MAGIC which guides
4391 * our actions.
4392 * Possible update are:
4393 * DDF_PHYS_RECORDS_MAGIC
4394 * Add a new physical device or remove an old one.
4395 * Changes to this record only happen implicitly.
4396 * used_pdes is the device number.
4397 * DDF_VIRT_RECORDS_MAGIC
4398 * Add a new VD. Possibly also change the 'access' bits.
4399 * populated_vdes is the entry number.
4400 * DDF_VD_CONF_MAGIC
4401 * New or updated VD. The VIRT_RECORD must already
4402 * exist. For an update, phys_refnum and lba_offset
4403 * (at least) are updated, and the VD_CONF must
4404 * be written to precisely those devices listed with
4405 * a phys_refnum.
4406 * DDF_SPARE_ASSIGN_MAGIC
4407 * replacement Spare Assignment Record... but for which device?
4408 *
4409 * So, e.g.:
4410 * - to create a new array, we send a VIRT_RECORD and
4411 * a VD_CONF. Then assemble and start the array.
4412 * - to activate a spare we send a VD_CONF to add the phys_refnum
4413 * and offset. This will also mark the spare as active with
4414 * a spare-assignment record.
4415 */
4416 struct ddf_super *ddf = st->sb;
4417 be32 *magic = (be32 *)update->buf;
4418 struct phys_disk *pd;
4419 struct virtual_disk *vd;
4420 struct vd_config *vc;
4421 struct vcl *vcl;
4422 struct dl *dl;
4423 unsigned int ent;
4424 unsigned int pdnum, pd2, len;
4425
4426 dprintf("Process update %x\n", be32_to_cpu(*magic));
4427
4428 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4429
4430 if (update->len != (sizeof(struct phys_disk) +
4431 sizeof(struct phys_disk_entry)))
4432 return;
4433 pd = (struct phys_disk*)update->buf;
4434
4435 ent = be16_to_cpu(pd->used_pdes);
4436 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4437 return;
4438 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4439 struct dl **dlp;
4440 /* removing this disk. */
4441 be16_set(ddf->phys->entries[ent].state,
4442 cpu_to_be16(DDF_Missing));
4443 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4444 struct dl *dl = *dlp;
4445 if (dl->pdnum == (signed)ent) {
4446 close(dl->fd);
4447 dl->fd = -1;
4448 /* FIXME this doesn't free
4449 * dl->devname */
4450 update->space = dl;
4451 *dlp = dl->next;
4452 break;
4453 }
4454 }
4455 ddf_set_updates_pending(ddf);
4456 return;
4457 }
4458 if (!all_ff(ddf->phys->entries[ent].guid))
4459 return;
4460 ddf->phys->entries[ent] = pd->entries[0];
4461 ddf->phys->used_pdes = cpu_to_be16
4462 (1 + be16_to_cpu(ddf->phys->used_pdes));
4463 ddf_set_updates_pending(ddf);
4464 if (ddf->add_list) {
4465 struct active_array *a;
4466 struct dl *al = ddf->add_list;
4467 ddf->add_list = al->next;
4468
4469 al->next = ddf->dlist;
4470 ddf->dlist = al;
4471
4472 /* As a device has been added, we should check
4473 * for any degraded devices that might make
4474 * use of this spare */
4475 for (a = st->arrays ; a; a=a->next)
4476 a->check_degraded = 1;
4477 }
4478 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4479
4480 if (update->len != (sizeof(struct virtual_disk) +
4481 sizeof(struct virtual_entry)))
4482 return;
4483 vd = (struct virtual_disk*)update->buf;
4484
4485 if (vd->entries[0].state == DDF_state_deleted) {
4486 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4487 return;
4488 } else {
4489
4490 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4491 if (ent != DDF_NOTFOUND) {
4492 dprintf("%s: VD %s exists already in slot %d\n",
4493 __func__, guid_str(vd->entries[0].guid),
4494 ent);
4495 return;
4496 }
4497 ent = find_unused_vde(ddf);
4498 if (ent == DDF_NOTFOUND)
4499 return;
4500 ddf->virt->entries[ent] = vd->entries[0];
4501 ddf->virt->populated_vdes =
4502 cpu_to_be16(
4503 1 + be16_to_cpu(
4504 ddf->virt->populated_vdes));
4505 dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
4506 __func__, guid_str(vd->entries[0].guid), ent,
4507 ddf->virt->entries[ent].state,
4508 ddf->virt->entries[ent].init_state);
4509 }
4510 ddf_set_updates_pending(ddf);
4511 }
4512
4513 else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4514 vc = (struct vd_config*)update->buf;
4515 len = ddf->conf_rec_len * 512;
4516 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4517 pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
4518 __func__, guid_str(vc->guid), update->len,
4519 vc->sec_elmnt_count);
4520 return;
4521 }
4522 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4523 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4524 break;
4525 dprintf("%s: conf update for %s (%s)\n", __func__,
4526 guid_str(vc->guid), (vcl ? "old" : "new"));
4527 if (vcl) {
4528 /* An update, just copy the phys_refnum and lba_offset
4529 * fields
4530 */
4531 unsigned int i;
4532 unsigned int k;
4533 copy_matching_bvd(ddf, &vcl->conf, update);
4534 for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
4535 dprintf("BVD %u has %08x at %llu\n", 0,
4536 be32_to_cpu(vcl->conf.phys_refnum[k]),
4537 be64_to_cpu(LBA_OFFSET(ddf,
4538 &vcl->conf)[k]));
4539 for (i = 1; i < vc->sec_elmnt_count; i++) {
4540 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4541 update);
4542 for (k = 0; k < be16_to_cpu(
4543 vc->prim_elmnt_count); k++)
4544 dprintf("BVD %u has %08x at %llu\n", i,
4545 be32_to_cpu
4546 (vcl->other_bvds[i-1]->
4547 phys_refnum[k]),
4548 be64_to_cpu
4549 (LBA_OFFSET
4550 (ddf,
4551 vcl->other_bvds[i-1])[k]));
4552 }
4553 } else {
4554 /* A new VD_CONF */
4555 unsigned int i;
4556 if (!update->space)
4557 return;
4558 vcl = update->space;
4559 update->space = NULL;
4560 vcl->next = ddf->conflist;
4561 memcpy(&vcl->conf, vc, len);
4562 ent = find_vde_by_guid(ddf, vc->guid);
4563 if (ent == DDF_NOTFOUND)
4564 return;
4565 vcl->vcnum = ent;
4566 ddf->conflist = vcl;
4567 for (i = 1; i < vc->sec_elmnt_count; i++)
4568 memcpy(vcl->other_bvds[i-1],
4569 update->buf + len * i, len);
4570 }
4571 /* Set DDF_Transition on all Failed devices - to help
4572 * us detect those that are no longer in use
4573 */
4574 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4575 pdnum++)
4576 if (be16_and(ddf->phys->entries[pdnum].state,
4577 cpu_to_be16(DDF_Failed)))
4578 be16_set(ddf->phys->entries[pdnum].state,
4579 cpu_to_be16(DDF_Transition));
4580 /* Now make sure vlist is correct for each dl. */
4581 for (dl = ddf->dlist; dl; dl = dl->next) {
4582 unsigned int vn = 0;
4583 int in_degraded = 0;
4584 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4585 unsigned int dn, ibvd;
4586 const struct vd_config *conf;
4587 int vstate;
4588 dn = get_pd_index_from_refnum(vcl,
4589 dl->disk.refnum,
4590 ddf->mppe,
4591 &conf, &ibvd);
4592 if (dn == DDF_NOTFOUND)
4593 continue;
4594 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4595 dl->pdnum,
4596 be32_to_cpu(dl->disk.refnum),
4597 guid_str(conf->guid),
4598 conf->sec_elmnt_seq, vn);
4599 /* Clear the Transition flag */
4600 if (be16_and
4601 (ddf->phys->entries[dl->pdnum].state,
4602 cpu_to_be16(DDF_Failed)))
4603 be16_clear(ddf->phys
4604 ->entries[dl->pdnum].state,
4605 cpu_to_be16(DDF_Transition));
4606 dl->vlist[vn++] = vcl;
4607 vstate = ddf->virt->entries[vcl->vcnum].state
4608 & DDF_state_mask;
4609 if (vstate == DDF_state_degraded ||
4610 vstate == DDF_state_part_optimal)
4611 in_degraded = 1;
4612 }
4613 while (vn < ddf->max_part)
4614 dl->vlist[vn++] = NULL;
4615 if (dl->vlist[0]) {
4616 be16_clear(ddf->phys->entries[dl->pdnum].type,
4617 cpu_to_be16(DDF_Global_Spare));
4618 if (!be16_and(ddf->phys
4619 ->entries[dl->pdnum].type,
4620 cpu_to_be16(DDF_Active_in_VD))) {
4621 be16_set(ddf->phys
4622 ->entries[dl->pdnum].type,
4623 cpu_to_be16(DDF_Active_in_VD));
4624 if (in_degraded)
4625 be16_set(ddf->phys
4626 ->entries[dl->pdnum]
4627 .state,
4628 cpu_to_be16
4629 (DDF_Rebuilding));
4630 }
4631 }
4632 if (dl->spare) {
4633 be16_clear(ddf->phys->entries[dl->pdnum].type,
4634 cpu_to_be16(DDF_Global_Spare));
4635 be16_set(ddf->phys->entries[dl->pdnum].type,
4636 cpu_to_be16(DDF_Spare));
4637 }
4638 if (!dl->vlist[0] && !dl->spare) {
4639 be16_set(ddf->phys->entries[dl->pdnum].type,
4640 cpu_to_be16(DDF_Global_Spare));
4641 be16_clear(ddf->phys->entries[dl->pdnum].type,
4642 cpu_to_be16(DDF_Spare));
4643 be16_clear(ddf->phys->entries[dl->pdnum].type,
4644 cpu_to_be16(DDF_Active_in_VD));
4645 }
4646 }
4647
4648 /* Now remove any 'Failed' devices that are not part
4649 * of any VD. They will have the Transition flag set.
4650 * Once done, we need to update all dl->pdnum numbers.
4651 */
4652 pd2 = 0;
4653 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4654 pdnum++) {
4655 if (be16_and(ddf->phys->entries[pdnum].state,
4656 cpu_to_be16(DDF_Failed))
4657 && be16_and(ddf->phys->entries[pdnum].state,
4658 cpu_to_be16(DDF_Transition))) {
4659 /* skip this one unless in dlist */
4660 for (dl = ddf->dlist; dl; dl = dl->next)
4661 if (dl->pdnum == (int)pdnum)
4662 break;
4663 if (!dl)
4664 continue;
4665 }
4666 if (pdnum == pd2)
4667 pd2++;
4668 else {
4669 ddf->phys->entries[pd2] =
4670 ddf->phys->entries[pdnum];
4671 for (dl = ddf->dlist; dl; dl = dl->next)
4672 if (dl->pdnum == (int)pdnum)
4673 dl->pdnum = pd2;
4674 pd2++;
4675 }
4676 }
4677 ddf->phys->used_pdes = cpu_to_be16(pd2);
4678 while (pd2 < pdnum) {
4679 memset(ddf->phys->entries[pd2].guid, 0xff,
4680 DDF_GUID_LEN);
4681 pd2++;
4682 }
4683
4684 ddf_set_updates_pending(ddf);
4685 }
4686 /* case DDF_SPARE_ASSIGN_MAGIC */
4687 }
4688
4689 static void ddf_prepare_update(struct supertype *st,
4690 struct metadata_update *update)
4691 {
4692 /* This update arrived at managemon.
4693 * We are about to pass it to monitor.
4694 * If a malloc is needed, do it here.
4695 */
4696 struct ddf_super *ddf = st->sb;
4697 be32 *magic = (be32 *)update->buf;
4698 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4699 struct vcl *vcl;
4700 struct vd_config *conf = (struct vd_config *) update->buf;
4701 if (posix_memalign(&update->space, 512,
4702 offsetof(struct vcl, conf)
4703 + ddf->conf_rec_len * 512) != 0) {
4704 update->space = NULL;
4705 return;
4706 }
4707 vcl = update->space;
4708 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4709 if (alloc_other_bvds(ddf, vcl) != 0) {
4710 free(update->space);
4711 update->space = NULL;
4712 }
4713 }
4714 }
4715
4716 /*
4717 * Check degraded state of a RAID10.
4718 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4719 */
4720 static int raid10_degraded(struct mdinfo *info)
4721 {
4722 int n_prim, n_bvds;
4723 int i;
4724 struct mdinfo *d;
4725 char *found;
4726 int ret = -1;
4727
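/* The layout is expected to be of the form 0x1NN (see
 * ddf_level_to_layout(), which uses 0x102 for RAID10), where NN is
 * apparently the number of near copies, i.e. the number of disks in
 * each mirrored BVD. */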
4728 n_prim = info->array.layout & ~0x100;
4729 n_bvds = info->array.raid_disks / n_prim;
4730 found = xmalloc(n_bvds);
4731 if (found == NULL)
4732 return ret;
4733 memset(found, 0, n_bvds);
4734 for (d = info->devs; d; d = d->next) {
4735 i = d->disk.raid_disk / n_prim;
4736 if (i >= n_bvds) {
4737 pr_err("%s: BUG: invalid raid disk\n", __func__);
4738 goto out;
4739 }
4740 if (d->state_fd > 0)
4741 found[i]++;
4742 }
4743 ret = 2;
4744 for (i = 0; i < n_bvds; i++)
4745 if (!found[i]) {
4746 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4747 ret = 0;
4748 goto out;
4749 } else if (found[i] < n_prim) {
4750 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4751 n_bvds);
4752 ret = 1;
4753 }
4754 out:
4755 free(found);
4756 return ret;
4757 }
4758
4759 /*
4760 * Check if the array 'a' is degraded but not failed.
4761 * If it is, find as many spares as are available and needed and
4762 * arrange for their inclusion.
4763 * We only choose devices which are not already in the array,
4764 * and prefer those with a spare-assignment to this array;
4765 * otherwise we choose global spares, always assuming that
4766 * there is enough room.
4767 * For each spare that we assign, we return an 'mdinfo' which
4768 * describes the position for the device in the array.
4769 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4770 * the new phys_refnum and lba_offset values.
4771 *
4772 * Only worry about BVDs at the moment.
4773 */
4774 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4775 struct metadata_update **updates)
4776 {
4777 int working = 0;
4778 struct mdinfo *d;
4779 struct ddf_super *ddf = a->container->sb;
4780 int global_ok = 0;
4781 struct mdinfo *rv = NULL;
4782 struct mdinfo *di;
4783 struct metadata_update *mu;
4784 struct dl *dl;
4785 int i;
4786 unsigned int j;
4787 struct vcl *vcl;
4788 struct vd_config *vc;
4789 unsigned int n_bvd;
4790
4791 for (d = a->info.devs ; d ; d = d->next) {
4792 if ((d->curr_state & DS_FAULTY) &&
4793 d->state_fd >= 0)
4794 /* wait for Removal to happen */
4795 return NULL;
4796 if (d->state_fd >= 0)
4797 working ++;
4798 }
4799
4800 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4801 a->info.array.raid_disks,
4802 a->info.array.level);
4803 if (working == a->info.array.raid_disks)
4804 return NULL; /* array not degraded */
4805 switch (a->info.array.level) {
4806 case 1:
4807 if (working == 0)
4808 return NULL; /* failed */
4809 break;
4810 case 4:
4811 case 5:
4812 if (working < a->info.array.raid_disks - 1)
4813 return NULL; /* failed */
4814 break;
4815 case 6:
4816 if (working < a->info.array.raid_disks - 2)
4817 return NULL; /* failed */
4818 break;
4819 case 10:
4820 if (raid10_degraded(&a->info) < 1)
4821 return NULL;
4822 break;
4823 default: /* concat or stripe */
4824 return NULL; /* failed */
4825 }
4826
4827 /* For each slot, if it is not working, find a spare */
4828 dl = ddf->dlist;
4829 for (i = 0; i < a->info.array.raid_disks; i++) {
4830 for (d = a->info.devs ; d ; d = d->next)
4831 if (d->disk.raid_disk == i)
4832 break;
4833 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4834 if (d && (d->state_fd >= 0))
4835 continue;
4836
4837 /* OK, this device needs recovery. Find a spare */
4838 again:
4839 for ( ; dl ; dl = dl->next) {
4840 unsigned long long esize;
4841 unsigned long long pos;
4842 struct mdinfo *d2;
4843 int is_global = 0;
4844 int is_dedicated = 0;
4845 struct extent *ex;
4846 unsigned int j;
4847 be16 state = ddf->phys->entries[dl->pdnum].state;
4848 if (be16_and(state,
4849 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4850 !be16_and(state,
4851 cpu_to_be16(DDF_Online)))
4852 continue;
4853
4854 /* If in this array, skip */
4855 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4856 if (d2->state_fd >= 0 &&
4857 d2->disk.major == dl->major &&
4858 d2->disk.minor == dl->minor) {
4859 dprintf("%x:%x (%08x) already in array\n",
4860 dl->major, dl->minor,
4861 be32_to_cpu(dl->disk.refnum));
4862 break;
4863 }
4864 if (d2)
4865 continue;
4866 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4867 cpu_to_be16(DDF_Spare))) {
4868 /* Check spare assign record */
4869 if (dl->spare) {
4870 if (dl->spare->type & DDF_spare_dedicated) {
4871 /* check spare_ents for guid */
4872 for (j = 0 ;
4873 j < be16_to_cpu
4874 (dl->spare
4875 ->populated);
4876 j++) {
4877 if (memcmp(dl->spare->spare_ents[j].guid,
4878 ddf->virt->entries[a->info.container_member].guid,
4879 DDF_GUID_LEN) == 0)
4880 is_dedicated = 1;
4881 }
4882 } else
4883 is_global = 1;
4884 }
4885 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
4886 cpu_to_be16(DDF_Global_Spare))) {
4887 is_global = 1;
4888 } else if (!be16_and(ddf->phys
4889 ->entries[dl->pdnum].state,
4890 cpu_to_be16(DDF_Failed))) {
4891 /* we can possibly use some of this */
4892 is_global = 1;
4893 }
4894 if ( ! (is_dedicated ||
4895 (is_global && global_ok))) {
4896 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4897 is_dedicated, is_global);
4898 continue;
4899 }
4900
4901 /* We are allowed to use this device - is there space?
4902 * We need a->info.component_size sectors */
4903 ex = get_extents(ddf, dl);
4904 if (!ex) {
4905 dprintf("cannot get extents\n");
4906 continue;
4907 }
4908 j = 0; pos = 0;
4909 esize = 0;
4910
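/* Walk the gaps between used extents (get_extents() appears to
 * return them in start order, terminated by a zero-size entry)
 * until a gap of at least component_size sectors is found. */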
4911 do {
4912 esize = ex[j].start - pos;
4913 if (esize >= a->info.component_size)
4914 break;
4915 pos = ex[j].start + ex[j].size;
4916 j++;
4917 } while (ex[j-1].size);
4918
4919 free(ex);
4920 if (esize < a->info.component_size) {
4921 dprintf("%x:%x has no room: %llu %llu\n",
4922 dl->major, dl->minor,
4923 esize, a->info.component_size);
4924 /* No room */
4925 continue;
4926 }
4927
4928 /* Cool, we have a device with some space at pos */
4929 di = xcalloc(1, sizeof(*di));
4930 di->disk.number = i;
4931 di->disk.raid_disk = i;
4932 di->disk.major = dl->major;
4933 di->disk.minor = dl->minor;
4934 di->disk.state = 0;
4935 di->recovery_start = 0;
4936 di->data_offset = pos;
4937 di->component_size = a->info.component_size;
4938 di->container_member = dl->pdnum;
4939 di->next = rv;
4940 rv = di;
4941 dprintf("%x:%x (%08x) to be %d at %llu\n",
4942 dl->major, dl->minor,
4943 be32_to_cpu(dl->disk.refnum), i, pos);
4944
4945 break;
4946 }
4947 if (!dl && ! global_ok) {
4948 /* not enough dedicated spares, try global */
4949 global_ok = 1;
4950 dl = ddf->dlist;
4951 goto again;
4952 }
4953 }
4954
4955 if (!rv)
4956 /* No spares found */
4957 return rv;
4958 /* Now 'rv' has a list of devices to return.
4959 * Create a metadata_update record to update the
4960 * phys_refnum and lba_offset values
4961 */
4962 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
4963 &n_bvd, &vcl);
4964 if (vc == NULL)
4965 return NULL;
4966
4967 mu = xmalloc(sizeof(*mu));
4968 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
4969 free(mu);
4970 return NULL;
4971 }
4972
4973 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
4974 mu->buf = xmalloc(mu->len);
4975 mu->space = NULL;
4976 mu->space_list = NULL;
4977 mu->next = *updates;
4978 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
4979 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
4980 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
4981 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
4982
4983 vc = (struct vd_config*)mu->buf;
4984 for (di = rv ; di ; di = di->next) {
4985 unsigned int i_sec, i_prim;
4986 i_sec = di->disk.raid_disk
4987 / be16_to_cpu(vcl->conf.prim_elmnt_count);
4988 i_prim = di->disk.raid_disk
4989 % be16_to_cpu(vcl->conf.prim_elmnt_count);
4990 vc = (struct vd_config *)(mu->buf
4991 + i_sec * ddf->conf_rec_len * 512);
4992 for (dl = ddf->dlist; dl; dl = dl->next)
4993 if (dl->major == di->disk.major
4994 && dl->minor == di->disk.minor)
4995 break;
4996 if (!dl) {
4997 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
4998 __func__, di->disk.raid_disk,
4999 di->disk.major, di->disk.minor);
5000 return NULL;
5001 }
5002 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5003 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5004 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5005 be32_to_cpu(vc->phys_refnum[i_prim]),
5006 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5007 }
5008 *updates = mu;
5009 return rv;
5010 }
5011 #endif /* MDASSEMBLE */
5012
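/* Map an md RAID level to the default layout used for DDF arrays:
 * left-symmetric for RAID5, rotating-N-continue for RAID6 and
 * near=2 (0x102) for RAID10. Levels 0 and 1 have no layout.
 */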
5013 static int ddf_level_to_layout(int level)
5014 {
5015 switch(level) {
5016 case 0:
5017 case 1:
5018 return 0;
5019 case 5:
5020 return ALGORITHM_LEFT_SYMMETRIC;
5021 case 6:
5022 return ALGORITHM_ROTATING_N_CONTINUE;
5023 case 10:
5024 return 0x102;
5025 default:
5026 return UnSet;
5027 }
5028 }
5029
5030 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5031 {
5032 if (level && *level == UnSet)
5033 *level = LEVEL_CONTAINER;
5034
5035 if (level && layout && *layout == UnSet)
5036 *layout = ddf_level_to_layout(*level);
5037 }
5038
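/* The method table that plugs these DDF handlers into mdadm. The
 * entries guarded by #ifndef MDASSEMBLE are only needed by the full
 * tools and by mdmon, not (presumably) by the stripped-down
 * mdassemble build.
 */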
5039 struct superswitch super_ddf = {
5040 #ifndef MDASSEMBLE
5041 .examine_super = examine_super_ddf,
5042 .brief_examine_super = brief_examine_super_ddf,
5043 .brief_examine_subarrays = brief_examine_subarrays_ddf,
5044 .export_examine_super = export_examine_super_ddf,
5045 .detail_super = detail_super_ddf,
5046 .brief_detail_super = brief_detail_super_ddf,
5047 .validate_geometry = validate_geometry_ddf,
5048 .write_init_super = write_init_super_ddf,
5049 .add_to_super = add_to_super_ddf,
5050 .remove_from_super = remove_from_super_ddf,
5051 .load_container = load_container_ddf,
5052 .copy_metadata = copy_metadata_ddf,
5053 .kill_subarray = kill_subarray_ddf,
5054 #endif
5055 .match_home = match_home_ddf,
5056 .uuid_from_super= uuid_from_super_ddf,
5057 .getinfo_super = getinfo_super_ddf,
5058 .update_super = update_super_ddf,
5059
5060 .avail_size = avail_size_ddf,
5061
5062 .compare_super = compare_super_ddf,
5063
5064 .load_super = load_super_ddf,
5065 .init_super = init_super_ddf,
5066 .store_super = store_super_ddf,
5067 .free_super = free_super_ddf,
5068 .match_metadata_desc = match_metadata_desc_ddf,
5069 .container_content = container_content_ddf,
5070 .default_geometry = default_geometry_ddf,
5071
5072 .external = 1,
5073
5074 #ifndef MDASSEMBLE
5075 /* for mdmon */
5076 .open_new = ddf_open_new,
5077 .set_array_state= ddf_set_array_state,
5078 .set_disk = ddf_set_disk,
5079 .sync_metadata = ddf_sync_metadata,
5080 .process_update = ddf_process_update,
5081 .prepare_update = ddf_prepare_update,
5082 .activate_spare = ddf_activate_spare,
5083 #endif
5084 .name = "ddf",
5085 };