super-ddf.c (thirdparty/mdadm.git)
Commit: DDF: ddf_activate_spare: Add RAID10 code
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980-based, so to convert them to Linux (Unix
38 * epoch) timestamps we need to add the number of seconds in the
39 * 1970s decade: 10 years, two of them leap years, i.e. 3652 days.
40 */
41 #define DECADE (3600*24*(365*10+2))
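/* For example (illustrative): a raw DDF timestamp ts maps to a Unix
 * time as (time_t)__be32_to_cpu(ts) + DECADE, and back again by
 * subtracting DECADE; print_guid() and getinfo_super_ddf() below do
 * this conversion inline.
 */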
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* The DDF metadata handling.
51 * DDF metadata lives at the end of the device.
52 * The last 512 byte block provides an 'anchor' which is used to locate
53 * the rest of the metadata, which usually lives just before the anchor.
54 *
55 * Note:
56 * - all multibyte numeric fields are bigendian.
57 * - all strings are space padded.
58 *
59 */
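/* A minimal sketch of locating the anchor (illustrative only; the real
 * lookup is done directly in load_ddf_headers() below): the anchor
 * always occupies the final 512-byte sector of the device.
 */
static inline unsigned long long ddf_anchor_lba(unsigned long long dev_bytes)
{
	return (dev_bytes >> 9) - 1;
}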
60
61 typedef struct __be16 {
62 __u16 _v16;
63 } be16;
64 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
65 #define be16_and(x, y) ((x)._v16 & (y)._v16)
66 #define be16_or(x, y) ((x)._v16 | (y)._v16)
67 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
68 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
69
70 typedef struct __be32 {
71 __u32 _v32;
72 } be32;
73 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
74
75 typedef struct __be64 {
76 __u64 _v64;
77 } be64;
78 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
79
80 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
81 static inline be16 cpu_to_be16(__u16 x)
82 {
83 be16 be = { ._v16 = __cpu_to_be16(x) };
84 return be;
85 }
86
87 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
88 static inline be32 cpu_to_be32(__u32 x)
89 {
90 be32 be = { ._v32 = __cpu_to_be32(x) };
91 return be;
92 }
93
94 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
95 static inline be64 cpu_to_be64(__u64 x)
96 {
97 be64 be = { ._v64 = __cpu_to_be64(x) };
98 return be;
99 }
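/* Example of how the wrapper types are meant to be used (illustrative):
 * on-disk fields stay wrapped and are only converted at the point of
 * comparison, e.g.
 *
 *	if (be32_to_cpu(primary->seq) < be32_to_cpu(secondary->seq))
 *		...
 *
 * which mirrors the sequence-number checks in load_ddf_headers().
 */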
100
101 /* Primary Raid Level (PRL) */
102 #define DDF_RAID0 0x00
103 #define DDF_RAID1 0x01
104 #define DDF_RAID3 0x03
105 #define DDF_RAID4 0x04
106 #define DDF_RAID5 0x05
107 #define DDF_RAID1E 0x11
108 #define DDF_JBOD 0x0f
109 #define DDF_CONCAT 0x1f
110 #define DDF_RAID5E 0x15
111 #define DDF_RAID5EE 0x25
112 #define DDF_RAID6 0x06
113
114 /* Raid Level Qualifier (RLQ) */
115 #define DDF_RAID0_SIMPLE 0x00
116 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
117 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
118 #define DDF_RAID3_0 0x00 /* parity in first extent */
119 #define DDF_RAID3_N 0x01 /* parity in last extent */
120 #define DDF_RAID4_0 0x00 /* parity in first extent */
121 #define DDF_RAID4_N 0x01 /* parity in last extent */
122 /* these apply to raid5e and raid5ee as well */
123 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
124 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
125 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
126 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
127
128 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
129 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
130
131 /* Secondary RAID Level (SRL) */
132 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
133 #define DDF_2MIRRORED 0x01
134 #define DDF_2CONCAT 0x02
135 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
136
137 /* Magic numbers */
138 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
139 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
140 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
141 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
142 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
143 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
144 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
145 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
146 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
147 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
148
149 #define DDF_GUID_LEN 24
150 #define DDF_REVISION_0 "01.00.00"
151 #define DDF_REVISION_2 "01.02.00"
152
153 struct ddf_header {
154 be32 magic; /* DDF_HEADER_MAGIC */
155 be32 crc;
156 char guid[DDF_GUID_LEN];
157 char revision[8]; /* 01.02.00 */
158 be32 seq; /* starts at '1' */
159 be32 timestamp;
160 __u8 openflag;
161 __u8 foreignflag;
162 __u8 enforcegroups;
163 __u8 pad0; /* 0xff */
164 __u8 pad1[12]; /* 12 * 0xff */
165 /* 64 bytes so far */
166 __u8 header_ext[32]; /* reserved: fill with 0xff */
167 be64 primary_lba;
168 be64 secondary_lba;
169 __u8 type;
170 __u8 pad2[3]; /* 0xff */
171 be32 workspace_len; /* sectors for vendor space -
172 * at least 32768(sectors) */
173 be64 workspace_lba;
174 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
175 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
176 be16 max_partitions; /* i.e. max num of configuration
177 record entries per disk */
178 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
179 *12/512) */
180 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
181 __u8 pad3[54]; /* 0xff */
182 /* 192 bytes so far */
183 be32 controller_section_offset;
184 be32 controller_section_length;
185 be32 phys_section_offset;
186 be32 phys_section_length;
187 be32 virt_section_offset;
188 be32 virt_section_length;
189 be32 config_section_offset;
190 be32 config_section_length;
191 be32 data_section_offset;
192 be32 data_section_length;
193 be32 bbm_section_offset;
194 be32 bbm_section_length;
195 be32 diag_space_offset;
196 be32 diag_space_length;
197 be32 vendor_offset;
198 be32 vendor_length;
199 /* 256 bytes so far */
200 __u8 pad4[256]; /* 0xff */
201 };
202
203 /* type field */
204 #define DDF_HEADER_ANCHOR 0x00
205 #define DDF_HEADER_PRIMARY 0x01
206 #define DDF_HEADER_SECONDARY 0x02
207
208 /* The content of the 'controller section' - global scope */
209 struct ddf_controller_data {
210 be32 magic; /* DDF_CONTROLLER_MAGIC */
211 be32 crc;
212 char guid[DDF_GUID_LEN];
213 struct controller_type {
214 be16 vendor_id;
215 be16 device_id;
216 be16 sub_vendor_id;
217 be16 sub_device_id;
218 } type;
219 char product_id[16];
220 __u8 pad[8]; /* 0xff */
221 __u8 vendor_data[448];
222 };
223
224 /* The content of phys_section - global scope */
225 struct phys_disk {
226 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
227 be32 crc;
228 be16 used_pdes;
229 be16 max_pdes;
230 __u8 pad[52];
231 struct phys_disk_entry {
232 char guid[DDF_GUID_LEN];
233 be32 refnum;
234 be16 type;
235 be16 state;
236 be64 config_size; /* DDF structures must be after here */
237 char path[18]; /* another horrible structure really */
238 __u8 pad[6];
239 } entries[0];
240 };
241
242 /* phys_disk_entry.type is a bitmap - bigendian remember */
243 #define DDF_Forced_PD_GUID 1
244 #define DDF_Active_in_VD 2
245 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
246 #define DDF_Spare 8 /* overrides Global_spare */
247 #define DDF_Foreign 16
248 #define DDF_Legacy 32 /* no DDF on this device */
249
250 #define DDF_Interface_mask 0xf00
251 #define DDF_Interface_SCSI 0x100
252 #define DDF_Interface_SAS 0x200
253 #define DDF_Interface_SATA 0x300
254 #define DDF_Interface_FC 0x400
255
256 /* phys_disk_entry.state is a bigendian bitmap */
257 #define DDF_Online 1
258 #define DDF_Failed 2 /* overrides 1,4,8 */
259 #define DDF_Rebuilding 4
260 #define DDF_Transition 8
261 #define DDF_SMART 16
262 #define DDF_ReadErrors 32
263 #define DDF_Missing 64
264
265 /* The content of the virt_section global scope */
266 struct virtual_disk {
267 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
268 be32 crc;
269 be16 populated_vdes;
270 be16 max_vdes;
271 __u8 pad[52];
272 struct virtual_entry {
273 char guid[DDF_GUID_LEN];
274 be16 unit;
275 __u16 pad0; /* 0xffff */
276 be16 guid_crc;
277 be16 type;
278 __u8 state;
279 __u8 init_state;
280 __u8 pad1[14];
281 char name[16];
282 } entries[0];
283 };
284
285 /* virtual_entry.type is a bitmap - bigendian */
286 #define DDF_Shared 1
287 #define DDF_Enforce_Groups 2
288 #define DDF_Unicode 4
289 #define DDF_Owner_Valid 8
290
291 /* virtual_entry.state is a bigendian bitmap */
292 #define DDF_state_mask 0x7
293 #define DDF_state_optimal 0x0
294 #define DDF_state_degraded 0x1
295 #define DDF_state_deleted 0x2
296 #define DDF_state_missing 0x3
297 #define DDF_state_failed 0x4
298 #define DDF_state_part_optimal 0x5
299
300 #define DDF_state_morphing 0x8
301 #define DDF_state_inconsistent 0x10
302
303 /* virtual_entry.init_state is a bigendian bitmap */
304 #define DDF_initstate_mask 0x03
305 #define DDF_init_not 0x00
306 #define DDF_init_quick 0x01 /* initialisation in progress,
307 * i.e. 'state_inconsistent' */
308 #define DDF_init_full 0x02
309
310 #define DDF_access_mask 0xc0
311 #define DDF_access_rw 0x00
312 #define DDF_access_ro 0x80
313 #define DDF_access_blocked 0xc0
314
315 /* The content of the config_section - local scope
316 * It has multiple records each config_record_len sectors
317 * They can be vd_config or spare_assign
318 */
319
320 struct vd_config {
321 be32 magic; /* DDF_VD_CONF_MAGIC */
322 be32 crc;
323 char guid[DDF_GUID_LEN];
324 be32 timestamp;
325 be32 seqnum;
326 __u8 pad0[24];
327 be16 prim_elmnt_count;
328 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
329 __u8 prl;
330 __u8 rlq;
331 __u8 sec_elmnt_count;
332 __u8 sec_elmnt_seq;
333 __u8 srl;
334 be64 blocks; /* blocks per component could be different
335 * on different component devices...(only
336 * for concat I hope) */
337 be64 array_blocks; /* blocks in array */
338 __u8 pad1[8];
339 be32 spare_refs[8];
340 __u8 cache_pol[8];
341 __u8 bg_rate;
342 __u8 pad2[3];
343 __u8 pad3[52];
344 __u8 pad4[192];
345 __u8 v0[32]; /* reserved- 0xff */
346 __u8 v1[32]; /* reserved- 0xff */
347 __u8 v2[16]; /* reserved- 0xff */
348 __u8 v3[16]; /* reserved- 0xff */
349 __u8 vendor[32];
350 be32 phys_refnum[0]; /* refnum of each disk in sequence */
351 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
352 bvd are always the same size */
353 };
354 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
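/* Layout note (illustrative): a config record holds mppe refnums
 * followed immediately by mppe 64-bit LBA offsets, so for slot i of a
 * BVD vd,
 *	be32 ref   = vd->phys_refnum[i];
 *	be64 start = LBA_OFFSET(ddf, vd)[i];
 * give the physical-disk reference and the starting LBA of that disk's
 * extent within the virtual disk.
 */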
355
356 /* vd_config.cache_pol[7] is a bitmap */
357 #define DDF_cache_writeback 1 /* else writethrough */
358 #define DDF_cache_wadaptive 2 /* only applies if writeback */
359 #define DDF_cache_readahead 4
360 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
361 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
362 #define DDF_cache_wallowed 32 /* enable write caching */
363 #define DDF_cache_rallowed 64 /* enable read caching */
364
365 struct spare_assign {
366 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
367 be32 crc;
368 be32 timestamp;
369 __u8 reserved[7];
370 __u8 type;
371 be16 populated; /* SAEs used */
372 be16 max; /* max SAEs */
373 __u8 pad[8];
374 struct spare_assign_entry {
375 char guid[DDF_GUID_LEN];
376 be16 secondary_element;
377 __u8 pad[6];
378 } spare_ents[0];
379 };
380 /* spare_assign.type is a bitmap */
381 #define DDF_spare_dedicated 0x1 /* else global */
382 #define DDF_spare_revertible 0x2 /* else committable */
383 #define DDF_spare_active 0x4 /* else not active */
384 #define DDF_spare_affinity 0x8 /* enclosure affinity */
385
386 /* The data_section contents - local scope */
387 struct disk_data {
388 be32 magic; /* DDF_PHYS_DATA_MAGIC */
389 be32 crc;
390 char guid[DDF_GUID_LEN];
391 be32 refnum; /* crc of some magic drive data ... */
392 __u8 forced_ref; /* set when above was not result of magic */
393 __u8 forced_guid; /* set if guid was forced rather than magic */
394 __u8 vendor[32];
395 __u8 pad[442];
396 };
397
398 /* bbm_section content */
399 struct bad_block_log {
400 be32 magic;
401 be32 crc;
402 be16 entry_count;
403 be32 spare_count;
404 __u8 pad[10];
405 be64 first_spare;
406 struct mapped_block {
407 be64 defective_start;
408 be32 replacement_start;
409 be16 remap_count;
410 __u8 pad[2];
411 } entries[0];
412 };
413
414 /* Struct for internally holding ddf structures */
415 /* The DDF structure stored on each device is potentially
416 * quite different, as some data is global and some is local.
417 * The global data is:
418 * - ddf header
419 * - controller_data
420 * - Physical disk records
421 * - Virtual disk records
422 * The local data is:
423 * - Configuration records
424 * - Physical Disk data section
425 * ( and Bad block and vendor which I don't care about yet).
426 *
427 * The local data is parsed into separate lists as it is read
428 * and reconstructed for writing. This means that we only need
429 * to make config changes once and they are automatically
430 * propagated to all devices.
431 * Note that the ddf_super has space for the conf and disk data
432 * for this disk and also for a list of all such data.
433 * The list is only used for the superblock that is being
434 * built in Create or Assemble to describe the whole array.
435 */
436 struct ddf_super {
437 struct ddf_header anchor, primary, secondary;
438 struct ddf_controller_data controller;
439 struct ddf_header *active;
440 struct phys_disk *phys;
441 struct virtual_disk *virt;
442 int pdsize, vdsize;
443 unsigned int max_part, mppe, conf_rec_len;
444 int currentdev;
445 int updates_pending;
446 struct vcl {
447 union {
448 char space[512];
449 struct {
450 struct vcl *next;
451 unsigned int vcnum; /* index into ->virt */
452 struct vd_config **other_bvds;
453 __u64 *block_sizes; /* NULL if all the same */
454 };
455 };
456 struct vd_config conf;
457 } *conflist, *currentconf;
458 struct dl {
459 union {
460 char space[512];
461 struct {
462 struct dl *next;
463 int major, minor;
464 char *devname;
465 int fd;
466 unsigned long long size; /* sectors */
467 be64 primary_lba; /* sectors */
468 be64 secondary_lba; /* sectors */
469 be64 workspace_lba; /* sectors */
470 int pdnum; /* index in ->phys */
471 struct spare_assign *spare;
472 void *mdupdate; /* hold metadata update */
473
474 /* These fields used by auto-layout */
475 int raiddisk; /* slot to fill in autolayout */
476 __u64 esize;
477 };
478 };
479 struct disk_data disk;
480 struct vcl *vlist[0]; /* max_part in size */
481 } *dlist, *add_list;
482 };
483
484 #ifndef offsetof
485 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
486 #endif
487
488 #if DEBUG
489 static int all_ff(const char *guid);
490 static void pr_state(struct ddf_super *ddf, const char *msg)
491 {
492 unsigned int i;
493 dprintf("%s/%s: ", __func__, msg);
494 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
495 if (all_ff(ddf->virt->entries[i].guid))
496 continue;
497 dprintf("%u(s=%02x i=%02x) ", i,
498 ddf->virt->entries[i].state,
499 ddf->virt->entries[i].init_state);
500 }
501 dprintf("\n");
502 }
503 #else
504 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
505 #endif
506
507 static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
508 {
509 ddf->updates_pending = 1;
510 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
511 pr_state(ddf, func);
512 }
513
514 #define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)
515
516 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
517 be32 refnum, unsigned int nmax,
518 const struct vd_config **bvd,
519 unsigned int *idx);
520
521 static be32 calc_crc(void *buf, int len)
522 {
523 /* crcs are always at the same place as in the ddf_header */
524 struct ddf_header *ddf = buf;
525 be32 oldcrc = ddf->crc;
526 __u32 newcrc;
527 ddf->crc = cpu_to_be32(0xffffffff);
528
529 newcrc = crc32(0, buf, len);
530 ddf->crc = oldcrc;
531 /* The crc is stored (like everything) bigendian, so convert
532 * here for simplicity
533 */
534 return cpu_to_be32(newcrc);
535 }
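/* Typical use (illustrative): a header or section is accepted only when
 * a freshly computed CRC over the whole block matches the stored one:
 *
 *	if (!be32_eq(calc_crc(hdr, 512), hdr->crc))
 *		return 0;
 *
 * as load_ddf_header() below does.
 */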
536
537 #define DDF_INVALID_LEVEL 0xff
538 #define DDF_NO_SECONDARY 0xff
539 static int err_bad_md_layout(const mdu_array_info_t *array)
540 {
541 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
542 array->level, array->layout, array->raid_disks);
543 return -1;
544 }
545
546 static int layout_md2ddf(const mdu_array_info_t *array,
547 struct vd_config *conf)
548 {
549 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
550 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
551 __u8 sec_elmnt_count = 1;
552 __u8 srl = DDF_NO_SECONDARY;
553
554 switch (array->level) {
555 case LEVEL_LINEAR:
556 prl = DDF_CONCAT;
557 break;
558 case 0:
559 rlq = DDF_RAID0_SIMPLE;
560 prl = DDF_RAID0;
561 break;
562 case 1:
563 switch (array->raid_disks) {
564 case 2:
565 rlq = DDF_RAID1_SIMPLE;
566 break;
567 case 3:
568 rlq = DDF_RAID1_MULTI;
569 break;
570 default:
571 return err_bad_md_layout(array);
572 }
573 prl = DDF_RAID1;
574 break;
575 case 4:
576 if (array->layout != 0)
577 return err_bad_md_layout(array);
578 rlq = DDF_RAID4_N;
579 prl = DDF_RAID4;
580 break;
581 case 5:
582 switch (array->layout) {
583 case ALGORITHM_LEFT_ASYMMETRIC:
584 rlq = DDF_RAID5_N_RESTART;
585 break;
586 case ALGORITHM_RIGHT_ASYMMETRIC:
587 rlq = DDF_RAID5_0_RESTART;
588 break;
589 case ALGORITHM_LEFT_SYMMETRIC:
590 rlq = DDF_RAID5_N_CONTINUE;
591 break;
592 case ALGORITHM_RIGHT_SYMMETRIC:
593 /* not mentioned in standard */
594 default:
595 return err_bad_md_layout(array);
596 }
597 prl = DDF_RAID5;
598 break;
599 case 6:
600 switch (array->layout) {
601 case ALGORITHM_ROTATING_N_RESTART:
602 rlq = DDF_RAID5_N_RESTART;
603 break;
604 case ALGORITHM_ROTATING_ZERO_RESTART:
605 rlq = DDF_RAID6_0_RESTART;
606 break;
607 case ALGORITHM_ROTATING_N_CONTINUE:
608 rlq = DDF_RAID5_N_CONTINUE;
609 break;
610 default:
611 return err_bad_md_layout(array);
612 }
613 prl = DDF_RAID6;
614 break;
615 case 10:
616 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
617 rlq = DDF_RAID1_SIMPLE;
618 prim_elmnt_count = cpu_to_be16(2);
619 sec_elmnt_count = array->raid_disks / 2;
620 } else if (array->raid_disks % 3 == 0
621 && array->layout == 0x103) {
622 rlq = DDF_RAID1_MULTI;
623 prim_elmnt_count = cpu_to_be16(3);
624 sec_elmnt_count = array->raid_disks / 3;
625 } else
626 return err_bad_md_layout(array);
627 srl = DDF_2SPANNED;
628 prl = DDF_RAID1;
629 break;
630 default:
631 return err_bad_md_layout(array);
632 }
633 conf->prl = prl;
634 conf->prim_elmnt_count = prim_elmnt_count;
635 conf->rlq = rlq;
636 conf->srl = srl;
637 conf->sec_elmnt_count = sec_elmnt_count;
638 return 0;
639 }
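/* Worked example (illustrative): a 6-disk md RAID10 with layout 0x102
 * (near=2) maps to prl=DDF_RAID1, rlq=DDF_RAID1_SIMPLE,
 * prim_elmnt_count=2, sec_elmnt_count=3 and srl=DDF_2SPANNED, i.e.
 * three spanned two-way mirrors; layout_ddf2md() below inverts this
 * mapping.
 */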
640
641 static int err_bad_ddf_layout(const struct vd_config *conf)
642 {
643 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
644 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
645 return -1;
646 }
647
648 static int layout_ddf2md(const struct vd_config *conf,
649 mdu_array_info_t *array)
650 {
651 int level = LEVEL_UNSUPPORTED;
652 int layout = 0;
653 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
654
655 if (conf->sec_elmnt_count > 1) {
656 /* see also check_secondary() */
657 if (conf->prl != DDF_RAID1 ||
658 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
659 pr_err("Unsupported secondary RAID level %u/%u\n",
660 conf->prl, conf->srl);
661 return -1;
662 }
663 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
664 layout = 0x102;
665 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
666 layout = 0x103;
667 else
668 return err_bad_ddf_layout(conf);
669 raiddisks *= conf->sec_elmnt_count;
670 level = 10;
671 goto good;
672 }
673
674 switch (conf->prl) {
675 case DDF_CONCAT:
676 level = LEVEL_LINEAR;
677 break;
678 case DDF_RAID0:
679 if (conf->rlq != DDF_RAID0_SIMPLE)
680 return err_bad_ddf_layout(conf);
681 level = 0;
682 break;
683 case DDF_RAID1:
684 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
685 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
686 return err_bad_ddf_layout(conf);
687 level = 1;
688 break;
689 case DDF_RAID4:
690 if (conf->rlq != DDF_RAID4_N)
691 return err_bad_ddf_layout(conf);
692 level = 4;
693 break;
694 case DDF_RAID5:
695 switch (conf->rlq) {
696 case DDF_RAID5_N_RESTART:
697 layout = ALGORITHM_LEFT_ASYMMETRIC;
698 break;
699 case DDF_RAID5_0_RESTART:
700 layout = ALGORITHM_RIGHT_ASYMMETRIC;
701 break;
702 case DDF_RAID5_N_CONTINUE:
703 layout = ALGORITHM_LEFT_SYMMETRIC;
704 break;
705 default:
706 return err_bad_ddf_layout(conf);
707 }
708 level = 5;
709 break;
710 case DDF_RAID6:
711 switch (conf->rlq) {
712 case DDF_RAID5_N_RESTART:
713 layout = ALGORITHM_ROTATING_N_RESTART;
714 break;
715 case DDF_RAID6_0_RESTART:
716 layout = ALGORITHM_ROTATING_ZERO_RESTART;
717 break;
718 case DDF_RAID5_N_CONTINUE:
719 layout = ALGORITHM_ROTATING_N_CONTINUE;
720 break;
721 default:
722 return err_bad_ddf_layout(conf);
723 }
724 level = 6;
725 break;
726 default:
727 return err_bad_ddf_layout(conf);
728 };
729
730 good:
731 array->level = level;
732 array->layout = layout;
733 array->raid_disks = raiddisks;
734 return 0;
735 }
736
737 static int load_ddf_header(int fd, unsigned long long lba,
738 unsigned long long size,
739 int type,
740 struct ddf_header *hdr, struct ddf_header *anchor)
741 {
742 /* read a ddf header (primary or secondary) from fd/lba
743 * and check that it is consistent with anchor
744 * Need to check:
745 * magic, crc, guid, rev, the LBAs, and header_type; and
746 * everything after header_type must be the same
747 */
748 if (lba >= size-1)
749 return 0;
750
751 if (lseek64(fd, lba<<9, 0) < 0)
752 return 0;
753
754 if (read(fd, hdr, 512) != 512)
755 return 0;
756
757 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC))
758 return 0;
759 if (!be32_eq(calc_crc(hdr, 512), hdr->crc))
760 return 0;
761 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
762 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
763 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
764 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
765 hdr->type != type ||
766 memcmp(anchor->pad2, hdr->pad2, 512 -
767 offsetof(struct ddf_header, pad2)) != 0)
768 return 0;
769
770 /* Looks good enough to me... */
771 return 1;
772 }
773
774 static void *load_section(int fd, struct ddf_super *super, void *buf,
775 be32 offset_be, be32 len_be, int check)
776 {
777 unsigned long long offset = be32_to_cpu(offset_be);
778 unsigned long long len = be32_to_cpu(len_be);
779 int dofree = (buf == NULL);
780
781 if (check)
782 if (len != 2 && len != 8 && len != 32
783 && len != 128 && len != 512)
784 return NULL;
785
786 if (len > 1024)
787 return NULL;
788 if (buf) {
789 /* All pre-allocated sections are a single block */
790 if (len != 1)
791 return NULL;
792 } else if (posix_memalign(&buf, 512, len<<9) != 0)
793 buf = NULL;
794
795 if (!buf)
796 return NULL;
797
798 if (super->active->type == 1)
799 offset += be64_to_cpu(super->active->primary_lba);
800 else
801 offset += be64_to_cpu(super->active->secondary_lba);
802
803 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
804 if (dofree)
805 free(buf);
806 return NULL;
807 }
808 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
809 if (dofree)
810 free(buf);
811 return NULL;
812 }
813 return buf;
814 }
815
816 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
817 {
818 unsigned long long dsize;
819
820 get_dev_size(fd, NULL, &dsize);
821
822 if (lseek64(fd, dsize-512, 0) < 0) {
823 if (devname)
824 pr_err("Cannot seek to anchor block on %s: %s\n",
825 devname, strerror(errno));
826 return 1;
827 }
828 if (read(fd, &super->anchor, 512) != 512) {
829 if (devname)
830 pr_err("Cannot read anchor block on %s: %s\n",
831 devname, strerror(errno));
832 return 1;
833 }
834 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
835 if (devname)
836 pr_err("no DDF anchor found on %s\n",
837 devname);
838 return 2;
839 }
840 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
841 if (devname)
842 pr_err("bad CRC on anchor on %s\n",
843 devname);
844 return 2;
845 }
846 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
847 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
848 if (devname)
849 pr_err("can only support super revision"
850 " %.8s and earlier, not %.8s on %s\n",
851 DDF_REVISION_2, super->anchor.revision,devname);
852 return 2;
853 }
854 super->active = NULL;
855 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
856 dsize >> 9, 1,
857 &super->primary, &super->anchor) == 0) {
858 if (devname)
859 pr_err("Failed to load primary DDF header "
860 "on %s\n", devname);
861 } else
862 super->active = &super->primary;
863
864 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
865 dsize >> 9, 2,
866 &super->secondary, &super->anchor)) {
867 if (super->active == NULL
868 || (be32_to_cpu(super->primary.seq)
869 < be32_to_cpu(super->secondary.seq) &&
870 !super->secondary.openflag)
871 || (be32_to_cpu(super->primary.seq)
872 == be32_to_cpu(super->secondary.seq) &&
873 super->primary.openflag && !super->secondary.openflag)
874 )
875 super->active = &super->secondary;
876 } else if (devname)
877 pr_err("Failed to load secondary DDF header on %s\n",
878 devname);
879 if (super->active == NULL)
880 return 2;
881 return 0;
882 }
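/* Selection summary (illustrative): when both headers load, the
 * secondary becomes the active header only if it has a higher sequence
 * number and is not marked open, or the sequence numbers are equal and
 * only the primary is marked open; otherwise the primary stays active.
 */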
883
884 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
885 {
886 void *ok;
887 ok = load_section(fd, super, &super->controller,
888 super->active->controller_section_offset,
889 super->active->controller_section_length,
890 0);
891 super->phys = load_section(fd, super, NULL,
892 super->active->phys_section_offset,
893 super->active->phys_section_length,
894 1);
895 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
896
897 super->virt = load_section(fd, super, NULL,
898 super->active->virt_section_offset,
899 super->active->virt_section_length,
900 1);
901 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
902 if (!ok ||
903 !super->phys ||
904 !super->virt) {
905 free(super->phys);
906 free(super->virt);
907 super->phys = NULL;
908 super->virt = NULL;
909 return 2;
910 }
911 super->conflist = NULL;
912 super->dlist = NULL;
913
914 super->max_part = be16_to_cpu(super->active->max_partitions);
915 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
916 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
917 return 0;
918 }
919
920 #define DDF_UNUSED_BVD 0xff
921 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
922 {
923 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
924 unsigned int i, vdsize;
925 void *p;
926 if (n_vds == 0) {
927 vcl->other_bvds = NULL;
928 return 0;
929 }
930 vdsize = ddf->conf_rec_len * 512;
931 if (posix_memalign(&p, 512, n_vds *
932 (vdsize + sizeof(struct vd_config *))) != 0)
933 return -1;
934 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
935 for (i = 0; i < n_vds; i++) {
936 vcl->other_bvds[i] = p + i * vdsize;
937 memset(vcl->other_bvds[i], 0, vdsize);
938 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
939 }
940 return 0;
941 }
942
943 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
944 unsigned int len)
945 {
946 int i;
947 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
948 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
949 break;
950
951 if (i < vcl->conf.sec_elmnt_count-1) {
952 if (be32_to_cpu(vd->seqnum) <=
953 be32_to_cpu(vcl->other_bvds[i]->seqnum))
954 return;
955 } else {
956 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
957 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
958 break;
959 if (i == vcl->conf.sec_elmnt_count-1) {
960 pr_err("no space for sec level config %u, count is %u\n",
961 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
962 return;
963 }
964 }
965 memcpy(vcl->other_bvds[i], vd, len);
966 }
967
968 static int load_ddf_local(int fd, struct ddf_super *super,
969 char *devname, int keep)
970 {
971 struct dl *dl;
972 struct stat stb;
973 char *conf;
974 unsigned int i;
975 unsigned int confsec;
976 int vnum;
977 unsigned int max_virt_disks = be16_to_cpu
978 (super->active->max_vd_entries);
979 unsigned long long dsize;
980
981 /* First the local disk info */
982 if (posix_memalign((void**)&dl, 512,
983 sizeof(*dl) +
984 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
985 pr_err("%s could not allocate disk info buffer\n",
986 __func__);
987 return 1;
988 }
989
990 load_section(fd, super, &dl->disk,
991 super->active->data_section_offset,
992 super->active->data_section_length,
993 0);
994 dl->devname = devname ? xstrdup(devname) : NULL;
995
996 fstat(fd, &stb);
997 dl->major = major(stb.st_rdev);
998 dl->minor = minor(stb.st_rdev);
999 dl->next = super->dlist;
1000 dl->fd = keep ? fd : -1;
1001
1002 dl->size = 0;
1003 if (get_dev_size(fd, devname, &dsize))
1004 dl->size = dsize >> 9;
1005 /* If the disks have different sizes, the LBAs will differ
1006 * between phys disks.
1007 * At this point, the values in super->active must be valid
1008 * for this phys disk. */
1009 dl->primary_lba = super->active->primary_lba;
1010 dl->secondary_lba = super->active->secondary_lba;
1011 dl->workspace_lba = super->active->workspace_lba;
1012 dl->spare = NULL;
1013 for (i = 0 ; i < super->max_part ; i++)
1014 dl->vlist[i] = NULL;
1015 super->dlist = dl;
1016 dl->pdnum = -1;
1017 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1018 if (memcmp(super->phys->entries[i].guid,
1019 dl->disk.guid, DDF_GUID_LEN) == 0)
1020 dl->pdnum = i;
1021
1022 /* Now the config list. */
1023 /* 'conf' is an array of config entries, some of which are
1024 * probably invalid. Those which are good need to be copied into
1025 * the conflist
1026 */
1027
1028 conf = load_section(fd, super, NULL,
1029 super->active->config_section_offset,
1030 super->active->config_section_length,
1031 0);
1032
1033 vnum = 0;
1034 for (confsec = 0;
1035 confsec < be32_to_cpu(super->active->config_section_length);
1036 confsec += super->conf_rec_len) {
1037 struct vd_config *vd =
1038 (struct vd_config *)((char*)conf + confsec*512);
1039 struct vcl *vcl;
1040
1041 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1042 if (dl->spare)
1043 continue;
1044 if (posix_memalign((void**)&dl->spare, 512,
1045 super->conf_rec_len*512) != 0) {
1046 pr_err("%s could not allocate spare info buf\n",
1047 __func__);
1048 return 1;
1049 }
1050
1051 memcpy(dl->spare, vd, super->conf_rec_len*512);
1052 continue;
1053 }
1054 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1055 continue;
1056 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1057 if (memcmp(vcl->conf.guid,
1058 vd->guid, DDF_GUID_LEN) == 0)
1059 break;
1060 }
1061
1062 if (vcl) {
1063 dl->vlist[vnum++] = vcl;
1064 if (vcl->other_bvds != NULL &&
1065 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1066 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1067 continue;
1068 }
1069 if (be32_to_cpu(vd->seqnum) <=
1070 be32_to_cpu(vcl->conf.seqnum))
1071 continue;
1072 } else {
1073 if (posix_memalign((void**)&vcl, 512,
1074 (super->conf_rec_len*512 +
1075 offsetof(struct vcl, conf))) != 0) {
1076 pr_err("%s could not allocate vcl buf\n",
1077 __func__);
1078 return 1;
1079 }
1080 vcl->next = super->conflist;
1081 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1082 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1083 if (alloc_other_bvds(super, vcl) != 0) {
1084 pr_err("%s could not allocate other bvds\n",
1085 __func__);
1086 free(vcl);
1087 return 1;
1088 };
1089 super->conflist = vcl;
1090 dl->vlist[vnum++] = vcl;
1091 }
1092 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1093 for (i=0; i < max_virt_disks ; i++)
1094 if (memcmp(super->virt->entries[i].guid,
1095 vcl->conf.guid, DDF_GUID_LEN)==0)
1096 break;
1097 if (i < max_virt_disks)
1098 vcl->vcnum = i;
1099 }
1100 free(conf);
1101
1102 return 0;
1103 }
1104
1105 #ifndef MDASSEMBLE
1106 static int load_super_ddf_all(struct supertype *st, int fd,
1107 void **sbp, char *devname);
1108 #endif
1109
1110 static void free_super_ddf(struct supertype *st);
1111
1112 static int load_super_ddf(struct supertype *st, int fd,
1113 char *devname)
1114 {
1115 unsigned long long dsize;
1116 struct ddf_super *super;
1117 int rv;
1118
1119 if (get_dev_size(fd, devname, &dsize) == 0)
1120 return 1;
1121
1122 if (!st->ignore_hw_compat && test_partition(fd))
1123 /* DDF is not allowed on partitions */
1124 return 1;
1125
1126 /* 32M is a lower bound */
1127 if (dsize <= 32*1024*1024) {
1128 if (devname)
1129 pr_err("%s is too small for ddf: "
1130 "size is %llu sectors.\n",
1131 devname, dsize>>9);
1132 return 1;
1133 }
1134 if (dsize & 511) {
1135 if (devname)
1136 pr_err("%s is an odd size for ddf: "
1137 "size is %llu bytes.\n",
1138 devname, dsize);
1139 return 1;
1140 }
1141
1142 free_super_ddf(st);
1143
1144 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1145 pr_err("malloc of %zu failed.\n",
1146 sizeof(*super));
1147 return 1;
1148 }
1149 memset(super, 0, sizeof(*super));
1150
1151 rv = load_ddf_headers(fd, super, devname);
1152 if (rv) {
1153 free(super);
1154 return rv;
1155 }
1156
1157 /* Have valid headers and have chosen the best. Let's read in the rest */
1158
1159 rv = load_ddf_global(fd, super, devname);
1160
1161 if (rv) {
1162 if (devname)
1163 pr_err("Failed to load all information "
1164 "sections on %s\n", devname);
1165 free(super);
1166 return rv;
1167 }
1168
1169 rv = load_ddf_local(fd, super, devname, 0);
1170
1171 if (rv) {
1172 if (devname)
1173 pr_err("Failed to load all information "
1174 "sections on %s\n", devname);
1175 free(super);
1176 return rv;
1177 }
1178
1179 /* Should possibly check the sections .... */
1180
1181 st->sb = super;
1182 if (st->ss == NULL) {
1183 st->ss = &super_ddf;
1184 st->minor_version = 0;
1185 st->max_devs = 512;
1186 }
1187 return 0;
1188
1189 }
1190
1191 static void free_super_ddf(struct supertype *st)
1192 {
1193 struct ddf_super *ddf = st->sb;
1194 if (ddf == NULL)
1195 return;
1196 free(ddf->phys);
1197 free(ddf->virt);
1198 while (ddf->conflist) {
1199 struct vcl *v = ddf->conflist;
1200 ddf->conflist = v->next;
1201 if (v->block_sizes)
1202 free(v->block_sizes);
1203 if (v->other_bvds)
1204 /*
1205 v->other_bvds[0] points to beginning of buffer,
1206 see alloc_other_bvds()
1207 */
1208 free(v->other_bvds[0]);
1209 free(v);
1210 }
1211 while (ddf->dlist) {
1212 struct dl *d = ddf->dlist;
1213 ddf->dlist = d->next;
1214 if (d->fd >= 0)
1215 close(d->fd);
1216 if (d->spare)
1217 free(d->spare);
1218 free(d);
1219 }
1220 while (ddf->add_list) {
1221 struct dl *d = ddf->add_list;
1222 ddf->add_list = d->next;
1223 if (d->fd >= 0)
1224 close(d->fd);
1225 if (d->spare)
1226 free(d->spare);
1227 free(d);
1228 }
1229 free(ddf);
1230 st->sb = NULL;
1231 }
1232
1233 static struct supertype *match_metadata_desc_ddf(char *arg)
1234 {
1235 /* 'ddf' only supports containers */
1236 struct supertype *st;
1237 if (strcmp(arg, "ddf") != 0 &&
1238 strcmp(arg, "default") != 0
1239 )
1240 return NULL;
1241
1242 st = xcalloc(1, sizeof(*st));
1243 st->ss = &super_ddf;
1244 st->max_devs = 512;
1245 st->minor_version = 0;
1246 st->sb = NULL;
1247 return st;
1248 }
1249
1250 #ifndef MDASSEMBLE
1251
1252 static mapping_t ddf_state[] = {
1253 { "Optimal", 0},
1254 { "Degraded", 1},
1255 { "Deleted", 2},
1256 { "Missing", 3},
1257 { "Failed", 4},
1258 { "Partially Optimal", 5},
1259 { "-reserved-", 6},
1260 { "-reserved-", 7},
1261 { NULL, 0}
1262 };
1263
1264 static mapping_t ddf_init_state[] = {
1265 { "Not Initialised", 0},
1266 { "QuickInit in Progress", 1},
1267 { "Fully Initialised", 2},
1268 { "*UNKNOWN*", 3},
1269 { NULL, 0}
1270 };
1271 static mapping_t ddf_access[] = {
1272 { "Read/Write", 0},
1273 { "Reserved", 1},
1274 { "Read Only", 2},
1275 { "Blocked (no access)", 3},
1276 { NULL ,0}
1277 };
1278
1279 static mapping_t ddf_level[] = {
1280 { "RAID0", DDF_RAID0},
1281 { "RAID1", DDF_RAID1},
1282 { "RAID3", DDF_RAID3},
1283 { "RAID4", DDF_RAID4},
1284 { "RAID5", DDF_RAID5},
1285 { "RAID1E",DDF_RAID1E},
1286 { "JBOD", DDF_JBOD},
1287 { "CONCAT",DDF_CONCAT},
1288 { "RAID5E",DDF_RAID5E},
1289 { "RAID5EE",DDF_RAID5EE},
1290 { "RAID6", DDF_RAID6},
1291 { NULL, 0}
1292 };
1293 static mapping_t ddf_sec_level[] = {
1294 { "Striped", DDF_2STRIPED},
1295 { "Mirrored", DDF_2MIRRORED},
1296 { "Concat", DDF_2CONCAT},
1297 { "Spanned", DDF_2SPANNED},
1298 { NULL, 0}
1299 };
1300 #endif
1301
1302 static int all_ff(const char *guid)
1303 {
1304 int i;
1305 for (i = 0; i < DDF_GUID_LEN; i++)
1306 if (guid[i] != (char)0xff)
1307 return 0;
1308 return 1;
1309 }
1310
1311 static const char *guid_str(const char *guid)
1312 {
1313 static char buf[DDF_GUID_LEN*2+1];
1314 int i;
1315 char *p = buf;
1316 for (i = 0; i < DDF_GUID_LEN; i++) {
1317 unsigned char c = guid[i];
1318 if (c >= 32 && c < 127)
1319 p += sprintf(p, "%c", c);
1320 else
1321 p += sprintf(p, "%02x", c);
1322 }
1323 *p = '\0';
1324 return (const char *) buf;
1325 }
1326
1327 #ifndef MDASSEMBLE
1328 static void print_guid(char *guid, int tstamp)
1329 {
1330 /* GUIDs are part (or all) ASCII and part binary.
1331 * They tend to be space padded.
1332 * We print the GUID in HEX, then in parentheses add
1333 * any initial ASCII sequence, and a possible
1334 * time stamp from bytes 16-19
1335 */
1336 int l = DDF_GUID_LEN;
1337 int i;
1338
1339 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1340 if ((i&3)==0 && i != 0) printf(":");
1341 printf("%02X", guid[i]&255);
1342 }
1343
1344 printf("\n (");
1345 while (l && guid[l-1] == ' ')
1346 l--;
1347 for (i=0 ; i<l ; i++) {
1348 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1349 fputc(guid[i], stdout);
1350 else
1351 break;
1352 }
1353 if (tstamp) {
1354 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1355 char tbuf[100];
1356 struct tm *tm;
1357 tm = localtime(&then);
1358 strftime(tbuf, 100, " %D %T",tm);
1359 fputs(tbuf, stdout);
1360 }
1361 printf(")");
1362 }
1363
1364 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1365 {
1366 int crl = sb->conf_rec_len;
1367 struct vcl *vcl;
1368
1369 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1370 unsigned int i;
1371 struct vd_config *vc = &vcl->conf;
1372
1373 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1374 continue;
1375 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1376 continue;
1377
1378 /* Ok, we know about this VD, let's give more details */
1379 printf(" Raid Devices[%d] : %d (", n,
1380 be16_to_cpu(vc->prim_elmnt_count));
1381 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1382 int j;
1383 int cnt = be16_to_cpu(sb->phys->used_pdes);
1384 for (j=0; j<cnt; j++)
1385 if (be32_eq(vc->phys_refnum[i],
1386 sb->phys->entries[j].refnum))
1387 break;
1388 if (i) printf(" ");
1389 if (j < cnt)
1390 printf("%d", j);
1391 else
1392 printf("--");
1393 }
1394 printf(")\n");
1395 if (vc->chunk_shift != 255)
1396 printf(" Chunk Size[%d] : %d sectors\n", n,
1397 1 << vc->chunk_shift);
1398 printf(" Raid Level[%d] : %s\n", n,
1399 map_num(ddf_level, vc->prl)?:"-unknown-");
1400 if (vc->sec_elmnt_count != 1) {
1401 printf(" Secondary Position[%d] : %d of %d\n", n,
1402 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1403 printf(" Secondary Level[%d] : %s\n", n,
1404 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1405 }
1406 printf(" Device Size[%d] : %llu\n", n,
1407 be64_to_cpu(vc->blocks)/2);
1408 printf(" Array Size[%d] : %llu\n", n,
1409 be64_to_cpu(vc->array_blocks)/2);
1410 }
1411 }
1412
1413 static void examine_vds(struct ddf_super *sb)
1414 {
1415 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1416 unsigned int i;
1417 printf(" Virtual Disks : %d\n", cnt);
1418
1419 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1420 struct virtual_entry *ve = &sb->virt->entries[i];
1421 if (all_ff(ve->guid))
1422 continue;
1423 printf("\n");
1424 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1425 printf("\n");
1426 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1427 printf(" state[%d] : %s, %s%s\n", i,
1428 map_num(ddf_state, ve->state & 7),
1429 (ve->state & 8) ? "Morphing, ": "",
1430 (ve->state & 16)? "Not Consistent" : "Consistent");
1431 printf(" init state[%d] : %s\n", i,
1432 map_num(ddf_init_state, ve->init_state&3));
1433 printf(" access[%d] : %s\n", i,
1434 map_num(ddf_access, (ve->init_state>>6) & 3));
1435 printf(" Name[%d] : %.16s\n", i, ve->name);
1436 examine_vd(i, sb, ve->guid);
1437 }
1438 if (cnt) printf("\n");
1439 }
1440
1441 static void examine_pds(struct ddf_super *sb)
1442 {
1443 int cnt = be16_to_cpu(sb->phys->used_pdes);
1444 int i;
1445 struct dl *dl;
1446 printf(" Physical Disks : %d\n", cnt);
1447 printf(" Number RefNo Size Device Type/State\n");
1448
1449 for (i=0 ; i<cnt ; i++) {
1450 struct phys_disk_entry *pd = &sb->phys->entries[i];
1451 int type = be16_to_cpu(pd->type);
1452 int state = be16_to_cpu(pd->state);
1453
1454 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1455 //printf("\n");
1456 printf(" %3d %08x ", i,
1457 be32_to_cpu(pd->refnum));
1458 printf("%8lluK ",
1459 be64_to_cpu(pd->config_size)>>1);
1460 for (dl = sb->dlist; dl ; dl = dl->next) {
1461 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1462 char *dv = map_dev(dl->major, dl->minor, 0);
1463 if (dv) {
1464 printf("%-15s", dv);
1465 break;
1466 }
1467 }
1468 }
1469 if (!dl)
1470 printf("%15s","");
1471 printf(" %s%s%s%s%s",
1472 (type&2) ? "active":"",
1473 (type&4) ? "Global-Spare":"",
1474 (type&8) ? "spare" : "",
1475 (type&16)? ", foreign" : "",
1476 (type&32)? "pass-through" : "");
1477 if (state & DDF_Failed)
1478 /* This over-rides these three */
1479 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1480 printf("/%s%s%s%s%s%s%s",
1481 (state&1)? "Online": "Offline",
1482 (state&2)? ", Failed": "",
1483 (state&4)? ", Rebuilding": "",
1484 (state&8)? ", in-transition": "",
1485 (state&16)? ", SMART-errors": "",
1486 (state&32)? ", Unrecovered-Read-Errors": "",
1487 (state&64)? ", Missing" : "");
1488 printf("\n");
1489 }
1490 }
1491
1492 static void examine_super_ddf(struct supertype *st, char *homehost)
1493 {
1494 struct ddf_super *sb = st->sb;
1495
1496 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1497 printf(" Version : %.8s\n", sb->anchor.revision);
1498 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1499 printf("\n");
1500 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1501 printf("\n");
1502 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1503 printf(" Redundant hdr : %s\n", be32_eq(sb->secondary.magic,
1504 DDF_HEADER_MAGIC)
1505 ?"yes" : "no");
1506 examine_vds(sb);
1507 examine_pds(sb);
1508 }
1509
1510 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1511
1512 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1513 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1514
1515 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1516 {
1517 /*
1518 * Figure out the VD number for this supertype.
1519 * Returns DDF_CONTAINER for the container itself,
1520 * and DDF_NOTFOUND on error.
1521 */
1522 struct ddf_super *ddf = st->sb;
1523 struct mdinfo *sra;
1524 char *sub, *end;
1525 unsigned int vcnum;
1526
1527 if (*st->container_devnm == '\0')
1528 return DDF_CONTAINER;
1529
1530 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1531 if (!sra || sra->array.major_version != -1 ||
1532 sra->array.minor_version != -2 ||
1533 !is_subarray(sra->text_version))
1534 return DDF_NOTFOUND;
1535
1536 sub = strchr(sra->text_version + 1, '/');
1537 if (sub != NULL)
1538 vcnum = strtoul(sub + 1, &end, 10);
1539 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1540 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1541 return DDF_NOTFOUND;
1542
1543 return vcnum;
1544 }
1545
1546 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1547 {
1548 /* We just write a generic DDF ARRAY entry
1549 */
1550 struct mdinfo info;
1551 char nbuf[64];
1552 getinfo_super_ddf(st, &info, NULL);
1553 fname_from_uuid(st, &info, nbuf, ':');
1554
1555 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1556 }
1557
1558 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1559 {
1560 /* We just write a generic DDF ARRAY entry
1561 */
1562 struct ddf_super *ddf = st->sb;
1563 struct mdinfo info;
1564 unsigned int i;
1565 char nbuf[64];
1566 getinfo_super_ddf(st, &info, NULL);
1567 fname_from_uuid(st, &info, nbuf, ':');
1568
1569 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1570 struct virtual_entry *ve = &ddf->virt->entries[i];
1571 struct vcl vcl;
1572 char nbuf1[64];
1573 if (all_ff(ve->guid))
1574 continue;
1575 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1576 ddf->currentconf =&vcl;
1577 uuid_from_super_ddf(st, info.uuid);
1578 fname_from_uuid(st, &info, nbuf1, ':');
1579 printf("ARRAY container=%s member=%d UUID=%s\n",
1580 nbuf+5, i, nbuf1+5);
1581 }
1582 }
1583
1584 static void export_examine_super_ddf(struct supertype *st)
1585 {
1586 struct mdinfo info;
1587 char nbuf[64];
1588 getinfo_super_ddf(st, &info, NULL);
1589 fname_from_uuid(st, &info, nbuf, ':');
1590 printf("MD_METADATA=ddf\n");
1591 printf("MD_LEVEL=container\n");
1592 printf("MD_UUID=%s\n", nbuf+5);
1593 }
1594
1595 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1596 {
1597 void *buf;
1598 unsigned long long dsize, offset;
1599 int bytes;
1600 struct ddf_header *ddf;
1601 int written = 0;
1602
1603 /* The meta consists of an anchor, a primary, and a secondary.
1604 * This all lives at the end of the device.
1605 * So it is easiest to find the earliest of primary and
1606 * secondary, and copy everything from there.
1607 *
1608 * The anchor is 512 bytes from the end. It contains primary_lba and
1609 * secondary_lba; we copy from whichever of those comes first.
1610 */
1611
1612 if (posix_memalign(&buf, 4096, 4096) != 0)
1613 return 1;
1614
1615 if (!get_dev_size(from, NULL, &dsize))
1616 goto err;
1617
1618 if (lseek64(from, dsize-512, 0) < 0)
1619 goto err;
1620 if (read(from, buf, 512) != 512)
1621 goto err;
1622 ddf = buf;
1623 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1624 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1625 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1626 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1627 goto err;
1628
1629 offset = dsize - 512;
1630 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1631 offset = be64_to_cpu(ddf->primary_lba) << 9;
1632 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1633 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1634
1635 bytes = dsize - offset;
1636
1637 if (lseek64(from, offset, 0) < 0 ||
1638 lseek64(to, offset, 0) < 0)
1639 goto err;
1640 while (written < bytes) {
1641 int n = bytes - written;
1642 if (n > 4096)
1643 n = 4096;
1644 if (read(from, buf, n) != n)
1645 goto err;
1646 if (write(to, buf, n) != n)
1647 goto err;
1648 written += n;
1649 }
1650 free(buf);
1651 return 0;
1652 err:
1653 free(buf);
1654 return 1;
1655 }
1656
1657 static void detail_super_ddf(struct supertype *st, char *homehost)
1658 {
1659 /* FIXME later
1660 * Could print DDF GUID
1661 * Need to find which array
1662 * If whole, briefly list all arrays
1663 * If one, give name
1664 */
1665 }
1666
1667 static void brief_detail_super_ddf(struct supertype *st)
1668 {
1669 struct mdinfo info;
1670 char nbuf[64];
1671 struct ddf_super *ddf = st->sb;
1672 unsigned int vcnum = get_vd_num_of_subarray(st);
1673 if (vcnum == DDF_CONTAINER)
1674 uuid_from_super_ddf(st, info.uuid);
1675 else if (vcnum == DDF_NOTFOUND)
1676 return;
1677 else
1678 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, info.uuid);
1679 fname_from_uuid(st, &info, nbuf,':');
1680 printf(" UUID=%s", nbuf + 5);
1681 }
1682 #endif
1683
1684 static int match_home_ddf(struct supertype *st, char *homehost)
1685 {
1686 /* It matches 'this' host if the controller is a
1687 * Linux-MD controller with vendor_data matching
1688 * the hostname
1689 */
1690 struct ddf_super *ddf = st->sb;
1691 unsigned int len;
1692
1693 if (!homehost)
1694 return 0;
1695 len = strlen(homehost);
1696
1697 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1698 len < sizeof(ddf->controller.vendor_data) &&
1699 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1700 ddf->controller.vendor_data[len] == 0);
1701 }
1702
1703 #ifndef MDASSEMBLE
1704 static int find_index_in_bvd(const struct ddf_super *ddf,
1705 const struct vd_config *conf, unsigned int n,
1706 unsigned int *n_bvd)
1707 {
1708 /*
1709 * Find the index of the n-th valid physical disk in this BVD
1710 */
1711 unsigned int i, j;
1712 for (i = 0, j = 0; i < ddf->mppe &&
1713 j < be16_to_cpu(conf->prim_elmnt_count); i++) {
1714 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1715 if (n == j) {
1716 *n_bvd = i;
1717 return 1;
1718 }
1719 j++;
1720 }
1721 }
1722 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1723 __func__, n, be16_to_cpu(conf->prim_elmnt_count));
1724 return 0;
1725 }
1726
1727 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1728 unsigned int n,
1729 unsigned int *n_bvd, struct vcl **vcl)
1730 {
1731 struct vcl *v;
1732
1733 for (v = ddf->conflist; v; v = v->next) {
1734 unsigned int nsec, ibvd = 0;
1735 struct vd_config *conf;
1736 if (inst != v->vcnum)
1737 continue;
1738 conf = &v->conf;
1739 if (conf->sec_elmnt_count == 1) {
1740 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1741 *vcl = v;
1742 return conf;
1743 } else
1744 goto bad;
1745 }
1746 if (v->other_bvds == NULL) {
1747 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1748 __func__, conf->sec_elmnt_count);
1749 goto bad;
1750 }
1751 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1752 if (conf->sec_elmnt_seq != nsec) {
1753 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1754 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1755 == nsec)
1756 break;
1757 }
1758 if (ibvd == conf->sec_elmnt_count)
1759 goto bad;
1760 conf = v->other_bvds[ibvd-1];
1761 }
1762 if (!find_index_in_bvd(ddf, conf,
1763 n - nsec*conf->sec_elmnt_count, n_bvd))
1764 goto bad;
1765 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1766 , __func__, n, *n_bvd, ibvd, inst);
1767 *vcl = v;
1768 return conf;
1769 }
1770 bad:
1771 pr_err("%s: Couldn't find disk %d in array %u\n", __func__, n, inst);
1772 return NULL;
1773 }
1774 #endif
1775
1776 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1777 {
1778 /* Find the entry in phys_disk which has the given refnum
1779 * and return its index
1780 */
1781 unsigned int i;
1782 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1783 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1784 return i;
1785 return -1;
1786 }
1787
1788 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1789 {
1790 char buf[20];
1791 struct sha1_ctx ctx;
1792 sha1_init_ctx(&ctx);
1793 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1794 sha1_finish_ctx(&ctx, buf);
1795 memcpy(uuid, buf, 4*4);
1796 }
1797
1798 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1799 {
1800 /* The uuid returned here is used for:
1801 * uuid to put into bitmap file (Create, Grow)
1802 * uuid for backup header when saving critical section (Grow)
1803 * comparing uuids when re-adding a device into an array
1804 * In these cases the uuid required is that of the data-array,
1805 * not the device-set.
1806 * uuid to recognise same set when adding a missing device back
1807 * to an array. This is a uuid for the device-set.
1808 *
1809 * For each of these we can make do with a truncated
1810 * or hashed uuid rather than the original, as long as
1811 * everyone agrees.
1812 * In the case of SVD we assume the BVD is of interest,
1813 * though that might not be the case if a bitmap were made for
1814 * a mirrored SVD - worry about that later.
1815 * So we need to find the VD configuration record for the
1816 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1817 * The first 16 bytes of the sha1 of these is used.
1818 */
1819 struct ddf_super *ddf = st->sb;
1820 struct vcl *vcl = ddf->currentconf;
1821 char *guid;
1822
1823 if (vcl)
1824 guid = vcl->conf.guid;
1825 else
1826 guid = ddf->anchor.guid;
1827 uuid_from_ddf_guid(guid, uuid);
1828 }
1829
1830 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1831
1832 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1833 {
1834 struct ddf_super *ddf = st->sb;
1835 int map_disks = info->array.raid_disks;
1836 __u32 *cptr;
1837
1838 if (ddf->currentconf) {
1839 getinfo_super_ddf_bvd(st, info, map);
1840 return;
1841 }
1842 memset(info, 0, sizeof(*info));
1843
1844 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1845 info->array.level = LEVEL_CONTAINER;
1846 info->array.layout = 0;
1847 info->array.md_minor = -1;
1848 cptr = (__u32 *)(ddf->anchor.guid + 16);
1849 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1850
1851 info->array.utime = 0;
1852 info->array.chunk_size = 0;
1853 info->container_enough = 1;
1854
1855 info->disk.major = 0;
1856 info->disk.minor = 0;
1857 if (ddf->dlist) {
1858 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1859 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1860
1861 info->data_offset = be64_to_cpu(ddf->phys->
1862 entries[info->disk.raid_disk].
1863 config_size);
1864 info->component_size = ddf->dlist->size - info->data_offset;
1865 } else {
1866 info->disk.number = -1;
1867 info->disk.raid_disk = -1;
1868 // info->disk.raid_disk = find refnum in the table and use index;
1869 }
1870 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1871
1872 info->recovery_start = MaxSector;
1873 info->reshape_active = 0;
1874 info->recovery_blocked = 0;
1875 info->name[0] = 0;
1876
1877 info->array.major_version = -1;
1878 info->array.minor_version = -2;
1879 strcpy(info->text_version, "ddf");
1880 info->safe_mode_delay = 0;
1881
1882 uuid_from_super_ddf(st, info->uuid);
1883
1884 if (map) {
1885 int i;
1886 for (i = 0 ; i < map_disks; i++) {
1887 if (i < info->array.raid_disks &&
1888 (be16_to_cpu(ddf->phys->entries[i].state)
1889 & DDF_Online) &&
1890 !(be16_to_cpu(ddf->phys->entries[i].state)
1891 & DDF_Failed))
1892 map[i] = 1;
1893 else
1894 map[i] = 0;
1895 }
1896 }
1897 }
1898
1899 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1900 {
1901 struct ddf_super *ddf = st->sb;
1902 struct vcl *vc = ddf->currentconf;
1903 int cd = ddf->currentdev;
1904 int n_prim;
1905 int j;
1906 struct dl *dl;
1907 int map_disks = info->array.raid_disks;
1908 __u32 *cptr;
1909 struct vd_config *conf;
1910
1911 memset(info, 0, sizeof(*info));
1912 if (layout_ddf2md(&vc->conf, &info->array) == -1)
1913 return;
1914 info->array.md_minor = -1;
1915 cptr = (__u32 *)(vc->conf.guid + 16);
1916 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1917 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
1918 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1919 info->custom_array_size = 0;
1920
1921 conf = &vc->conf;
1922 n_prim = be16_to_cpu(conf->prim_elmnt_count);
1923 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
1924 int ibvd = cd / n_prim - 1;
1925 cd %= n_prim;
1926 conf = vc->other_bvds[ibvd];
1927 }
1928
1929 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
1930 info->data_offset =
1931 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
1932 if (vc->block_sizes)
1933 info->component_size = vc->block_sizes[cd];
1934 else
1935 info->component_size = be64_to_cpu(conf->blocks);
1936 }
1937
1938 for (dl = ddf->dlist; dl ; dl = dl->next)
1939 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
1940 break;
1941
1942 info->disk.major = 0;
1943 info->disk.minor = 0;
1944 info->disk.state = 0;
1945 if (dl) {
1946 info->disk.major = dl->major;
1947 info->disk.minor = dl->minor;
1948 info->disk.raid_disk = cd + conf->sec_elmnt_seq
1949 * be16_to_cpu(conf->prim_elmnt_count);
1950 info->disk.number = dl->pdnum;
1951 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
1952 }
1953
1954 info->container_member = ddf->currentconf->vcnum;
1955
1956 info->recovery_start = MaxSector;
1957 info->resync_start = 0;
1958 info->reshape_active = 0;
1959 info->recovery_blocked = 0;
1960 if (!(ddf->virt->entries[info->container_member].state
1961 & DDF_state_inconsistent) &&
1962 (ddf->virt->entries[info->container_member].init_state
1963 & DDF_initstate_mask)
1964 == DDF_init_full)
1965 info->resync_start = MaxSector;
1966
1967 uuid_from_super_ddf(st, info->uuid);
1968
1969 info->array.major_version = -1;
1970 info->array.minor_version = -2;
1971 sprintf(info->text_version, "/%s/%d",
1972 st->container_devnm,
1973 info->container_member);
1974 info->safe_mode_delay = 200;
1975
1976 memcpy(info->name, ddf->virt->entries[info->container_member].name, 16);
1977 info->name[16]=0;
1978 for(j=0; j<16; j++)
1979 if (info->name[j] == ' ')
1980 info->name[j] = 0;
1981
1982 if (map)
1983 for (j = 0; j < map_disks; j++) {
1984 map[j] = 0;
1985 if (j < info->array.raid_disks) {
1986 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
1987 if (i >= 0 &&
1988 (be16_to_cpu(ddf->phys->entries[i].state)
1989 & DDF_Online) &&
1990 !(be16_to_cpu(ddf->phys->entries[i].state)
1991 & DDF_Failed))
1992 map[j] = 1;
1993 }
1994 }
1995 }
1996
1997 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
1998 char *update,
1999 char *devname, int verbose,
2000 int uuid_set, char *homehost)
2001 {
2002 /* For 'assemble' and 'force' we need to return non-zero if any
2003 * change was made. For others, the return value is ignored.
2004 * Update options are:
2005 * force-one : This device looks a bit old but needs to be included,
2006 * update age info appropriately.
2007 * assemble: clear any 'faulty' flag to allow this device to
2008 * be assembled.
2009 * force-array: Array is degraded but being forced, mark it clean
2010 * if that will be needed to assemble it.
2011 *
2012 * newdev: not used ????
2013 * grow: Array has gained a new device - this is currently for
2014 * linear only
2015 * resync: mark as dirty so a resync will happen.
2016 * uuid: Change the uuid of the array to match what is given
2017 * homehost: update the recorded homehost
2018 * name: update the name - preserving the homehost
2019 * _reshape_progress: record new reshape_progress position.
2020 *
2021 * Following are not relevant for this version:
2022 * sparc2.2 : update from old dodgy metadata
2023 * super-minor: change the preferred_minor number
2024 * summaries: update redundant counters.
2025 */
2026 int rv = 0;
2027 // struct ddf_super *ddf = st->sb;
2028 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
2029 // struct virtual_entry *ve = find_ve(ddf);
2030
2031 /* we don't need to handle "force-*" or "assemble" as
2032 * there is no need to 'trick' the kernel. When the metadata is
2033 * first updated to activate the array, all the implied modifications
2034 * will just happen.
2035 */
2036
2037 if (strcmp(update, "grow") == 0) {
2038 /* FIXME */
2039 } else if (strcmp(update, "resync") == 0) {
2040 // info->resync_checkpoint = 0;
2041 } else if (strcmp(update, "homehost") == 0) {
2042 /* homehost is stored in controller->vendor_data,
2043 * or it is when we are the vendor
2044 */
2045 // if (info->vendor_is_local)
2046 // strcpy(ddf->controller.vendor_data, homehost);
2047 rv = -1;
2048 } else if (strcmp(update, "name") == 0) {
2049 /* name is stored in virtual_entry->name */
2050 // memset(ve->name, ' ', 16);
2051 // strncpy(ve->name, info->name, 16);
2052 rv = -1;
2053 } else if (strcmp(update, "_reshape_progress") == 0) {
2054 /* We don't support reshape yet */
2055 } else if (strcmp(update, "assemble") == 0 ) {
2056 /* Do nothing, just succeed */
2057 rv = 0;
2058 } else
2059 rv = -1;
2060
2061 // update_all_csum(ddf);
2062
2063 return rv;
2064 }
2065
2066 static void make_header_guid(char *guid)
2067 {
2068 be32 stamp;
2069 /* Create a DDF Header or Virtual Disk GUID */
2070
2071 /* 24 bytes of fiction required.
2072 * first 8 are a 'vendor-id' - "Linux-MD"
2073 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2074 * Remaining 8 are a timestamp plus a random number
2075 */
2076 memcpy(guid, T10, sizeof(T10));
2077 stamp = cpu_to_be32(0xdeadbeef);
2078 memcpy(guid+8, &stamp, 4);
2079 stamp = cpu_to_be32(0);
2080 memcpy(guid+12, &stamp, 4);
2081 stamp = cpu_to_be32(time(0) - DECADE);
2082 memcpy(guid+16, &stamp, 4);
2083 stamp._v32 = random32();
2084 memcpy(guid+20, &stamp, 4);
2085 }
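/* For reference, the 24-byte GUID built above is laid out as follows
 * (an illustrative sketch of what the code produces, not extra on-disk data):
 *
 *	offset 0  : "Linux-MD" (the T10 vendor string)
 *	offset 8  : 0xdeadbeef, big-endian ("controller type")
 *	offset 12 : 0x00000000
 *	offset 16 : creation time in seconds since 1980 (time(0) - DECADE)
 *	offset 20 : random32()
 *
 * The same helper is used both for the DDF header GUID and for the
 * Virtual Disk GUIDs created in init_super_ddf_bvd() below; the getinfo
 * routines above read the ctime back from offset 16.
 */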
2086
2087 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2088 {
2089 unsigned int i;
2090 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2091 if (all_ff(ddf->virt->entries[i].guid))
2092 return i;
2093 }
2094 return DDF_NOTFOUND;
2095 }
2096
2097 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2098 const char *name)
2099 {
2100 unsigned int i;
2101 if (name == NULL)
2102 return DDF_NOTFOUND;
2103 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2104 if (all_ff(ddf->virt->entries[i].guid))
2105 continue;
2106 if (!strncmp(name, ddf->virt->entries[i].name,
2107 sizeof(ddf->virt->entries[i].name)))
2108 return i;
2109 }
2110 return DDF_NOTFOUND;
2111 }
2112
2113 #ifndef MDASSEMBLE
2114 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2115 const char *guid)
2116 {
2117 unsigned int i;
2118 if (guid == NULL || all_ff(guid))
2119 return DDF_NOTFOUND;
2120 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2121 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2122 return i;
2123 return DDF_NOTFOUND;
2124 }
2125 #endif
2126
2127 static int init_super_ddf_bvd(struct supertype *st,
2128 mdu_array_info_t *info,
2129 unsigned long long size,
2130 char *name, char *homehost,
2131 int *uuid, unsigned long long data_offset);
2132
2133 static int init_super_ddf(struct supertype *st,
2134 mdu_array_info_t *info,
2135 unsigned long long size, char *name, char *homehost,
2136 int *uuid, unsigned long long data_offset)
2137 {
2138 /* This is primarily called by Create when creating a new array.
2139 * We will then get add_to_super called for each component, and then
2140 * write_init_super called to write it out to each device.
2141 * For DDF, Create can create on fresh devices or on a pre-existing
2142 * array.
2143 * To create on a pre-existing array a different method will be called.
2144 * This one is just for fresh drives.
2145 *
2146 * We need to create the entire 'ddf' structure which includes:
2147 * DDF headers - these are easy.
2148 * Controller data - a Sector describing this controller .. not that
2149 * this is a controller exactly.
2150 * Physical Disk Record - one entry per device, so
2151 * leave plenty of space.
2152 * Virtual Disk Records - again, just leave plenty of space.
2153 * This just lists VDs, doesn't give details
2154 * Config records - describes the VDs that use this disk
2155 * DiskData - describes 'this' device.
2156 * BadBlockManagement - empty
2157 * Diag Space - empty
2158 * Vendor Logs - Could we put bitmaps here?
2159 *
2160 */
2161 struct ddf_super *ddf;
2162 char hostname[17];
2163 int hostlen;
2164 int max_phys_disks, max_virt_disks;
2165 unsigned long long sector;
2166 int clen;
2167 int i;
2168 int pdsize, vdsize;
2169 struct phys_disk *pd;
2170 struct virtual_disk *vd;
2171
2172 if (data_offset != INVALID_SECTORS) {
2173 pr_err("data-offset not supported by DDF\n");
2174 return 0;
2175 }
2176
2177 if (st->sb)
2178 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2179 data_offset);
2180
2181 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2182 pr_err("%s could not allocate superblock\n", __func__);
2183 return 0;
2184 }
2185 memset(ddf, 0, sizeof(*ddf));
2186 ddf->dlist = NULL; /* no physical disks yet */
2187 ddf->conflist = NULL; /* No virtual disks yet */
2188 st->sb = ddf;
2189
2190 if (info == NULL) {
2191 /* zeroing superblock */
2192 return 0;
2193 }
2194
2195 /* At least 32MB *must* be reserved for the ddf. So let's just
2196 * start 32MB from the end, and put the primary header there.
2197 * Don't do secondary for now.
2198 * We don't know exactly where that will be yet as it could be
2199 * different on each device. So just set up the lengths.
2200 *
2201 */
2202
2203 ddf->anchor.magic = DDF_HEADER_MAGIC;
2204 make_header_guid(ddf->anchor.guid);
2205
2206 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2207 ddf->anchor.seq = cpu_to_be32(1);
2208 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2209 ddf->anchor.openflag = 0xFF;
2210 ddf->anchor.foreignflag = 0;
2211 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2212 ddf->anchor.pad0 = 0xff;
2213 memset(ddf->anchor.pad1, 0xff, 12);
2214 memset(ddf->anchor.header_ext, 0xff, 32);
2215 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2216 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2217 ddf->anchor.type = DDF_HEADER_ANCHOR;
2218 memset(ddf->anchor.pad2, 0xff, 3);
2219 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2220 /* Put this at bottom of 32M reserved.. */
2221 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2222 max_phys_disks = 1023; /* Should be enough */
2223 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2224 max_virt_disks = 255;
2225 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks); /* ?? */
2226 ddf->anchor.max_partitions = cpu_to_be16(64); /* ?? */
2227 ddf->max_part = 64;
2228 ddf->mppe = 256;
2229 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
2230 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2231 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2232 memset(ddf->anchor.pad3, 0xff, 54);
2233 /* controller sections is one sector long immediately
2234 * after the ddf header */
2235 sector = 1;
2236 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2237 ddf->anchor.controller_section_length = cpu_to_be32(1);
2238 sector += 1;
2239
2240 /* phys is 8 sectors after that */
2241 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2242 sizeof(struct phys_disk_entry)*max_phys_disks,
2243 512);
2244 switch(pdsize/512) {
2245 case 2: case 8: case 32: case 128: case 512: break;
2246 default: abort();
2247 }
2248 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2249 ddf->anchor.phys_section_length =
2250 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2251 sector += pdsize/512;
2252
2253 /* virt is another 32 sectors */
2254 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2255 sizeof(struct virtual_entry) * max_virt_disks,
2256 512);
2257 switch(vdsize/512) {
2258 case 2: case 8: case 32: case 128: case 512: break;
2259 default: abort();
2260 }
2261 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2262 ddf->anchor.virt_section_length =
2263 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2264 sector += vdsize/512;
2265
2266 clen = ddf->conf_rec_len * (ddf->max_part+1);
2267 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2268 ddf->anchor.config_section_length = cpu_to_be32(clen);
2269 sector += clen;
2270
2271 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2272 ddf->anchor.data_section_length = cpu_to_be32(1);
2273 sector += 1;
2274
2275 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2276 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2277 ddf->anchor.diag_space_length = cpu_to_be32(0);
2278 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2279 ddf->anchor.vendor_length = cpu_to_be32(0);
2280 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2281
2282 memset(ddf->anchor.pad4, 0xff, 256);
2283
2284 memcpy(&ddf->primary, &ddf->anchor, 512);
2285 memcpy(&ddf->secondary, &ddf->anchor, 512);
2286
2287 ddf->primary.openflag = 1; /* I guess.. */
2288 ddf->primary.type = DDF_HEADER_PRIMARY;
2289
2290 ddf->secondary.openflag = 1; /* I guess.. */
2291 ddf->secondary.type = DDF_HEADER_SECONDARY;
2292
2293 ddf->active = &ddf->primary;
2294
2295 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2296
2297 /* 24 more bytes of fiction required.
2298 * first 8 are a 'vendor-id' - "Linux-MD"
2299 * Remaining 16 are serial number.... maybe a hostname would do?
2300 */
2301 memcpy(ddf->controller.guid, T10, sizeof(T10));
2302 gethostname(hostname, sizeof(hostname));
2303 hostname[sizeof(hostname) - 1] = 0;
2304 hostlen = strlen(hostname);
2305 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2306 for (i = strlen(T10) ; i+hostlen < 24; i++)
2307 ddf->controller.guid[i] = ' ';
2308
2309 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2310 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2311 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2312 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2313 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2314 memset(ddf->controller.pad, 0xff, 8);
2315 memset(ddf->controller.vendor_data, 0xff, 448);
2316 if (homehost && strlen(homehost) < 440)
2317 strcpy((char*)ddf->controller.vendor_data, homehost);
2318
2319 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2320 pr_err("%s could not allocate pd\n", __func__);
2321 return 0;
2322 }
2323 ddf->phys = pd;
2324 ddf->pdsize = pdsize;
2325
2326 memset(pd, 0xff, pdsize);
2327 memset(pd, 0, sizeof(*pd));
2328 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2329 pd->used_pdes = cpu_to_be16(0);
2330 pd->max_pdes = cpu_to_be16(max_phys_disks);
2331 memset(pd->pad, 0xff, 52);
2332 for (i = 0; i < max_phys_disks; i++)
2333 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2334
2335 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2336 pr_err("%s could not allocate vd\n", __func__);
2337 return 0;
2338 }
2339 ddf->virt = vd;
2340 ddf->vdsize = vdsize;
2341 memset(vd, 0, vdsize);
2342 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2343 vd->populated_vdes = cpu_to_be16(0);
2344 vd->max_vdes = cpu_to_be16(max_virt_disks);
2345 memset(vd->pad, 0xff, 52);
2346
2347 for (i=0; i<max_virt_disks; i++)
2348 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2349
2350 st->sb = ddf;
2351 ddf_set_updates_pending(ddf);
2352 return 1;
2353 }
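/* With the defaults chosen above, the sections described by the anchor
 * are laid out back to back, in sectors relative to each header
 * (a summary sketch of the offsets computed in this function):
 *
 *	header                  offset 0, 1 sector
 *	controller data         offset 1, 1 sector
 *	physical disk records   offset 2, pdsize/512 sectors
 *	virtual disk records    next,     vdsize/512 sectors
 *	config records          next,     conf_rec_len * (max_part+1) sectors
 *	disk data               next,     1 sector
 */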
2354
2355 static int chunk_to_shift(int chunksize)
2356 {
2357 return ffs(chunksize/512)-1;
2358 }
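/* Example (illustrative): a 64KiB chunk gives
 *	chunk_to_shift(65536) == ffs(65536/512) - 1 == ffs(128) - 1 == 7
 * and the metadata readers recover it as 512 << 7 == 65536
 * (see the chunk_size calculation in getinfo_super_ddf_bvd() above).
 */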
2359
2360 #ifndef MDASSEMBLE
2361 struct extent {
2362 unsigned long long start, size;
2363 };
2364 static int cmp_extent(const void *av, const void *bv)
2365 {
2366 const struct extent *a = av;
2367 const struct extent *b = bv;
2368 if (a->start < b->start)
2369 return -1;
2370 if (a->start > b->start)
2371 return 1;
2372 return 0;
2373 }
2374
2375 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2376 {
2377 /* Find a list of used extents on the given physical device
2378 * (dl) of the given ddf.
2379 * Return a malloced array of 'struct extent', sorted by start
2380 * and terminated by an entry whose size is 0.
2381
2382 * FIXME ignore DDF_Legacy devices?
2383 */
2384 struct extent *rv;
2385 int n = 0;
2386 unsigned int i;
2387
2388 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2389
2390 for (i = 0; i < ddf->max_part; i++) {
2391 const struct vd_config *bvd;
2392 unsigned int ibvd;
2393 struct vcl *v = dl->vlist[i];
2394 if (v == NULL ||
2395 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2396 &bvd, &ibvd) == DDF_NOTFOUND)
2397 continue;
2398 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2399 rv[n].size = be64_to_cpu(bvd->blocks);
2400 n++;
2401 }
2402 qsort(rv, n, sizeof(*rv), cmp_extent);
2403
2404 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2405 rv[n].size = 0;
2406 return rv;
2407 }
2408 #endif
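/* The users of get_extents() below (add_to_super_ddf_bvd, reserve_space,
 * validate_geometry_ddf_bvd) all scan the sorted list in the same way,
 * looking for free gaps between used extents.  A minimal sketch of that
 * pattern, for illustration only:
 *
 *	pos = 0; i = 0;
 *	do {
 *		esize = e[i].start - pos;        - free gap before extent i
 *		... record or compare esize ...
 *		pos = e[i].start + e[i].size;
 *		i++;
 *	} while (e[i-1].size);                   - sentinel entry has size 0
 */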
2409
2410 static int init_super_ddf_bvd(struct supertype *st,
2411 mdu_array_info_t *info,
2412 unsigned long long size,
2413 char *name, char *homehost,
2414 int *uuid, unsigned long long data_offset)
2415 {
2416 /* We are creating a BVD inside a pre-existing container.
2417 * so st->sb is already set.
2418 * We need to create a new vd_config and a new virtual_entry
2419 */
2420 struct ddf_super *ddf = st->sb;
2421 unsigned int venum, i;
2422 struct virtual_entry *ve;
2423 struct vcl *vcl;
2424 struct vd_config *vc;
2425
2426 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2427 pr_err("This ddf already has an array called %s\n", name);
2428 return 0;
2429 }
2430 venum = find_unused_vde(ddf);
2431 if (venum == DDF_NOTFOUND) {
2432 pr_err("Cannot find spare slot for virtual disk\n");
2433 return 0;
2434 }
2435 ve = &ddf->virt->entries[venum];
2436
2437 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2438 * timestamp, random number
2439 */
2440 make_header_guid(ve->guid);
2441 ve->unit = cpu_to_be16(info->md_minor);
2442 ve->pad0 = 0xFFFF;
2443 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2444 DDF_GUID_LEN);
2445 ve->type = cpu_to_be16(0);
2446 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2447 if (info->state & 1) /* clean */
2448 ve->init_state = DDF_init_full;
2449 else
2450 ve->init_state = DDF_init_not;
2451
2452 memset(ve->pad1, 0xff, 14);
2453 memset(ve->name, ' ', 16);
2454 if (name)
2455 strncpy(ve->name, name, 16);
2456 ddf->virt->populated_vdes =
2457 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2458
2459 /* Now create a new vd_config */
2460 if (posix_memalign((void**)&vcl, 512,
2461 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2462 pr_err("%s could not allocate vd_config\n", __func__);
2463 return 0;
2464 }
2465 vcl->vcnum = venum;
2466 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2467 vc = &vcl->conf;
2468
2469 vc->magic = DDF_VD_CONF_MAGIC;
2470 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2471 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2472 vc->seqnum = cpu_to_be32(1);
2473 memset(vc->pad0, 0xff, 24);
2474 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2475 if (layout_md2ddf(info, vc) == -1 ||
2476 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2477 pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
2478 __func__, info->level, info->layout, info->raid_disks);
2479 free(vcl);
2480 return 0;
2481 }
2482 vc->sec_elmnt_seq = 0;
2483 if (alloc_other_bvds(ddf, vcl) != 0) {
2484 pr_err("%s could not allocate other bvds\n",
2485 __func__);
2486 free(vcl);
2487 return 0;
2488 }
2489 vc->blocks = cpu_to_be64(info->size * 2);
2490 vc->array_blocks = cpu_to_be64(
2491 calc_array_size(info->level, info->raid_disks, info->layout,
2492 info->chunk_size, info->size*2));
2493 memset(vc->pad1, 0xff, 8);
2494 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2495 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2496 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2497 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2498 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2499 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2500 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2501 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2502 memset(vc->cache_pol, 0, 8);
2503 vc->bg_rate = 0x80;
2504 memset(vc->pad2, 0xff, 3);
2505 memset(vc->pad3, 0xff, 52);
2506 memset(vc->pad4, 0xff, 192);
2507 memset(vc->v0, 0xff, 32);
2508 memset(vc->v1, 0xff, 32);
2509 memset(vc->v2, 0xff, 16);
2510 memset(vc->v3, 0xff, 16);
2511 memset(vc->vendor, 0xff, 32);
2512
2513 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2514 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
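/* Layout note (descriptive only): the element table of a vd_config is
 * ddf->mppe big-endian 32-bit phys_refnum entries followed immediately
 * by ddf->mppe big-endian 64-bit starting LBAs; the two memsets above
 * initialise exactly those 4*mppe + 8*mppe bytes, and the LBA half is
 * what the LBA_OFFSET() macro used elsewhere in this file addresses.
 */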
2515
2516 for (i = 1; i < vc->sec_elmnt_count; i++) {
2517 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2518 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2519 }
2520
2521 vcl->next = ddf->conflist;
2522 ddf->conflist = vcl;
2523 ddf->currentconf = vcl;
2524 ddf_set_updates_pending(ddf);
2525 return 1;
2526 }
2527
2528
2529 #ifndef MDASSEMBLE
2530 static int get_svd_state(const struct ddf_super *, const struct vcl *);
2531
2532 static void add_to_super_ddf_bvd(struct supertype *st,
2533 mdu_disk_info_t *dk, int fd, char *devname)
2534 {
2535 /* fd and devname identify a device within the ddf container (st).
2536 * dk identifies a location in the new BVD.
2537 * We need to find suitable free space in that device and update
2538 * the phys_refnum and lba_offset for the newly created vd_config.
2539 * We might also want to update the type in the phys_disk
2540 * section.
2541 *
2542 * Alternatively: fd == -1 and we have already chosen which device to
2543 * use and recorded it in dl->raiddisk.
2544 */
2545 struct dl *dl;
2546 struct ddf_super *ddf = st->sb;
2547 struct vd_config *vc;
2548 unsigned int i;
2549 unsigned long long blocks, pos, esize;
2550 struct extent *ex;
2551 unsigned int raid_disk = dk->raid_disk;
2552
2553 if (fd == -1) {
2554 for (dl = ddf->dlist; dl ; dl = dl->next)
2555 if (dl->raiddisk == dk->raid_disk)
2556 break;
2557 } else {
2558 for (dl = ddf->dlist; dl ; dl = dl->next)
2559 if (dl->major == dk->major &&
2560 dl->minor == dk->minor)
2561 break;
2562 }
2563 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
2564 return;
2565
2566 vc = &ddf->currentconf->conf;
2567 if (vc->sec_elmnt_count > 1) {
2568 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2569 if (raid_disk >= n)
2570 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2571 raid_disk %= n;
2572 }
2573
2574 ex = get_extents(ddf, dl);
2575 if (!ex)
2576 return;
2577
2578 i = 0; pos = 0;
2579 blocks = be64_to_cpu(vc->blocks);
2580 if (ddf->currentconf->block_sizes)
2581 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2582
2583 do {
2584 esize = ex[i].start - pos;
2585 if (esize >= blocks)
2586 break;
2587 pos = ex[i].start + ex[i].size;
2588 i++;
2589 } while (ex[i-1].size);
2590
2591 free(ex);
2592 if (esize < blocks)
2593 return;
2594
2595 ddf->currentdev = dk->raid_disk;
2596 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2597 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2598
2599 for (i = 0; i < ddf->max_part ; i++)
2600 if (dl->vlist[i] == NULL)
2601 break;
2602 if (i == ddf->max_part)
2603 return;
2604 dl->vlist[i] = ddf->currentconf;
2605
2606 if (fd >= 0)
2607 dl->fd = fd;
2608 if (devname)
2609 dl->devname = devname;
2610
2611 /* Check if we can mark array as optimal yet */
2612 i = ddf->currentconf->vcnum;
2613 ddf->virt->entries[i].state =
2614 (ddf->virt->entries[i].state & ~DDF_state_mask)
2615 | get_svd_state(ddf, ddf->currentconf);
2616 be16_clear(ddf->phys->entries[dl->pdnum].type,
2617 cpu_to_be16(DDF_Global_Spare));
2618 be16_set(ddf->phys->entries[dl->pdnum].type,
2619 cpu_to_be16(DDF_Active_in_VD));
2620 dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
2621 __func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
2622 ddf->currentconf->vcnum, guid_str(vc->guid),
2623 dk->raid_disk);
2624 ddf_set_updates_pending(ddf);
2625 }
2626
2627 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2628 {
2629 unsigned int i;
2630 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2631 if (all_ff(ddf->phys->entries[i].guid))
2632 return i;
2633 }
2634 return DDF_NOTFOUND;
2635 }
2636
2637 /* add a device to a container, either while creating it or while
2638 * expanding a pre-existing container
2639 */
2640 static int add_to_super_ddf(struct supertype *st,
2641 mdu_disk_info_t *dk, int fd, char *devname,
2642 unsigned long long data_offset)
2643 {
2644 struct ddf_super *ddf = st->sb;
2645 struct dl *dd;
2646 time_t now;
2647 struct tm *tm;
2648 unsigned long long size;
2649 struct phys_disk_entry *pde;
2650 unsigned int n, i;
2651 struct stat stb;
2652 __u32 *tptr;
2653
2654 if (ddf->currentconf) {
2655 add_to_super_ddf_bvd(st, dk, fd, devname);
2656 return 0;
2657 }
2658
2659 /* This is device numbered dk->number. We need to create
2660 * a phys_disk entry and a more detailed disk_data entry.
2661 */
2662 fstat(fd, &stb);
2663 n = find_unused_pde(ddf);
2664 if (n == DDF_NOTFOUND) {
2665 pr_err("%s: No free slot in array, cannot add disk\n",
2666 __func__);
2667 return 1;
2668 }
2669 pde = &ddf->phys->entries[n];
2670 get_dev_size(fd, NULL, &size);
2671 if (size <= 32*1024*1024) {
2672 pr_err("%s: device size must be at least 32MB\n",
2673 __func__);
2674 return 1;
2675 }
2676 size >>= 9;
2677
2678 if (posix_memalign((void**)&dd, 512,
2679 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2680 pr_err("%s could allocate buffer for new disk, aborting\n",
2681 __func__);
2682 return 1;
2683 }
2684 dd->major = major(stb.st_rdev);
2685 dd->minor = minor(stb.st_rdev);
2686 dd->devname = devname;
2687 dd->fd = fd;
2688 dd->spare = NULL;
2689
2690 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2691 now = time(0);
2692 tm = localtime(&now);
2693 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2694 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2695 tptr = (__u32 *)(dd->disk.guid + 16);
2696 *tptr++ = random32();
2697 *tptr = random32();
2698
2699 do {
2700 /* Cannot be bothered finding a CRC of some irrelevant details*/
2701 dd->disk.refnum._v32 = random32();
2702 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2703 i > 0; i--)
2704 if (be32_eq(ddf->phys->entries[i-1].refnum,
2705 dd->disk.refnum))
2706 break;
2707 } while (i > 0);
2708
2709 dd->disk.forced_ref = 1;
2710 dd->disk.forced_guid = 1;
2711 memset(dd->disk.vendor, ' ', 32);
2712 memcpy(dd->disk.vendor, "Linux", 5);
2713 memset(dd->disk.pad, 0xff, 442);
2714 for (i = 0; i < ddf->max_part ; i++)
2715 dd->vlist[i] = NULL;
2716
2717 dd->pdnum = n;
2718
2719 if (st->update_tail) {
2720 int len = (sizeof(struct phys_disk) +
2721 sizeof(struct phys_disk_entry));
2722 struct phys_disk *pd;
2723
2724 pd = xmalloc(len);
2725 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2726 pd->used_pdes = cpu_to_be16(n);
2727 pde = &pd->entries[0];
2728 dd->mdupdate = pd;
2729 } else
2730 ddf->phys->used_pdes = cpu_to_be16(
2731 1 + be16_to_cpu(ddf->phys->used_pdes));
2732
2733 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2734 pde->refnum = dd->disk.refnum;
2735 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2736 pde->state = cpu_to_be16(DDF_Online);
2737 dd->size = size;
2738 /*
2739 * If there is already a device in dlist, try to reserve the same
2740 * amount of workspace. Otherwise, use 32MB.
2741 * We checked disk size above already.
2742 */
2743 #define __calc_lba(new, old, lba, mb) do { \
2744 unsigned long long dif; \
2745 if ((old) != NULL) \
2746 dif = (old)->size - be64_to_cpu((old)->lba); \
2747 else \
2748 dif = (new)->size; \
2749 if ((new)->size > dif) \
2750 (new)->lba = cpu_to_be64((new)->size - dif); \
2751 else \
2752 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2753 } while (0)
2754 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2755 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2756 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
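/* Worked example of __calc_lba() (illustrative numbers only): for the
 * first disk added to a fresh container ddf->dlist is NULL, so dif equals
 * the new disk's size and the "else" branch is taken.  On a disk of
 * 2000000 sectors that gives
 *	workspace_lba = 2000000 - 32*1024*2 = 1934464
 *	primary_lba   = 2000000 - 16*1024*2 = 1967232
 *	secondary_lba = 2000000 - 32*1024*2 = 1934464
 * i.e. the primary header 16MiB from the end and the workspace/secondary
 * 32MiB from the end.  Later disks reserve the same gap as the first one,
 * via the "old" branch.
 */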
2757 pde->config_size = dd->workspace_lba;
2758
2759 sprintf(pde->path, "%17.17s","Information: nil") ;
2760 memset(pde->pad, 0xff, 6);
2761
2762 if (st->update_tail) {
2763 dd->next = ddf->add_list;
2764 ddf->add_list = dd;
2765 } else {
2766 dd->next = ddf->dlist;
2767 ddf->dlist = dd;
2768 ddf_set_updates_pending(ddf);
2769 }
2770
2771 return 0;
2772 }
2773
2774 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2775 {
2776 struct ddf_super *ddf = st->sb;
2777 struct dl *dl;
2778
2779 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2780 * disappeared from the container.
2781 * We need to arrange that it disappears from the metadata and
2782 * internal data structures too.
2783 * Most of the work is done by ddf_process_update which edits
2784 * the metadata, closes the file handle and attaches the memory
2785 * so that free_updates will free it.
2786 */
2787 for (dl = ddf->dlist; dl ; dl = dl->next)
2788 if (dl->major == dk->major &&
2789 dl->minor == dk->minor)
2790 break;
2791 if (!dl)
2792 return -1;
2793
2794 if (st->update_tail) {
2795 int len = (sizeof(struct phys_disk) +
2796 sizeof(struct phys_disk_entry));
2797 struct phys_disk *pd;
2798
2799 pd = xmalloc(len);
2800 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2801 pd->used_pdes = cpu_to_be16(dl->pdnum);
2802 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2803 append_metadata_update(st, pd, len);
2804 }
2805 return 0;
2806 }
2807 #endif
2808
2809 /*
2810 * This is the write_init_super method for a ddf container. It is
2811 * called when creating a container or adding another device to a
2812 * container.
2813 */
2814 #define NULL_CONF_SZ 4096
2815
2816 static char *null_aligned;
2817 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
2818 {
2819 unsigned long long sector;
2820 struct ddf_header *header;
2821 int fd, i, n_config, conf_size;
2822 int ret = 0;
2823
2824 if (null_aligned == NULL) {
2825 if (posix_memalign((void **)&null_aligned, 4096, NULL_CONF_SZ)
2826 != 0)
2827 return 0;
2828 memset(null_aligned, 0xff, NULL_CONF_SZ);
2829 }
2830
2831 fd = d->fd;
2832
2833 switch (type) {
2834 case DDF_HEADER_PRIMARY:
2835 header = &ddf->primary;
2836 sector = be64_to_cpu(header->primary_lba);
2837 break;
2838 case DDF_HEADER_SECONDARY:
2839 header = &ddf->secondary;
2840 sector = be64_to_cpu(header->secondary_lba);
2841 break;
2842 default:
2843 return 0;
2844 }
2845
2846 header->type = type;
2847 header->openflag = 1;
2848 header->crc = calc_crc(header, 512);
2849
2850 lseek64(fd, sector<<9, 0);
2851 if (write(fd, header, 512) < 0)
2852 goto out;
2853
2854 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2855 if (write(fd, &ddf->controller, 512) < 0)
2856 goto out;
2857
2858 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2859 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2860 goto out;
2861 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2862 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2863 goto out;
2864
2865 /* Now write lots of config records. */
2866 n_config = ddf->max_part;
2867 conf_size = ddf->conf_rec_len * 512;
2868 for (i = 0 ; i <= n_config ; i++) {
2869 struct vcl *c;
2870 struct vd_config *vdc = NULL;
2871 if (i == n_config) {
2872 c = (struct vcl *)d->spare;
2873 if (c)
2874 vdc = &c->conf;
2875 } else {
2876 unsigned int dummy;
2877 c = d->vlist[i];
2878 if (c)
2879 get_pd_index_from_refnum(
2880 c, d->disk.refnum,
2881 ddf->mppe,
2882 (const struct vd_config **)&vdc,
2883 &dummy);
2884 }
2885 if (c) {
2886 dprintf("writing conf record %i on disk %08x for %s/%u\n",
2887 i, be32_to_cpu(d->disk.refnum),
2888 guid_str(vdc->guid),
2889 vdc->sec_elmnt_seq);
2890 vdc->seqnum = header->seq;
2891 vdc->crc = calc_crc(vdc, conf_size);
2892 if (write(fd, vdc, conf_size) < 0)
2893 break;
2894 } else {
2895 unsigned int togo = conf_size;
2896 while (togo > NULL_CONF_SZ) {
2897 if (write(fd, null_aligned, NULL_CONF_SZ) < 0)
2898 break;
2899 togo -= NULL_CONF_SZ;
2900 }
2901 if (write(fd, null_aligned, togo) < 0)
2902 break;
2903 }
2904 }
2905 if (i <= n_config)
2906 goto out;
2907
2908 d->disk.crc = calc_crc(&d->disk, 512);
2909 if (write(fd, &d->disk, 512) < 0)
2910 goto out;
2911
2912 ret = 1;
2913 out:
2914 header->openflag = 0;
2915 header->crc = calc_crc(header, 512);
2916
2917 lseek64(fd, sector<<9, 0);
2918 if (write(fd, header, 512) < 0)
2919 ret = 0;
2920
2921 return ret;
2922 }
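/* Descriptive note: the sequential writes above rely on the sections
 * having been laid out back to back by init_super_ddf().  The last
 * config slot (i == n_config, i.e. max_part) carries the spare assignment
 * record from d->spare rather than a BVD config, and the header is
 * rewritten at the end with openflag cleared to mark the update complete.
 */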
2923
2924 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
2925 {
2926 unsigned long long size;
2927 int fd = d->fd;
2928 if (fd < 0)
2929 return 0;
2930
2931 /* We need to fill in the primary, (secondary) and workspace
2932 * LBAs in the headers and set their checksums.
2933 * Also checksum phys, virt....
2934 *
2935 * Then write everything out; the anchor is written last.
2936 */
2937 get_dev_size(fd, NULL, &size);
2938 size /= 512;
2939 if (be64_to_cpu(d->workspace_lba) != 0ULL)
2940 ddf->anchor.workspace_lba = d->workspace_lba;
2941 else
2942 ddf->anchor.workspace_lba =
2943 cpu_to_be64(size - 32*1024*2);
2944 if (be64_to_cpu(d->primary_lba) != 0ULL)
2945 ddf->anchor.primary_lba = d->primary_lba;
2946 else
2947 ddf->anchor.primary_lba =
2948 cpu_to_be64(size - 16*1024*2);
2949 if (be64_to_cpu(d->secondary_lba) != 0ULL)
2950 ddf->anchor.secondary_lba = d->secondary_lba;
2951 else
2952 ddf->anchor.secondary_lba =
2953 cpu_to_be64(size - 32*1024*2);
2954 ddf->anchor.seq = ddf->active->seq;
2955 memcpy(&ddf->primary, &ddf->anchor, 512);
2956 memcpy(&ddf->secondary, &ddf->anchor, 512);
2957
2958 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
2959 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
2960 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
2961
2962 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
2963 return 0;
2964
2965 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
2966 return 0;
2967
2968 lseek64(fd, (size-1)*512, SEEK_SET);
2969 if (write(fd, &ddf->anchor, 512) < 0)
2970 return 0;
2971
2972 return 1;
2973 }
2974
2975 #ifndef MDASSEMBLE
2976 static int __write_init_super_ddf(struct supertype *st)
2977 {
2978 struct ddf_super *ddf = st->sb;
2979 struct dl *d;
2980 int attempts = 0;
2981 int successes = 0;
2982
2983 pr_state(ddf, __func__);
2984
2985 /* try to write updated metadata,
2986 * if we catch a failure move on to the next disk
2987 */
2988 for (d = ddf->dlist; d; d=d->next) {
2989 attempts++;
2990 successes += _write_super_to_disk(ddf, d);
2991 }
2992
2993 return attempts != successes;
2994 }
2995
2996 static int write_init_super_ddf(struct supertype *st)
2997 {
2998 struct ddf_super *ddf = st->sb;
2999 struct vcl *currentconf = ddf->currentconf;
3000
3001 /* We are done with currentconf; reset it so st points at the container */
3002 ddf->currentconf = NULL;
3003
3004 if (st->update_tail) {
3005 /* queue the virtual_disk and vd_config as metadata updates */
3006 struct virtual_disk *vd;
3007 struct vd_config *vc;
3008 int len, tlen;
3009 unsigned int i;
3010
3011 if (!currentconf) {
3012 int len = (sizeof(struct phys_disk) +
3013 sizeof(struct phys_disk_entry));
3014
3015 /* adding a disk to the container. */
3016 if (!ddf->add_list)
3017 return 0;
3018
3019 append_metadata_update(st, ddf->add_list->mdupdate, len);
3020 ddf->add_list->mdupdate = NULL;
3021 return 0;
3022 }
3023
3024 /* Newly created VD */
3025
3026 /* First the virtual disk. We have a slightly fake header */
3027 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3028 vd = xmalloc(len);
3029 *vd = *ddf->virt;
3030 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3031 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3032 append_metadata_update(st, vd, len);
3033
3034 /* Then the vd_config */
3035 len = ddf->conf_rec_len * 512;
3036 tlen = len * currentconf->conf.sec_elmnt_count;
3037 vc = xmalloc(tlen);
3038 memcpy(vc, &currentconf->conf, len);
3039 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3040 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3041 len);
3042 append_metadata_update(st, vc, tlen);
3043
3044 /* FIXME I need to close the fds! */
3045 return 0;
3046 } else {
3047 struct dl *d;
3048 if (!currentconf)
3049 for (d = ddf->dlist; d; d=d->next)
3050 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3051 return __write_init_super_ddf(st);
3052 }
3053 }
3054
3055 #endif
3056
3057 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3058 unsigned long long data_offset)
3059 {
3060 /* We must reserve the last 32Meg */
3061 if (devsize <= 32*1024*2)
3062 return 0;
3063 return devsize - 32*1024*2;
3064 }
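/* Example (illustrative): a 1TiB device is 2147483648 512-byte sectors,
 * so avail_size_ddf() reports 2147483648 - 32*1024*2 = 2147418112 sectors
 * as usable, keeping the final 32MiB for the DDF metadata.
 */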
3065
3066 #ifndef MDASSEMBLE
3067
3068 static int reserve_space(struct supertype *st, int raiddisks,
3069 unsigned long long size, int chunk,
3070 unsigned long long *freesize)
3071 {
3072 /* Find 'raiddisks' spare extents at least 'size' big (but
3073 * only caring about multiples of 'chunk') and remember
3074 * them.
3075 * If they cannot be found, fail.
3076 */
3077 struct dl *dl;
3078 struct ddf_super *ddf = st->sb;
3079 int cnt = 0;
3080
3081 for (dl = ddf->dlist; dl ; dl=dl->next) {
3082 dl->raiddisk = -1;
3083 dl->esize = 0;
3084 }
3085 /* Now find largest extent on each device */
3086 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3087 struct extent *e = get_extents(ddf, dl);
3088 unsigned long long pos = 0;
3089 int i = 0;
3090 int found = 0;
3091 unsigned long long minsize = size;
3092
3093 if (size == 0)
3094 minsize = chunk;
3095
3096 if (!e)
3097 continue;
3098 do {
3099 unsigned long long esize;
3100 esize = e[i].start - pos;
3101 if (esize >= minsize) {
3102 found = 1;
3103 minsize = esize;
3104 }
3105 pos = e[i].start + e[i].size;
3106 i++;
3107 } while (e[i-1].size);
3108 if (found) {
3109 cnt++;
3110 dl->esize = minsize;
3111 }
3112 free(e);
3113 }
3114 if (cnt < raiddisks) {
3115 pr_err("not enough devices with space to create array.\n");
3116 return 0; /* Not enough free extents large enough */
3117 }
3118 if (size == 0) {
3119 /* choose the largest size for which there are at least 'raiddisks' extents */
3120 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3121 struct dl *dl2;
3122 if (dl->esize <= size)
3123 continue;
3124 /* This is bigger than 'size', see if there are enough */
3125 cnt = 0;
3126 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3127 if (dl2->esize >= dl->esize)
3128 cnt++;
3129 if (cnt >= raiddisks)
3130 size = dl->esize;
3131 }
3132 if (chunk) {
3133 size = size / chunk;
3134 size *= chunk;
3135 }
3136 *freesize = size;
3137 if (size < 32) {
3138 pr_err("not enough spare devices to create array.\n");
3139 return 0;
3140 }
3141 }
3142 /* We have a 'size' for which there are enough free extents.
3143 * We simply do a first-fit */
3144 cnt = 0;
3145 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3146 if (dl->esize < size)
3147 continue;
3148
3149 dl->raiddisk = cnt;
3150 cnt++;
3151 }
3152 return 1;
3153 }
3154
3155 static int
3156 validate_geometry_ddf_container(struct supertype *st,
3157 int level, int layout, int raiddisks,
3158 int chunk, unsigned long long size,
3159 unsigned long long data_offset,
3160 char *dev, unsigned long long *freesize,
3161 int verbose);
3162
3163 static int validate_geometry_ddf_bvd(struct supertype *st,
3164 int level, int layout, int raiddisks,
3165 int *chunk, unsigned long long size,
3166 unsigned long long data_offset,
3167 char *dev, unsigned long long *freesize,
3168 int verbose);
3169
3170 static int validate_geometry_ddf(struct supertype *st,
3171 int level, int layout, int raiddisks,
3172 int *chunk, unsigned long long size,
3173 unsigned long long data_offset,
3174 char *dev, unsigned long long *freesize,
3175 int verbose)
3176 {
3177 int fd;
3178 struct mdinfo *sra;
3179 int cfd;
3180
3181 /* ddf potentially supports lots of things, but it depends on
3182 * what devices are offered (and maybe kernel version?)
3183 * If given unused devices, we will make a container.
3184 * If given devices in a container, we will make a BVD.
3185 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3186 */
3187
3188 if (chunk && *chunk == UnSet)
3189 *chunk = DEFAULT_CHUNK;
3190
3191 if (level == -1000000) level = LEVEL_CONTAINER;
3192 if (level == LEVEL_CONTAINER) {
3193 /* Must be a fresh device to add to a container */
3194 return validate_geometry_ddf_container(st, level, layout,
3195 raiddisks, chunk?*chunk:0,
3196 size, data_offset, dev,
3197 freesize,
3198 verbose);
3199 }
3200
3201 if (!dev) {
3202 mdu_array_info_t array = {
3203 .level = level, .layout = layout,
3204 .raid_disks = raiddisks
3205 };
3206 struct vd_config conf;
3207 if (layout_md2ddf(&array, &conf) == -1) {
3208 if (verbose)
3209 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3210 level, layout, raiddisks);
3211 return 0;
3212 }
3213 /* Should check layout? etc */
3214
3215 if (st->sb && freesize) {
3216 /* --create was given a container to create in.
3217 * So we need to check that there are enough
3218 * free spaces and return the amount of space.
3219 * We may as well remember which drives were
3220 * chosen so that add_to_super/getinfo_super
3221 * can return them.
3222 */
3223 return reserve_space(st, raiddisks, size, chunk?*chunk:0, freesize);
3224 }
3225 return 1;
3226 }
3227
3228 if (st->sb) {
3229 /* A container has already been opened, so we are
3230 * creating in there. Maybe a BVD, maybe an SVD.
3231 * Should make a distinction one day.
3232 */
3233 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3234 chunk, size, data_offset, dev,
3235 freesize,
3236 verbose);
3237 }
3238 /* This is the first device for the array.
3239 * If it is a container, we read it in and do automagic allocations,
3240 * no other devices should be given.
3241 * Otherwise it must be a member device of a container, and we
3242 * do manual allocation.
3243 * Later we should check for a BVD and make an SVD.
3244 */
3245 fd = open(dev, O_RDONLY|O_EXCL, 0);
3246 if (fd >= 0) {
3247 sra = sysfs_read(fd, NULL, GET_VERSION);
3248 close(fd);
3249 if (sra && sra->array.major_version == -1 &&
3250 strcmp(sra->text_version, "ddf") == 0) {
3251
3252 /* load super */
3253 /* find space for 'n' devices. */
3254 /* remember the devices */
3255 /* Somehow return the fact that we have enough */
3256 }
3257
3258 if (verbose)
3259 pr_err("ddf: Cannot create this array "
3260 "on device %s - a container is required.\n",
3261 dev);
3262 return 0;
3263 }
3264 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3265 if (verbose)
3266 pr_err("ddf: Cannot open %s: %s\n",
3267 dev, strerror(errno));
3268 return 0;
3269 }
3270 /* Well, it is in use by someone, maybe a 'ddf' container. */
3271 cfd = open_container(fd);
3272 if (cfd < 0) {
3273 close(fd);
3274 if (verbose)
3275 pr_err("ddf: Cannot use %s: %s\n",
3276 dev, strerror(EBUSY));
3277 return 0;
3278 }
3279 sra = sysfs_read(cfd, NULL, GET_VERSION);
3280 close(fd);
3281 if (sra && sra->array.major_version == -1 &&
3282 strcmp(sra->text_version, "ddf") == 0) {
3283 /* This is a member of a ddf container. Load the container
3284 * and try to create a bvd
3285 */
3286 struct ddf_super *ddf;
3287 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3288 st->sb = ddf;
3289 strcpy(st->container_devnm, fd2devnm(cfd));
3290 close(cfd);
3291 return validate_geometry_ddf_bvd(st, level, layout,
3292 raiddisks, chunk, size,
3293 data_offset,
3294 dev, freesize,
3295 verbose);
3296 }
3297 close(cfd);
3298 } else /* device may belong to a different container */
3299 return 0;
3300
3301 return 1;
3302 }
3303
3304 static int
3305 validate_geometry_ddf_container(struct supertype *st,
3306 int level, int layout, int raiddisks,
3307 int chunk, unsigned long long size,
3308 unsigned long long data_offset,
3309 char *dev, unsigned long long *freesize,
3310 int verbose)
3311 {
3312 int fd;
3313 unsigned long long ldsize;
3314
3315 if (level != LEVEL_CONTAINER)
3316 return 0;
3317 if (!dev)
3318 return 1;
3319
3320 fd = open(dev, O_RDONLY|O_EXCL, 0);
3321 if (fd < 0) {
3322 if (verbose)
3323 pr_err("ddf: Cannot open %s: %s\n",
3324 dev, strerror(errno));
3325 return 0;
3326 }
3327 if (!get_dev_size(fd, dev, &ldsize)) {
3328 close(fd);
3329 return 0;
3330 }
3331 close(fd);
3332
3333 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3334 if (*freesize == 0)
3335 return 0;
3336
3337 return 1;
3338 }
3339
3340 static int validate_geometry_ddf_bvd(struct supertype *st,
3341 int level, int layout, int raiddisks,
3342 int *chunk, unsigned long long size,
3343 unsigned long long data_offset,
3344 char *dev, unsigned long long *freesize,
3345 int verbose)
3346 {
3347 struct stat stb;
3348 struct ddf_super *ddf = st->sb;
3349 struct dl *dl;
3350 unsigned long long pos = 0;
3351 unsigned long long maxsize;
3352 struct extent *e;
3353 int i;
3354 /* ddf/bvd supports lots of things, but not containers */
3355 if (level == LEVEL_CONTAINER) {
3356 if (verbose)
3357 pr_err("DDF cannot create a container within an container\n");
3358 return 0;
3359 }
3360 /* We must have the container info already read in. */
3361 if (!ddf)
3362 return 0;
3363
3364 if (!dev) {
3365 /* General test: make sure there is space for
3366 * 'raiddisks' device extents of size 'size'.
3367 */
3368 unsigned long long minsize = size;
3369 int dcnt = 0;
3370 if (minsize == 0)
3371 minsize = 8;
3372 for (dl = ddf->dlist; dl ; dl = dl->next)
3373 {
3374 int found = 0;
3375 pos = 0;
3376
3377 i = 0;
3378 e = get_extents(ddf, dl);
3379 if (!e) continue;
3380 do {
3381 unsigned long long esize;
3382 esize = e[i].start - pos;
3383 if (esize >= minsize)
3384 found = 1;
3385 pos = e[i].start + e[i].size;
3386 i++;
3387 } while (e[i-1].size);
3388 if (found)
3389 dcnt++;
3390 free(e);
3391 }
3392 if (dcnt < raiddisks) {
3393 if (verbose)
3394 pr_err("ddf: Not enough devices with "
3395 "space for this array (%d < %d)\n",
3396 dcnt, raiddisks);
3397 return 0;
3398 }
3399 return 1;
3400 }
3401 /* This device must be a member of the set */
3402 if (stat(dev, &stb) < 0)
3403 return 0;
3404 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3405 return 0;
3406 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3407 if (dl->major == (int)major(stb.st_rdev) &&
3408 dl->minor == (int)minor(stb.st_rdev))
3409 break;
3410 }
3411 if (!dl) {
3412 if (verbose)
3413 pr_err("ddf: %s is not in the "
3414 "same DDF set\n",
3415 dev);
3416 return 0;
3417 }
3418 e = get_extents(ddf, dl);
3419 maxsize = 0;
3420 i = 0;
3421 if (e) do {
3422 unsigned long long esize;
3423 esize = e[i].start - pos;
3424 if (esize >= maxsize)
3425 maxsize = esize;
3426 pos = e[i].start + e[i].size;
3427 i++;
3428 } while (e[i-1].size);
3429 *freesize = maxsize;
3430 // FIXME here I am
3431
3432 return 1;
3433 }
3434
3435 static int load_super_ddf_all(struct supertype *st, int fd,
3436 void **sbp, char *devname)
3437 {
3438 struct mdinfo *sra;
3439 struct ddf_super *super;
3440 struct mdinfo *sd, *best = NULL;
3441 int bestseq = 0;
3442 int seq;
3443 char nm[20];
3444 int dfd;
3445
3446 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3447 if (!sra)
3448 return 1;
3449 if (sra->array.major_version != -1 ||
3450 sra->array.minor_version != -2 ||
3451 strcmp(sra->text_version, "ddf") != 0)
3452 return 1;
3453
3454 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3455 return 1;
3456 memset(super, 0, sizeof(*super));
3457
3458 /* first, try each device, and choose the best ddf */
3459 for (sd = sra->devs ; sd ; sd = sd->next) {
3460 int rv;
3461 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3462 dfd = dev_open(nm, O_RDONLY);
3463 if (dfd < 0)
3464 return 2;
3465 rv = load_ddf_headers(dfd, super, NULL);
3466 close(dfd);
3467 if (rv == 0) {
3468 seq = be32_to_cpu(super->active->seq);
3469 if (super->active->openflag)
3470 seq--;
3471 if (!best || seq > bestseq) {
3472 bestseq = seq;
3473 best = sd;
3474 }
3475 }
3476 }
3477 if (!best)
3478 return 1;
3479 /* OK, load this ddf */
3480 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3481 dfd = dev_open(nm, O_RDONLY);
3482 if (dfd < 0)
3483 return 1;
3484 load_ddf_headers(dfd, super, NULL);
3485 load_ddf_global(dfd, super, NULL);
3486 close(dfd);
3487 /* Now we need the device-local bits */
3488 for (sd = sra->devs ; sd ; sd = sd->next) {
3489 int rv;
3490
3491 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3492 dfd = dev_open(nm, O_RDWR);
3493 if (dfd < 0)
3494 return 2;
3495 rv = load_ddf_headers(dfd, super, NULL);
3496 if (rv == 0)
3497 rv = load_ddf_local(dfd, super, NULL, 1);
3498 if (rv)
3499 return 1;
3500 }
3501
3502 *sbp = super;
3503 if (st->ss == NULL) {
3504 st->ss = &super_ddf;
3505 st->minor_version = 0;
3506 st->max_devs = 512;
3507 }
3508 strcpy(st->container_devnm, fd2devnm(fd));
3509 return 0;
3510 }
3511
3512 static int load_container_ddf(struct supertype *st, int fd,
3513 char *devname)
3514 {
3515 return load_super_ddf_all(st, fd, &st->sb, devname);
3516 }
3517
3518 #endif /* MDASSEMBLE */
3519
3520 static int check_secondary(const struct vcl *vc)
3521 {
3522 const struct vd_config *conf = &vc->conf;
3523 int i;
3524
3525 /* The only DDF secondary RAID level md can support is
3526 * RAID 10, if the stripe sizes and Basic volume sizes
3527 * are all equal.
3528 * Other configurations could in theory be supported by exposing
3529 * the BVDs to user space and using device mapper for the secondary
3530 * mapping. So far we don't support that.
3531 */
3532
3533 __u64 sec_elements[4] = {0, 0, 0, 0};
3534 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1ULL<<((n)&63)))
3535 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1ULL<<((n)&63))) != 0)
3536
3537 if (vc->other_bvds == NULL) {
3538 pr_err("No BVDs for secondary RAID found\n");
3539 return -1;
3540 }
3541 if (conf->prl != DDF_RAID1) {
3542 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3543 return -1;
3544 }
3545 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3546 pr_err("Secondary RAID level %d is unsupported\n",
3547 conf->srl);
3548 return -1;
3549 }
3550 __set_sec_seen(conf->sec_elmnt_seq);
3551 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3552 const struct vd_config *bvd = vc->other_bvds[i];
3553 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3554 continue;
3555 if (bvd->srl != conf->srl) {
3556 pr_err("Inconsistent secondary RAID level across BVDs\n");
3557 return -1;
3558 }
3559 if (bvd->prl != conf->prl) {
3560 pr_err("Different RAID levels for BVDs are unsupported\n");
3561 return -1;
3562 }
3563 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3564 pr_err("All BVDs must have the same number of primary elements\n");
3565 return -1;
3566 }
3567 if (bvd->chunk_shift != conf->chunk_shift) {
3568 pr_err("Different strip sizes for BVDs are unsupported\n");
3569 return -1;
3570 }
3571 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3572 pr_err("Different BVD sizes are unsupported\n");
3573 return -1;
3574 }
3575 __set_sec_seen(bvd->sec_elmnt_seq);
3576 }
3577 for (i = 0; i < conf->sec_elmnt_count; i++) {
3578 if (!__was_sec_seen(i)) {
3579 pr_err("BVD %d is missing\n", i);
3580 return -1;
3581 }
3582 }
3583 return 0;
3584 }
3585
3586 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3587 be32 refnum, unsigned int nmax,
3588 const struct vd_config **bvd,
3589 unsigned int *idx)
3590 {
3591 unsigned int i, j, n, sec, cnt;
3592
3593 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3594 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3595
3596 for (i = 0, j = 0 ; i < nmax ; i++) {
3597 /* j counts valid entries for this BVD */
3598 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3599 j++;
3600 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3601 *bvd = &vc->conf;
3602 *idx = i;
3603 return sec * cnt + j - 1;
3604 }
3605 }
3606 if (vc->other_bvds == NULL)
3607 goto bad;
3608
3609 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3610 struct vd_config *vd = vc->other_bvds[n-1];
3611 sec = vd->sec_elmnt_seq;
3612 if (sec == DDF_UNUSED_BVD)
3613 continue;
3614 for (i = 0, j = 0 ; i < nmax ; i++) {
3615 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3616 j++;
3617 if (be32_eq(vd->phys_refnum[i], refnum)) {
3618 *bvd = vd;
3619 *idx = i;
3620 return sec * cnt + j - 1;
3621 }
3622 }
3623 }
3624 bad:
3625 *bvd = NULL;
3626 return DDF_NOTFOUND;
3627 }
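/* Worked example (illustrative): for a RAID10 set built from two mirrored
 * BVDs with prim_elmnt_count == 2, a refnum that is the second valid
 * entry (j == 2) of the BVD with sec_elmnt_seq == 1 yields
 *	sec * cnt + j - 1 == 1*2 + 2 - 1 == 3
 * i.e. the disk occupies global raid_disk slot 3 of the exposed md array.
 */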
3628
3629 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3630 {
3631 /* Given a container loaded by load_super_ddf_all,
3632 * extract information about all the arrays into
3633 * an mdinfo tree.
3634 *
3635 * For each vcl in conflist: create an mdinfo, fill it in,
3636 * then look for matching devices (phys_refnum) in dlist
3637 * and create appropriate device mdinfo.
3638 */
3639 struct ddf_super *ddf = st->sb;
3640 struct mdinfo *rest = NULL;
3641 struct vcl *vc;
3642
3643 for (vc = ddf->conflist ; vc ; vc=vc->next)
3644 {
3645 unsigned int i;
3646 unsigned int j;
3647 struct mdinfo *this;
3648 char *ep;
3649 __u32 *cptr;
3650 unsigned int pd;
3651
3652 if (subarray &&
3653 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3654 *ep != '\0'))
3655 continue;
3656
3657 if (vc->conf.sec_elmnt_count > 1) {
3658 if (check_secondary(vc) != 0)
3659 continue;
3660 }
3661
3662 this = xcalloc(1, sizeof(*this));
3663 this->next = rest;
3664 rest = this;
3665
3666 if (layout_ddf2md(&vc->conf, &this->array))
3667 continue;
3668 this->array.md_minor = -1;
3669 this->array.major_version = -1;
3670 this->array.minor_version = -2;
3671 cptr = (__u32 *)(vc->conf.guid + 16);
3672 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3673 this->array.utime = DECADE +
3674 be32_to_cpu(vc->conf.timestamp);
3675 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3676
3677 i = vc->vcnum;
3678 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3679 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3680 DDF_init_full) {
3681 this->array.state = 0;
3682 this->resync_start = 0;
3683 } else {
3684 this->array.state = 1;
3685 this->resync_start = MaxSector;
3686 }
3687 memcpy(this->name, ddf->virt->entries[i].name, 16);
3688 this->name[16]=0;
3689 for(j=0; j<16; j++)
3690 if (this->name[j] == ' ')
3691 this->name[j] = 0;
3692
3693 memset(this->uuid, 0, sizeof(this->uuid));
3694 this->component_size = be64_to_cpu(vc->conf.blocks);
3695 this->array.size = this->component_size / 2;
3696 this->container_member = i;
3697
3698 ddf->currentconf = vc;
3699 uuid_from_super_ddf(st, this->uuid);
3700 if (!subarray)
3701 ddf->currentconf = NULL;
3702
3703 sprintf(this->text_version, "/%s/%d",
3704 st->container_devnm, this->container_member);
3705
3706 for (pd = 0; pd < be16_to_cpu(ddf->phys->used_pdes); pd++) {
3707 struct mdinfo *dev;
3708 struct dl *d;
3709 const struct vd_config *bvd;
3710 unsigned int iphys;
3711 int stt;
3712
3713 if (be32_to_cpu(ddf->phys->entries[pd].refnum)
3714 == 0xFFFFFFFF)
3715 continue;
3716
3717 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3718 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3719 != DDF_Online)
3720 continue;
3721
3722 i = get_pd_index_from_refnum(
3723 vc, ddf->phys->entries[pd].refnum,
3724 ddf->mppe, &bvd, &iphys);
3725 if (i == DDF_NOTFOUND)
3726 continue;
3727
3728 this->array.working_disks++;
3729
3730 for (d = ddf->dlist; d ; d=d->next)
3731 if (be32_eq(d->disk.refnum,
3732 ddf->phys->entries[pd].refnum))
3733 break;
3734 if (d == NULL)
3735 /* Haven't found that one yet, maybe there are others */
3736 continue;
3737
3738 dev = xcalloc(1, sizeof(*dev));
3739 dev->next = this->devs;
3740 this->devs = dev;
3741
3742 dev->disk.number = be32_to_cpu(d->disk.refnum);
3743 dev->disk.major = d->major;
3744 dev->disk.minor = d->minor;
3745 dev->disk.raid_disk = i;
3746 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3747 dev->recovery_start = MaxSector;
3748
3749 dev->events = be32_to_cpu(ddf->primary.seq);
3750 dev->data_offset =
3751 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3752 dev->component_size = be64_to_cpu(bvd->blocks);
3753 if (d->devname)
3754 strcpy(dev->name, d->devname);
3755 }
3756 }
3757 return rest;
3758 }
3759
3760 static int store_super_ddf(struct supertype *st, int fd)
3761 {
3762 struct ddf_super *ddf = st->sb;
3763 unsigned long long dsize;
3764 void *buf;
3765 int rc;
3766
3767 if (!ddf)
3768 return 1;
3769
3770 if (!get_dev_size(fd, NULL, &dsize))
3771 return 1;
3772
3773 if (ddf->dlist || ddf->conflist) {
3774 struct stat sta;
3775 struct dl *dl;
3776 int ofd, ret;
3777
3778 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3779 pr_err("%s: file descriptor for invalid device\n",
3780 __func__);
3781 return 1;
3782 }
3783 for (dl = ddf->dlist; dl; dl = dl->next)
3784 if (dl->major == (int)major(sta.st_rdev) &&
3785 dl->minor == (int)minor(sta.st_rdev))
3786 break;
3787 if (!dl) {
3788 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3789 (int)major(sta.st_rdev),
3790 (int)minor(sta.st_rdev));
3791 return 1;
3792 }
3793 ofd = dl->fd;
3794 dl->fd = fd;
3795 ret = (_write_super_to_disk(ddf, dl) != 1);
3796 dl->fd = ofd;
3797 return ret;
3798 }
3799
3800 if (posix_memalign(&buf, 512, 512) != 0)
3801 return 1;
3802 memset(buf, 0, 512);
3803
3804 lseek64(fd, dsize-512, 0);
3805 rc = write(fd, buf, 512);
3806 free(buf);
3807 if (rc < 0)
3808 return 1;
3809 return 0;
3810 }
3811
3812 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3813 {
3814 /*
3815 * return:
3816 * 0 same, or first was empty, and second was copied
3817 * 1 second had wrong number
3818 * 2 wrong uuid
3819 * 3 wrong other info
3820 */
3821 struct ddf_super *first = st->sb;
3822 struct ddf_super *second = tst->sb;
3823 struct dl *dl1, *dl2;
3824 struct vcl *vl1, *vl2;
3825 unsigned int max_vds, max_pds, pd, vd;
3826
3827 if (!first) {
3828 st->sb = tst->sb;
3829 tst->sb = NULL;
3830 return 0;
3831 }
3832
3833 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3834 return 2;
3835
3836 if (!be32_eq(first->anchor.seq, second->anchor.seq)) {
3837 dprintf("%s: sequence number mismatch %u/%u\n", __func__,
3838 be32_to_cpu(first->anchor.seq),
3839 be32_to_cpu(second->anchor.seq));
3840 return 3;
3841 }
3842 if (first->max_part != second->max_part ||
3843 !be16_eq(first->phys->used_pdes, second->phys->used_pdes) ||
3844 !be16_eq(first->virt->populated_vdes,
3845 second->virt->populated_vdes)) {
3846 dprintf("%s: PD/VD number mismatch\n", __func__);
3847 return 3;
3848 }
3849
3850 max_pds = be16_to_cpu(first->phys->used_pdes);
3851 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3852 for (pd = 0; pd < max_pds; pd++)
3853 if (be32_eq(first->phys->entries[pd].refnum,
3854 dl2->disk.refnum))
3855 break;
3856 if (pd == max_pds) {
3857 dprintf("%s: no match for disk %08x\n", __func__,
3858 be32_to_cpu(dl2->disk.refnum));
3859 return 3;
3860 }
3861 }
3862
3863 max_vds = be16_to_cpu(first->active->max_vd_entries);
3864 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3865 if (!be32_eq(vl2->conf.magic, DDF_VD_CONF_MAGIC))
3866 continue;
3867 for (vd = 0; vd < max_vds; vd++)
3868 if (!memcmp(first->virt->entries[vd].guid,
3869 vl2->conf.guid, DDF_GUID_LEN))
3870 break;
3871 if (vd == max_vds) {
3872 dprintf("%s: no match for VD config\n", __func__);
3873 return 3;
3874 }
3875 }
3876 /* FIXME should I look at anything else? */
3877
3878 /*
3879 At this point we are fairly sure that the metadata matches.
3880 But the new disk may contain additional local data.
3881 Add it to the superblock.
3882 */
3883 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3884 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3885 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3886 DDF_GUID_LEN))
3887 break;
3888 if (vl1) {
3889 if (vl1->other_bvds != NULL &&
3890 vl1->conf.sec_elmnt_seq !=
3891 vl2->conf.sec_elmnt_seq) {
3892 dprintf("%s: adding BVD %u\n", __func__,
3893 vl2->conf.sec_elmnt_seq);
3894 add_other_bvd(vl1, &vl2->conf,
3895 first->conf_rec_len*512);
3896 }
3897 continue;
3898 }
3899
3900 if (posix_memalign((void **)&vl1, 512,
3901 (first->conf_rec_len*512 +
3902 offsetof(struct vcl, conf))) != 0) {
3903 pr_err("%s could not allocate vcl buf\n",
3904 __func__);
3905 return 3;
3906 }
3907
3908 vl1->next = first->conflist;
3909 vl1->block_sizes = NULL;
3910 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3911 if (alloc_other_bvds(first, vl1) != 0) {
3912 pr_err("%s could not allocate other bvds\n",
3913 __func__);
3914 free(vl1);
3915 return 3;
3916 }
3917 for (vd = 0; vd < max_vds; vd++)
3918 if (!memcmp(first->virt->entries[vd].guid,
3919 vl1->conf.guid, DDF_GUID_LEN))
3920 break;
3921 vl1->vcnum = vd;
3922 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
3923 first->conflist = vl1;
3924 }
3925
3926 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3927 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3928 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
3929 break;
3930 if (dl1)
3931 continue;
3932
3933 if (posix_memalign((void **)&dl1, 512,
3934 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
3935 != 0) {
3936 pr_err("%s could not allocate disk info buffer\n",
3937 __func__);
3938 return 3;
3939 }
3940 memcpy(dl1, dl2, sizeof(*dl1));
3941 dl1->mdupdate = NULL;
3942 dl1->next = first->dlist;
3943 dl1->fd = -1;
3944 for (pd = 0; pd < max_pds; pd++)
3945 if (be32_eq(first->phys->entries[pd].refnum,
3946 dl1->disk.refnum))
3947 break;
3948 dl1->pdnum = pd;
3949 if (dl2->spare) {
3950 if (posix_memalign((void **)&dl1->spare, 512,
3951 first->conf_rec_len*512) != 0) {
3952 pr_err("%s could not allocate spare info buf\n",
3953 __func__);
3954 return 3;
3955 }
3956 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
3957 }
3958 for (vd = 0 ; vd < first->max_part ; vd++) {
3959 if (!dl2->vlist[vd]) {
3960 dl1->vlist[vd] = NULL;
3961 continue;
3962 }
3963 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3964 if (!memcmp(vl1->conf.guid,
3965 dl2->vlist[vd]->conf.guid,
3966 DDF_GUID_LEN))
3967 break;
/* point at first's copy of this VD's config (NULL if not found) */
3968 dl1->vlist[vd] = vl1;
3970 }
3971 first->dlist = dl1;
3972 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
3973 be32_to_cpu(dl1->disk.refnum));
3974 }
3975
3976 return 0;
3977 }
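/*
 * Example (illustrative, device names are arbitrary): while assembling a
 * container from, say, /dev/sda and /dev/sdb, the first call finds
 * st->sb == NULL and returns 0 after adopting sda's superblock.  The call
 * for sdb then returns 0 if sdb carries the same anchor GUID and sequence
 * number (its dlist/conflist entries are merged into 'first'), 2 if the
 * GUID differs (a different container), or 3 on a sequence/PD/VD mismatch.
 */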
3978
3979 #ifndef MDASSEMBLE
3980 /*
3981 * A new array 'a' has been started which claims to be instance 'inst'
3982 * within container 'c'.
3983 * We need to confirm that the array matches the metadata in 'c' so
3984 * that we don't corrupt any metadata.
3985 */
3986 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
3987 {
3988 struct ddf_super *ddf = c->sb;
3989 int n = atoi(inst);
3990 if (all_ff(ddf->virt->entries[n].guid)) {
3991 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
3992 return -ENODEV;
3993 }
3994 dprintf("ddf: open_new %d\n", n);
3995 a->info.container_member = n;
3996 return 0;
3997 }
3998
3999 /*
4000 * The array 'a' is to be marked clean in the metadata.
4001 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4002 * clean up to that point (a sector count). If that cannot be recorded in the
4003 * metadata, then leave it marked as dirty.
4004 *
4005 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4006 * !global! virtual_disk.virtual_entry structure.
4007 */
4008 static int ddf_set_array_state(struct active_array *a, int consistent)
4009 {
4010 struct ddf_super *ddf = a->container->sb;
4011 int inst = a->info.container_member;
4012 int old = ddf->virt->entries[inst].state;
4013 if (consistent == 2) {
4014 /* Should check if a recovery should be started FIXME */
4015 consistent = 1;
4016 if (!is_resync_complete(&a->info))
4017 consistent = 0;
4018 }
4019 if (consistent)
4020 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4021 else
4022 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4023 if (old != ddf->virt->entries[inst].state)
4024 ddf_set_updates_pending(ddf);
4025
4026 old = ddf->virt->entries[inst].init_state;
4027 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4028 if (is_resync_complete(&a->info))
4029 ddf->virt->entries[inst].init_state |= DDF_init_full;
4030 else if (a->info.resync_start == 0)
4031 ddf->virt->entries[inst].init_state |= DDF_init_not;
4032 else
4033 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4034 if (old != ddf->virt->entries[inst].init_state)
4035 ddf_set_updates_pending(ddf);
4036
4037 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4038 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4039 consistent?"clean":"dirty",
4040 a->info.resync_start);
4041 return consistent;
4042 }
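/*
 * Example: a clean, fully resynced array ends up with
 * DDF_state_inconsistent cleared and init_state DDF_init_full; an array
 * whose resync never started (resync_start == 0) is recorded as
 * DDF_init_not, and a partially resynced one as DDF_init_quick.  Only a
 * real change to either field triggers ddf_set_updates_pending().
 */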
4043
4044 static int get_bvd_state(const struct ddf_super *ddf,
4045 const struct vd_config *vc)
4046 {
4047 unsigned int i, n_bvd, working = 0;
4048 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4049 int pd, st, state;
4050 for (i = 0; i < n_prim; i++) {
4051 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4052 continue;
4053 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4054 if (pd < 0)
4055 continue;
4056 st = be16_to_cpu(ddf->phys->entries[pd].state);
4057 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4058 == DDF_Online)
4059 working++;
4060 }
4061
4062 state = DDF_state_degraded;
4063 if (working == n_prim)
4064 state = DDF_state_optimal;
4065 else
4066 switch (vc->prl) {
4067 case DDF_RAID0:
4068 case DDF_CONCAT:
4069 case DDF_JBOD:
4070 state = DDF_state_failed;
4071 break;
4072 case DDF_RAID1:
4073 if (working == 0)
4074 state = DDF_state_failed;
4075 else if (working >= 2)
4076 state = DDF_state_part_optimal;
4077 break;
4078 case DDF_RAID4:
4079 case DDF_RAID5:
4080 if (working < n_prim - 1)
4081 state = DDF_state_failed;
4082 break;
4083 case DDF_RAID6:
4084 if (working < n_prim - 2)
4085 state = DDF_state_failed;
4086 else if (working == n_prim - 1)
4087 state = DDF_state_part_optimal;
4088 break;
4089 }
4090 return state;
4091 }
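/*
 * Example: a 4-disk RAID5 BVD (n_prim == 4) is optimal with all 4 members
 * Online, degraded with 3, and failed with 2 or fewer.  The same 4 disks
 * as RAID6 are part_optimal with 3 working, degraded with 2, and failed
 * only below n_prim - 2.
 */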
4092
4093 static int secondary_state(int state, int other, int seclevel)
4094 {
4095 if (state == DDF_state_optimal && other == DDF_state_optimal)
4096 return DDF_state_optimal;
4097 if (seclevel == DDF_2MIRRORED) {
4098 if (state == DDF_state_optimal || other == DDF_state_optimal)
4099 return DDF_state_part_optimal;
4100 if (state == DDF_state_failed && other == DDF_state_failed)
4101 return DDF_state_failed;
4102 return DDF_state_degraded;
4103 } else {
4104 if (state == DDF_state_failed || other == DDF_state_failed)
4105 return DDF_state_failed;
4106 if (state == DDF_state_degraded || other == DDF_state_degraded)
4107 return DDF_state_degraded;
4108 return DDF_state_part_optimal;
4109 }
4110 }
4111
4112 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4113 {
4114 int state = get_bvd_state(ddf, &vcl->conf);
4115 unsigned int i;
4116 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4117 state = secondary_state(
4118 state,
4119 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4120 vcl->conf.srl);
4121 }
4122 return state;
4123 }
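/*
 * Example: a RAID10 member is built from 2-way RAID1 BVDs joined at the
 * secondary level (cf. raid10_degraded() below).  Losing one disk makes
 * that BVD DDF_state_degraded, and the non-mirrored branch of
 * secondary_state() therefore reports the SVD as degraded; losing both
 * disks of one mirror fails that BVD and hence the whole SVD.  Only with
 * srl == DDF_2MIRRORED would a single failed BVD leave the SVD
 * part_optimal.
 */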
4124
4125 /*
4126 * The state of each disk is stored in the global phys_disk structure
4127 * in phys_disk.entries[n].state.
4128 * This makes various combinations awkward.
4129 * - When a device fails in any array, it must be failed in all arrays
4130 * that include a part of this device.
4131 * - When a component is rebuilding, we cannot include it officially in the
4132 * array unless this is the only array that uses the device.
4133 *
4134 * So: when transitioning:
4135 * Online -> Failed: just set the Failed flag; the monitor will propagate it.
4136 * Spare -> Online: the device might need to be added to the array.
4137 * Spare -> Failed: just set Failed. Don't worry whether it is in an array or not.
4138 */
4139 static void ddf_set_disk(struct active_array *a, int n, int state)
4140 {
4141 struct ddf_super *ddf = a->container->sb;
4142 unsigned int inst = a->info.container_member, n_bvd;
4143 struct vcl *vcl;
4144 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4145 &n_bvd, &vcl);
4146 int pd;
4147 struct mdinfo *mdi;
4148 struct dl *dl;
4149
4150 if (vc == NULL) {
4151 dprintf("ddf: cannot find instance %d!!\n", inst);
4152 return;
4153 }
4154 /* Find the matching slot in 'info'. */
4155 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4156 if (mdi->disk.raid_disk == n)
4157 break;
4158 if (!mdi)
4159 return;
4160
4161 /* and find the 'dl' entry corresponding to that. */
4162 for (dl = ddf->dlist; dl; dl = dl->next)
4163 if (mdi->state_fd >= 0 &&
4164 mdi->disk.major == dl->major &&
4165 mdi->disk.minor == dl->minor)
4166 break;
4167 if (!dl)
4168 return;
4169
4170 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4171 if (pd < 0 || pd != dl->pdnum) {
4172 /* disk doesn't currently exist or has changed.
4173 * If it is now in_sync, insert it. */
4174 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4175 __func__, dl->pdnum, dl->major, dl->minor,
4176 be32_to_cpu(dl->disk.refnum));
4177 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4178 __func__, inst, n_bvd,
4179 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4180 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4181 pd = dl->pdnum; /* FIXME: is this really correct ? */
4182 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4183 LBA_OFFSET(ddf, vc)[n_bvd] =
4184 cpu_to_be64(mdi->data_offset);
4185 be16_clear(ddf->phys->entries[pd].type,
4186 cpu_to_be16(DDF_Global_Spare));
4187 be16_set(ddf->phys->entries[pd].type,
4188 cpu_to_be16(DDF_Active_in_VD));
4189 ddf_set_updates_pending(ddf);
4190 }
4191 } else {
4192 be16 old = ddf->phys->entries[pd].state;
4193 if (state & DS_FAULTY)
4194 be16_set(ddf->phys->entries[pd].state,
4195 cpu_to_be16(DDF_Failed));
4196 if (state & DS_INSYNC) {
4197 be16_set(ddf->phys->entries[pd].state,
4198 cpu_to_be16(DDF_Online));
4199 be16_clear(ddf->phys->entries[pd].state,
4200 cpu_to_be16(DDF_Rebuilding));
4201 }
4202 if (!be16_eq(old, ddf->phys->entries[pd].state))
4203 ddf_set_updates_pending(ddf);
4204 }
4205
4206 dprintf("ddf: set_disk %d to %x\n", n, state);
4207
4208 /* Now we need to check the state of the array and update
4209 * virtual_disk.entries[n].state.
4210 * It needs to be one of "optimal", "degraded", "failed".
4211 * I don't understand 'deleted' or 'missing'.
4212 */
4213 state = get_svd_state(ddf, vcl);
4214
4215 if (ddf->virt->entries[inst].state !=
4216 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4217 | state)) {
4218
4219 ddf->virt->entries[inst].state =
4220 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4221 | state;
4222 ddf_set_updates_pending(ddf);
4223 }
4224
4225 }
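/*
 * Example: when a rebuilding spare reaches in_sync, mdmon calls this with
 * DS_INSYNC set and DS_FAULTY clear: the slot's phys_refnum/LBA offset are
 * recorded if they had changed, DDF_Online is set and DDF_Rebuilding
 * cleared in the global phys entry, and get_svd_state() then typically
 * moves the VD from degraded back to optimal.
 */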
4226
4227 static void ddf_sync_metadata(struct supertype *st)
4228 {
4229
4230 /*
4231 * Write all data to all devices.
4232 * Later, we might be able to track whether only local changes
4233 * have been made, or whether any global data has been changed,
4234 * but ddf is sufficiently weird that it probably always
4235 * changes global data ....
4236 */
4237 struct ddf_super *ddf = st->sb;
4238 if (!ddf->updates_pending)
4239 return;
4240 ddf->updates_pending = 0;
4241 __write_init_super_ddf(st);
4242 dprintf("ddf: sync_metadata\n");
4243 }
4244
4245 static int del_from_conflist(struct vcl **list, const char *guid)
4246 {
4247 struct vcl **p;
4248 int found = 0;
4249 for (p = list; p && *p; p = &((*p)->next))
4250 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4251 found = 1;
4252 *p = (*p)->next;
4253 }
4254 return found;
4255 }
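/*
 * Example: with a conflist A -> B -> C and B matching the GUID, the walk
 * stops with p == &A->next, so "*p = (*p)->next" relinks A straight to C.
 * Iterating with a pointer-to-pointer means removing the list head needs
 * no special case.
 */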
4256
4257 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4258 {
4259 struct dl *dl;
4260 unsigned int vdnum, i;
4261 vdnum = find_vde_by_guid(ddf, guid);
4262 if (vdnum == DDF_NOTFOUND) {
4263 pr_err("%s: could not find VD %s\n", __func__,
4264 guid_str(guid));
4265 return -1;
4266 }
4267 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4268 pr_err("%s: could not find conf %s\n", __func__,
4269 guid_str(guid));
4270 return -1;
4271 }
4272 for (dl = ddf->dlist; dl; dl = dl->next)
4273 for (i = 0; i < ddf->max_part; i++)
4274 if (dl->vlist[i] != NULL &&
4275 !memcmp(dl->vlist[i]->conf.guid, guid,
4276 DDF_GUID_LEN))
4277 dl->vlist[i] = NULL;
4278 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4279 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4280 return 0;
4281 }
4282
4283 static int kill_subarray_ddf(struct supertype *st)
4284 {
4285 struct ddf_super *ddf = st->sb;
4286 /*
4287 * currentconf is set in container_content_ddf,
4288 * called with subarray arg
4289 */
4290 struct vcl *victim = ddf->currentconf;
4291 struct vd_config *conf;
4292 unsigned int vdnum;
4293 ddf->currentconf = NULL;
4294 if (!victim) {
4295 pr_err("%s: nothing to kill\n", __func__);
4296 return -1;
4297 }
4298 conf = &victim->conf;
4299 vdnum = find_vde_by_guid(ddf, conf->guid);
4300 if (vdnum == DDF_NOTFOUND) {
4301 pr_err("%s: could not find VD %s\n", __func__,
4302 guid_str(conf->guid));
4303 return -1;
4304 }
4305 if (st->update_tail) {
4306 struct virtual_disk *vd;
4307 int len = sizeof(struct virtual_disk)
4308 + sizeof(struct virtual_entry);
4309 vd = xmalloc(len);
4310 if (vd == NULL) {
4311 pr_err("%s: failed to allocate %d bytes\n", __func__,
4312 len);
4313 return -1;
4314 }
4315 memset(vd, 0, len);
4316 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4317 vd->populated_vdes = cpu_to_be16(0);
4318 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4319 /* we use DDF_state_deleted as marker */
4320 vd->entries[0].state = DDF_state_deleted;
4321 append_metadata_update(st, vd, len);
4322 } else {
4323 _kill_subarray_ddf(ddf, conf->guid);
4324 ddf_set_updates_pending(ddf);
4325 ddf_sync_metadata(st);
4326 }
4327 return 0;
4328 }
4329
4330 static void copy_matching_bvd(struct ddf_super *ddf,
4331 struct vd_config *conf,
4332 const struct metadata_update *update)
4333 {
4334 unsigned int mppe =
4335 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4336 unsigned int len = ddf->conf_rec_len * 512;
4337 char *p;
4338 struct vd_config *vc;
4339 for (p = update->buf; p < update->buf + update->len; p += len) {
4340 vc = (struct vd_config *) p;
4341 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
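/* The LBA offset table follows phys_refnum[] directly (see LBA_OFFSET()),
 * so copying mppe * (sizeof(__u32) + sizeof(__u64)) bytes refreshes both
 * the refnums and the per-disk start LBAs of this BVD in one go.
 */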
4342 memcpy(conf->phys_refnum, vc->phys_refnum,
4343 mppe * (sizeof(__u32) + sizeof(__u64)));
4344 return;
4345 }
4346 }
4347 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4348 conf->sec_elmnt_seq, guid_str(conf->guid));
4349 }
4350
4351 static void ddf_process_update(struct supertype *st,
4352 struct metadata_update *update)
4353 {
4354 /* Apply this update to the metadata.
4355 * The first 4 bytes are a DDF_*_MAGIC which guides
4356 * our actions.
4357 * Possible updates are:
4358 * DDF_PHYS_RECORDS_MAGIC
4359 * Add a new physical device or remove an old one.
4360 * Changes to this record only happen implicitly.
4361 * used_pdes is the device number.
4362 * DDF_VIRT_RECORDS_MAGIC
4363 * Add a new VD. Possibly also change the 'access' bits.
4364 * populated_vdes is the entry number.
4365 * DDF_VD_CONF_MAGIC
4366 * New or updated VD. The VIRT_RECORD must already
4367 * exist. For an update, phys_refnum and lba_offset
4368 * (at least) are updated, and the VD_CONF must
4369 * be written to precisely those devices listed with
4370 * a phys_refnum.
4371 * DDF_SPARE_ASSIGN_MAGIC
4372 * replacement Spare Assignment Record... but for which device?
4373 *
4374 * So, e.g.:
4375 * - to create a new array, we send a VIRT_RECORD and
4376 * a VD_CONF. Then assemble and start the array.
4377 * - to activate a spare we send a VD_CONF to add the phys_refnum
4378 * and offset. This will also mark the spare as active with
4379 * a spare-assignment record.
4380 */
4381 struct ddf_super *ddf = st->sb;
4382 be32 *magic = (be32 *)update->buf;
4383 struct phys_disk *pd;
4384 struct virtual_disk *vd;
4385 struct vd_config *vc;
4386 struct vcl *vcl;
4387 struct dl *dl;
4388 unsigned int ent;
4389 unsigned int pdnum, pd2, len;
4390
4391 dprintf("Process update %x\n", be32_to_cpu(*magic));
4392
4393 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4394
4395 if (update->len != (sizeof(struct phys_disk) +
4396 sizeof(struct phys_disk_entry)))
4397 return;
4398 pd = (struct phys_disk*)update->buf;
4399
4400 ent = be16_to_cpu(pd->used_pdes);
4401 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4402 return;
4403 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4404 struct dl **dlp;
4405 /* removing this disk. */
4406 be16_set(ddf->phys->entries[ent].state,
4407 cpu_to_be16(DDF_Missing));
4408 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4409 struct dl *dl = *dlp;
4410 if (dl->pdnum == (signed)ent) {
4411 close(dl->fd);
4412 dl->fd = -1;
4413 /* FIXME this doesn't free
4414 * dl->devname */
4415 update->space = dl;
4416 *dlp = dl->next;
4417 break;
4418 }
4419 }
4420 ddf_set_updates_pending(ddf);
4421 return;
4422 }
4423 if (!all_ff(ddf->phys->entries[ent].guid))
4424 return;
4425 ddf->phys->entries[ent] = pd->entries[0];
4426 ddf->phys->used_pdes = cpu_to_be16
4427 (1 + be16_to_cpu(ddf->phys->used_pdes));
4428 ddf_set_updates_pending(ddf);
4429 if (ddf->add_list) {
4430 struct active_array *a;
4431 struct dl *al = ddf->add_list;
4432 ddf->add_list = al->next;
4433
4434 al->next = ddf->dlist;
4435 ddf->dlist = al;
4436
4437 /* As a device has been added, we should check
4438 * for any degraded devices that might make
4439 * use of this spare */
4440 for (a = st->arrays ; a; a=a->next)
4441 a->check_degraded = 1;
4442 }
4443 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4444
4445 if (update->len != (sizeof(struct virtual_disk) +
4446 sizeof(struct virtual_entry)))
4447 return;
4448 vd = (struct virtual_disk*)update->buf;
4449
4450 if (vd->entries[0].state == DDF_state_deleted) {
4451 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4452 return;
4453 } else {
4454
4455 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4456 if (ent != DDF_NOTFOUND) {
4457 dprintf("%s: VD %s exists already in slot %d\n",
4458 __func__, guid_str(vd->entries[0].guid),
4459 ent);
4460 return;
4461 }
4462 ent = find_unused_vde(ddf);
4463 if (ent == DDF_NOTFOUND)
4464 return;
4465 ddf->virt->entries[ent] = vd->entries[0];
4466 ddf->virt->populated_vdes =
4467 cpu_to_be16(
4468 1 + be16_to_cpu(
4469 ddf->virt->populated_vdes));
4470 dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
4471 __func__, guid_str(vd->entries[0].guid), ent,
4472 ddf->virt->entries[ent].state,
4473 ddf->virt->entries[ent].init_state);
4474 }
4475 ddf_set_updates_pending(ddf);
4476 }
4477
4478 else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4479 vc = (struct vd_config*)update->buf;
4480 len = ddf->conf_rec_len * 512;
4481 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4482 pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
4483 __func__, guid_str(vc->guid), update->len,
4484 vc->sec_elmnt_count);
4485 return;
4486 }
4487 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4488 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4489 break;
4490 dprintf("%s: conf update for %s (%s)\n", __func__,
4491 guid_str(vc->guid), (vcl ? "old" : "new"));
4492 if (vcl) {
4493 /* An update, just copy the phys_refnum and lba_offset
4494 * fields
4495 */
4496 unsigned int i;
4497 copy_matching_bvd(ddf, &vcl->conf, update);
4498 for (i = 1; i < vc->sec_elmnt_count; i++)
4499 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4500 update);
4501 } else {
4502 /* A new VD_CONF */
4503 unsigned int i;
4504 if (!update->space)
4505 return;
4506 vcl = update->space;
4507 update->space = NULL;
4508 vcl->next = ddf->conflist;
4509 memcpy(&vcl->conf, vc, len);
4510 ent = find_vde_by_guid(ddf, vc->guid);
4511 if (ent == DDF_NOTFOUND)
4512 return;
4513 vcl->vcnum = ent;
4514 ddf->conflist = vcl;
4515 for (i = 1; i < vc->sec_elmnt_count; i++)
4516 memcpy(vcl->other_bvds[i-1],
4517 update->buf + len * i, len);
4518 }
4519 /* Set DDF_Transition on all Failed devices - to help
4520 * us detect those that are no longer in use
4521 */
4522 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4523 pdnum++)
4524 if (be16_and(ddf->phys->entries[pdnum].state,
4525 cpu_to_be16(DDF_Failed)))
4526 be16_set(ddf->phys->entries[pdnum].state,
4527 cpu_to_be16(DDF_Transition));
4528 /* Now make sure vlist is correct for each dl. */
4529 for (dl = ddf->dlist; dl; dl = dl->next) {
4530 unsigned int vn = 0;
4531 int in_degraded = 0;
4532 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4533 unsigned int dn, ibvd;
4534 const struct vd_config *conf;
4535 int vstate;
4536 dn = get_pd_index_from_refnum(vcl,
4537 dl->disk.refnum,
4538 ddf->mppe,
4539 &conf, &ibvd);
4540 if (dn == DDF_NOTFOUND)
4541 continue;
4542 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4543 dl->pdnum,
4544 be32_to_cpu(dl->disk.refnum),
4545 guid_str(conf->guid),
4546 conf->sec_elmnt_seq, vn);
4547 /* Clear the Transition flag */
4548 if (be16_and
4549 (ddf->phys->entries[dl->pdnum].state,
4550 cpu_to_be16(DDF_Failed)))
4551 be16_clear(ddf->phys
4552 ->entries[dl->pdnum].state,
4553 cpu_to_be16(DDF_Transition));
4554 dl->vlist[vn++] = vcl;
4555 vstate = ddf->virt->entries[vcl->vcnum].state
4556 & DDF_state_mask;
4557 if (vstate == DDF_state_degraded ||
4558 vstate == DDF_state_part_optimal)
4559 in_degraded = 1;
4560 }
4561 while (vn < ddf->max_part)
4562 dl->vlist[vn++] = NULL;
4563 if (dl->vlist[0]) {
4564 be16_clear(ddf->phys->entries[dl->pdnum].type,
4565 cpu_to_be16(DDF_Global_Spare));
4566 if (!be16_and(ddf->phys
4567 ->entries[dl->pdnum].type,
4568 cpu_to_be16(DDF_Active_in_VD))) {
4569 be16_set(ddf->phys
4570 ->entries[dl->pdnum].type,
4571 cpu_to_be16(DDF_Active_in_VD));
4572 if (in_degraded)
4573 be16_set(ddf->phys
4574 ->entries[dl->pdnum]
4575 .state,
4576 cpu_to_be16
4577 (DDF_Rebuilding));
4578 }
4579 }
4580 if (dl->spare) {
4581 be16_clear(ddf->phys->entries[dl->pdnum].type,
4582 cpu_to_be16(DDF_Global_Spare));
4583 be16_set(ddf->phys->entries[dl->pdnum].type,
4584 cpu_to_be16(DDF_Spare));
4585 }
4586 if (!dl->vlist[0] && !dl->spare) {
4587 be16_set(ddf->phys->entries[dl->pdnum].type,
4588 cpu_to_be16(DDF_Global_Spare));
4589 be16_clear(ddf->phys->entries[dl->pdnum].type,
4590 cpu_to_be16(DDF_Spare));
4591 be16_clear(ddf->phys->entries[dl->pdnum].type,
4592 cpu_to_be16(DDF_Active_in_VD));
4593 }
4594 }
4595
4596 /* Now remove any 'Failed' devices that are not part
4597 * of any VD. They will have the Transition flag set.
4598 * Once done, we need to update all dl->pdnum numbers.
4599 */
4600 pd2 = 0;
4601 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4602 pdnum++)
4603 if (be16_and(ddf->phys->entries[pdnum].state,
4604 cpu_to_be16(DDF_Failed))
4605 && be16_and(ddf->phys->entries[pdnum].state,
4606 cpu_to_be16(DDF_Transition)))
4607 /* skip this one */;
4608 else if (pdnum == pd2)
4609 pd2++;
4610 else {
4611 ddf->phys->entries[pd2] =
4612 ddf->phys->entries[pdnum];
4613 for (dl = ddf->dlist; dl; dl = dl->next)
4614 if (dl->pdnum == (int)pdnum)
4615 dl->pdnum = pd2;
4616 pd2++;
4617 }
4618 ddf->phys->used_pdes = cpu_to_be16(pd2);
4619 while (pd2 < pdnum) {
4620 memset(ddf->phys->entries[pd2].guid, 0xff,
4621 DDF_GUID_LEN);
4622 pd2++;
4623 }
4624
4625 ddf_set_updates_pending(ddf);
4626 }
4627 /* case DDF_SPARE_ASSIGN_MAGIC */
4628 }
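/*
 * Illustrative sketch (not compiled in): how the DDF_VIRT_RECORDS_MAGIC
 * "delete" update handled above is laid out by the sender when mdmon is
 * running.  It mirrors kill_subarray_ddf(); the helper name
 * example_queue_vd_delete is hypothetical.
 */
#if 0
static void example_queue_vd_delete(struct supertype *st, const char *guid)
{
int len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
struct virtual_disk *vd = xmalloc(len);

memset(vd, 0, len);
vd->magic = DDF_VIRT_RECORDS_MAGIC;
vd->populated_vdes = cpu_to_be16(0);
memcpy(vd->entries[0].guid, guid, DDF_GUID_LEN);
vd->entries[0].state = DDF_state_deleted; /* deletion marker */
append_metadata_update(st, vd, len);
}
#endif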
4629
4630 static void ddf_prepare_update(struct supertype *st,
4631 struct metadata_update *update)
4632 {
4633 /* This update arrived at managemon.
4634 * We are about to pass it to monitor.
4635 * If a malloc is needed, do it here.
4636 */
4637 struct ddf_super *ddf = st->sb;
4638 be32 *magic = (be32 *)update->buf;
4639 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4640 struct vcl *vcl;
4641 struct vd_config *conf = (struct vd_config *) update->buf;
4642 if (posix_memalign(&update->space, 512,
4643 offsetof(struct vcl, conf)
4644 + ddf->conf_rec_len * 512) != 0) {
4645 update->space = NULL;
4646 return;
4647 }
4648 vcl = update->space;
4649 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4650 if (alloc_other_bvds(ddf, vcl) != 0) {
4651 free(update->space);
4652 update->space = NULL;
4653 }
4654 }
4655 }
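/*
 * Example: for a DDF_VD_CONF_MAGIC update that describes a brand-new VD,
 * the monitor thread must not malloc, so ddf_process_update() takes the
 * 'struct vcl' from update->space.  That is why it is sized
 * offsetof(struct vcl, conf) + conf_rec_len * 512 here: big enough for the
 * whole configuration record to be copied into vcl->conf.
 */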
4656
4657 /*
4658 * Check degraded state of a RAID10.
4659 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4660 */
4661 static int raid10_degraded(struct mdinfo *info)
4662 {
4663 int n_prim, n_bvds;
4664 int i;
4665 struct mdinfo *d, *sra;
4666 char *found;
4667 int ret = -1;
4668
4669 if (info->array.layout == 0) {
4670 sra = sysfs_read(-1, info->sys_name, GET_LAYOUT);
if (!sra)
return ret;
4671 info->array.layout = sra->array.layout;
4672 free(sra);
4673 }
4674
4675 n_prim = info->array.layout & ~0x100;
4676 n_bvds = info->array.raid_disks / n_prim;
4677 found = xmalloc(n_bvds);
4678 if (found == NULL)
4679 return ret;
4680 memset(found, 0, n_bvds);
4681 for (d = info->devs; d; d = d->next) {
4682 i = d->disk.raid_disk / n_prim;
4683 if (i >= n_bvds) {
4684 pr_err("%s: BUG: invalid raid disk\n", __func__);
4685 goto out;
4686 }
4687 if (d->state_fd >= 0)
4688 found[i]++;
4689 }
4690 ret = 2;
4691 for (i = 0; i < n_bvds; i++)
4692 if (!found[i]) {
4693 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4694 ret = 0;
4695 goto out;
4696 } else if (found[i] < n_prim) {
4697 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4698 n_bvds);
4699 ret = 1;
4700 }
4701 out:
4702 free(found);
4703 return ret;
4704 }
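/*
 * Example: a 4-disk near-2 RAID10 has layout 0x102, giving n_prim == 2
 * copies and n_bvds == 2 mirror sets (raid_disks 0,1 and 2,3).  With disk
 * 1 missing, found[] ends up {1, 2} and the result is 1 (degraded); with
 * disks 0 and 1 both gone, found[0] == 0 and the result is 0 (failed).
 */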
4705
4706 /*
4707 * Check if the array 'a' is degraded but not failed.
4708 * If it is, find as many spares as are available and needed and
4709 * arrange for their inclusion.
4710 * We only choose devices which are not already in the array,
4711 * and prefer those with a spare-assignment to this array;
4712 * otherwise we choose global spares, assuming always that
4713 * there is enough room.
4714 * For each spare that we assign, we return an 'mdinfo' which
4715 * describes the position for the device in the array.
4716 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4717 * the new phys_refnum and lba_offset values.
4718 *
4719 * Only worry about BVDs at the moment.
4720 */
4721 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4722 struct metadata_update **updates)
4723 {
4724 int working = 0;
4725 struct mdinfo *d;
4726 struct ddf_super *ddf = a->container->sb;
4727 int global_ok = 0;
4728 struct mdinfo *rv = NULL;
4729 struct mdinfo *di;
4730 struct metadata_update *mu;
4731 struct dl *dl;
4732 int i;
4733 struct vcl *vcl;
4734 struct vd_config *vc;
4735 unsigned int n_bvd;
4736
4737 for (d = a->info.devs ; d ; d = d->next) {
4738 if ((d->curr_state & DS_FAULTY) &&
4739 d->state_fd >= 0)
4740 /* wait for Removal to happen */
4741 return NULL;
4742 if (d->state_fd >= 0)
4743 working ++;
4744 }
4745
4746 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4747 a->info.array.raid_disks,
4748 a->info.array.level);
4749 if (working == a->info.array.raid_disks)
4750 return NULL; /* array not degraded */
4751 switch (a->info.array.level) {
4752 case 1:
4753 if (working == 0)
4754 return NULL; /* failed */
4755 break;
4756 case 4:
4757 case 5:
4758 if (working < a->info.array.raid_disks - 1)
4759 return NULL; /* failed */
4760 break;
4761 case 6:
4762 if (working < a->info.array.raid_disks - 2)
4763 return NULL; /* failed */
4764 break;
4765 case 10:
4766 if (raid10_degraded(&a->info) < 1)
4767 return NULL;
4768 break;
4769 default: /* concat or stripe */
4770 return NULL; /* failed */
4771 }
4772
4773 /* For each slot, if it is not working, find a spare */
4774 dl = ddf->dlist;
4775 for (i = 0; i < a->info.array.raid_disks; i++) {
4776 for (d = a->info.devs ; d ; d = d->next)
4777 if (d->disk.raid_disk == i)
4778 break;
4779 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4780 if (d && (d->state_fd >= 0))
4781 continue;
4782
4783 /* OK, this device needs recovery. Find a spare */
4784 again:
4785 for ( ; dl ; dl = dl->next) {
4786 unsigned long long esize;
4787 unsigned long long pos;
4788 struct mdinfo *d2;
4789 int is_global = 0;
4790 int is_dedicated = 0;
4791 struct extent *ex;
4792 unsigned int j;
4793 /* If in this array, skip */
4794 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4795 if (d2->state_fd >= 0 &&
4796 d2->disk.major == dl->major &&
4797 d2->disk.minor == dl->minor) {
4798 dprintf("%x:%x already in array\n", dl->major, dl->minor);
4799 break;
4800 }
4801 if (d2)
4802 continue;
4803 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4804 cpu_to_be16(DDF_Spare))) {
4805 /* Check spare assign record */
4806 if (dl->spare) {
4807 if (dl->spare->type & DDF_spare_dedicated) {
4808 /* check spare_ents for guid */
4809 for (j = 0 ;
4810 j < be16_to_cpu
4811 (dl->spare
4812 ->populated);
4813 j++) {
4814 if (memcmp(dl->spare->spare_ents[j].guid,
4815 ddf->virt->entries[a->info.container_member].guid,
4816 DDF_GUID_LEN) == 0)
4817 is_dedicated = 1;
4818 }
4819 } else
4820 is_global = 1;
4821 }
4822 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
4823 cpu_to_be16(DDF_Global_Spare))) {
4824 is_global = 1;
4825 } else if (!be16_and(ddf->phys
4826 ->entries[dl->pdnum].state,
4827 cpu_to_be16(DDF_Failed))) {
4828 /* we can possibly use some of this */
4829 is_global = 1;
4830 }
4831 if ( ! (is_dedicated ||
4832 (is_global && global_ok))) {
4833 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
4834 is_dedicated, is_global);
4835 continue;
4836 }
4837
4838 /* We are allowed to use this device - is there space?
4839 * We need a->info.component_size sectors */
4840 ex = get_extents(ddf, dl);
4841 if (!ex) {
4842 dprintf("cannot get extents\n");
4843 continue;
4844 }
4845 j = 0; pos = 0;
4846 esize = 0;
4847
4848 do {
4849 esize = ex[j].start - pos;
4850 if (esize >= a->info.component_size)
4851 break;
4852 pos = ex[j].start + ex[j].size;
4853 j++;
4854 } while (ex[j-1].size);
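/* Example: with used extents {0,1000} and {5000,1000} (start,size) plus
 * the terminating size-0 record, and component_size == 2000: the gap
 * before the first extent is 0, so pos advances to 1000; the next gap is
 * 5000 - 1000 = 4000 >= 2000, so the loop stops and the spare space is
 * taken from sector 1000 onwards.
 */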
4855
4856 free(ex);
4857 if (esize < a->info.component_size) {
4858 dprintf("%x:%x has no room: %llu %llu\n",
4859 dl->major, dl->minor,
4860 esize, a->info.component_size);
4861 /* No room */
4862 continue;
4863 }
4864
4865 /* Cool, we have a device with some space at pos */
4866 di = xcalloc(1, sizeof(*di));
4867 di->disk.number = i;
4868 di->disk.raid_disk = i;
4869 di->disk.major = dl->major;
4870 di->disk.minor = dl->minor;
4871 di->disk.state = 0;
4872 di->recovery_start = 0;
4873 di->data_offset = pos;
4874 di->component_size = a->info.component_size;
4875 di->container_member = dl->pdnum;
4876 di->next = rv;
4877 rv = di;
4878 dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
4879 i, pos);
4880
4881 break;
4882 }
4883 if (!dl && ! global_ok) {
4884 /* not enough dedicated spares, try global */
4885 global_ok = 1;
4886 dl = ddf->dlist;
4887 goto again;
4888 }
4889 }
4890
4891 if (!rv)
4892 /* No spares found */
4893 return rv;
4894 /* Now 'rv' has a list of devices to return.
4895 * Create a metadata_update record to update the
4896 * phys_refnum and lba_offset values
4897 */
4898 mu = xmalloc(sizeof(*mu));
/* The VD configuration for this array already exists, so
 * ddf_process_update() will only copy the new phys_refnum/lba_offset
 * values out of mu->buf; no preallocated update->space is needed.
 */
4903 mu->buf = xmalloc(ddf->conf_rec_len * 512);
4904 mu->len = ddf->conf_rec_len * 512;
4905 mu->space = NULL;
4906 mu->space_list = NULL;
4907 mu->next = *updates;
4908 vc = find_vdcr(ddf, a->info.container_member, di->disk.raid_disk,
4909 &n_bvd, &vcl);
4910 memcpy(mu->buf, vc, ddf->conf_rec_len * 512);
4911
4912 vc = (struct vd_config*)mu->buf;
4913 for (di = rv ; di ; di = di->next) {
/* di->container_member holds the pdnum of the spare chosen for this slot */
4914 vc->phys_refnum[di->disk.raid_disk] =
4915 ddf->phys->entries[di->container_member].refnum;
4916 LBA_OFFSET(ddf, vc)[di->disk.raid_disk]
4917 = cpu_to_be64(di->data_offset);
4918 }
4919 *updates = mu;
4920 return rv;
4921 }
4922 #endif /* MDASSEMBLE */
4923
4924 static int ddf_level_to_layout(int level)
4925 {
4926 switch(level) {
4927 case 0:
4928 case 1:
4929 return 0;
4930 case 5:
4931 return ALGORITHM_LEFT_SYMMETRIC;
4932 case 6:
4933 return ALGORITHM_ROTATING_N_CONTINUE;
4934 case 10:
4935 return 0x102;
4936 default:
4937 return UnSet;
4938 }
4939 }
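/*
 * Note: ALGORITHM_LEFT_SYMMETRIC and ALGORITHM_ROTATING_N_CONTINUE are
 * md's parity-rotation layouts used here for RAID5/RAID6, and 0x102 is
 * the md RAID10 layout word: near-copies in the low byte, far-copies in
 * the next, so 0x102 means two near copies ("n2").  raid10_degraded()
 * decodes the same value with "layout & ~0x100".
 */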
4940
4941 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
4942 {
4943 if (level && *level == UnSet)
4944 *level = LEVEL_CONTAINER;
4945
4946 if (level && layout && *layout == UnSet)
4947 *layout = ddf_level_to_layout(*level);
4948 }
4949
4950 struct superswitch super_ddf = {
4951 #ifndef MDASSEMBLE
4952 .examine_super = examine_super_ddf,
4953 .brief_examine_super = brief_examine_super_ddf,
4954 .brief_examine_subarrays = brief_examine_subarrays_ddf,
4955 .export_examine_super = export_examine_super_ddf,
4956 .detail_super = detail_super_ddf,
4957 .brief_detail_super = brief_detail_super_ddf,
4958 .validate_geometry = validate_geometry_ddf,
4959 .write_init_super = write_init_super_ddf,
4960 .add_to_super = add_to_super_ddf,
4961 .remove_from_super = remove_from_super_ddf,
4962 .load_container = load_container_ddf,
4963 .copy_metadata = copy_metadata_ddf,
4964 .kill_subarray = kill_subarray_ddf,
4965 #endif
4966 .match_home = match_home_ddf,
4967 .uuid_from_super= uuid_from_super_ddf,
4968 .getinfo_super = getinfo_super_ddf,
4969 .update_super = update_super_ddf,
4970
4971 .avail_size = avail_size_ddf,
4972
4973 .compare_super = compare_super_ddf,
4974
4975 .load_super = load_super_ddf,
4976 .init_super = init_super_ddf,
4977 .store_super = store_super_ddf,
4978 .free_super = free_super_ddf,
4979 .match_metadata_desc = match_metadata_desc_ddf,
4980 .container_content = container_content_ddf,
4981 .default_geometry = default_geometry_ddf,
4982
4983 .external = 1,
4984
4985 #ifndef MDASSEMBLE
4986 /* for mdmon */
4987 .open_new = ddf_open_new,
4988 .set_array_state= ddf_set_array_state,
4989 .set_disk = ddf_set_disk,
4990 .sync_metadata = ddf_sync_metadata,
4991 .process_update = ddf_process_update,
4992 .prepare_update = ddf_prepare_update,
4993 .activate_spare = ddf_activate_spare,
4994 #endif
4995 .name = "ddf",
4996 };