1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2009 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add the number of
38 * seconds in the 1970s decade to convert to linux timestamps.
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
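/* Illustrative arithmetic: DECADE is 3652 days = 315532800 seconds
 * (1970-1979, with leap days in 1972 and 1976), so a DDF timestamp is
 * converted with e.g.
 *	time_t t = (time_t)ddf_seconds + DECADE;
 * as done below for GUID timestamps and array ctime/utime.
 */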
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* Default for safe_mode_delay. Same value as for IMSM.
51 */
52 static const int DDF_SAFE_MODE_DELAY = 4000;
53
54 /* The DDF metadata handling.
55 * DDF metadata lives at the end of the device.
56 * The last 512 byte block provides an 'anchor' which is used to locate
57 * the rest of the metadata which usually lives immediately behind the anchor.
58 *
59 * Note:
60 * - all multibyte numeric fields are bigendian.
61 * - all strings are space padded.
62 *
63 */
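/* For illustration, the anchor is found by reading the last sector of
 * the device, roughly:
 *	get_dev_size(fd, NULL, &dsize);
 *	lseek64(fd, dsize - 512, 0);
 *	read(fd, &anchor, 512);
 * which is what load_ddf_headers() below does before checking magic,
 * CRC and revision.
 */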
64
65 typedef struct __be16 {
66 __u16 _v16;
67 } be16;
68 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
69 #define be16_and(x, y) ((x)._v16 & (y)._v16)
70 #define be16_or(x, y) ((x)._v16 | (y)._v16)
71 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
72 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
73
74 typedef struct __be32 {
75 __u32 _v32;
76 } be32;
77 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
78
79 typedef struct __be64 {
80 __u64 _v64;
81 } be64;
82 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
83
84 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
85 static inline be16 cpu_to_be16(__u16 x)
86 {
87 be16 be = { ._v16 = __cpu_to_be16(x) };
88 return be;
89 }
90
91 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
92 static inline be32 cpu_to_be32(__u32 x)
93 {
94 be32 be = { ._v32 = __cpu_to_be32(x) };
95 return be;
96 }
97
98 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
99 static inline be64 cpu_to_be64(__u64 x)
100 {
101 be64 be = { ._v64 = __cpu_to_be64(x) };
102 return be;
103 }
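/* Wrapping the on-disk big-endian values in one-member structs lets the
 * compiler catch accidental mixing of disk and CPU byte order.  For
 * example (illustrative only):
 *	be32 seq = hdr->seq;
 *	__u32 next = be32_to_cpu(seq) + 1;	-- OK, explicit conversion
 *	__u32 bad = seq._v32 + 1;		-- wrong, still big-endian
 */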
104
105 /* Primary Raid Level (PRL) */
106 #define DDF_RAID0 0x00
107 #define DDF_RAID1 0x01
108 #define DDF_RAID3 0x03
109 #define DDF_RAID4 0x04
110 #define DDF_RAID5 0x05
111 #define DDF_RAID1E 0x11
112 #define DDF_JBOD 0x0f
113 #define DDF_CONCAT 0x1f
114 #define DDF_RAID5E 0x15
115 #define DDF_RAID5EE 0x25
116 #define DDF_RAID6 0x06
117
118 /* Raid Level Qualifier (RLQ) */
119 #define DDF_RAID0_SIMPLE 0x00
120 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
121 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
122 #define DDF_RAID3_0 0x00 /* parity in first extent */
123 #define DDF_RAID3_N 0x01 /* parity in last extent */
124 #define DDF_RAID4_0 0x00 /* parity in first extent */
125 #define DDF_RAID4_N 0x01 /* parity in last extent */
126 /* these apply to raid5e and raid5ee as well */
127 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
128 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
129 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
130 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
131
132 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
133 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
134
135 /* Secondary RAID Level (SRL) */
136 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
137 #define DDF_2MIRRORED 0x01
138 #define DDF_2CONCAT 0x02
139 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
140
141 /* Magic numbers */
142 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
143 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
144 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
145 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
146 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
147 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
148 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
149 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
150 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
151 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
152
153 #define DDF_GUID_LEN 24
154 #define DDF_REVISION_0 "01.00.00"
155 #define DDF_REVISION_2 "01.02.00"
156
157 struct ddf_header {
158 be32 magic; /* DDF_HEADER_MAGIC */
159 be32 crc;
160 char guid[DDF_GUID_LEN];
161 char revision[8]; /* 01.02.00 */
162 be32 seq; /* starts at '1' */
163 be32 timestamp;
164 __u8 openflag;
165 __u8 foreignflag;
166 __u8 enforcegroups;
167 __u8 pad0; /* 0xff */
168 __u8 pad1[12]; /* 12 * 0xff */
169 /* 64 bytes so far */
170 __u8 header_ext[32]; /* reserved: fill with 0xff */
171 be64 primary_lba;
172 be64 secondary_lba;
173 __u8 type;
174 __u8 pad2[3]; /* 0xff */
175 be32 workspace_len; /* sectors for vendor space -
176 * at least 32768(sectors) */
177 be64 workspace_lba;
178 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
179 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
180 be16 max_partitions; /* i.e. max num of configuration
181 record entries per disk */
182 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
183 *12/512) */
184 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
185 __u8 pad3[54]; /* 0xff */
186 /* 192 bytes so far */
187 be32 controller_section_offset;
188 be32 controller_section_length;
189 be32 phys_section_offset;
190 be32 phys_section_length;
191 be32 virt_section_offset;
192 be32 virt_section_length;
193 be32 config_section_offset;
194 be32 config_section_length;
195 be32 data_section_offset;
196 be32 data_section_length;
197 be32 bbm_section_offset;
198 be32 bbm_section_length;
199 be32 diag_space_offset;
200 be32 diag_space_length;
201 be32 vendor_offset;
202 be32 vendor_length;
203 /* 256 bytes so far */
204 __u8 pad4[256]; /* 0xff */
205 };
206
207 /* type field */
208 #define DDF_HEADER_ANCHOR 0x00
209 #define DDF_HEADER_PRIMARY 0x01
210 #define DDF_HEADER_SECONDARY 0x02
211
212 /* The content of the 'controller section' - global scope */
213 struct ddf_controller_data {
214 be32 magic; /* DDF_CONTROLLER_MAGIC */
215 be32 crc;
216 char guid[DDF_GUID_LEN];
217 struct controller_type {
218 be16 vendor_id;
219 be16 device_id;
220 be16 sub_vendor_id;
221 be16 sub_device_id;
222 } type;
223 char product_id[16];
224 __u8 pad[8]; /* 0xff */
225 __u8 vendor_data[448];
226 };
227
228 /* The content of phys_section - global scope */
229 struct phys_disk {
230 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
231 be32 crc;
232 be16 used_pdes;
233 be16 max_pdes;
234 __u8 pad[52];
235 struct phys_disk_entry {
236 char guid[DDF_GUID_LEN];
237 be32 refnum;
238 be16 type;
239 be16 state;
240 be64 config_size; /* DDF structures must be after here */
241 char path[18]; /* another horrible structure really */
242 __u8 pad[6];
243 } entries[0];
244 };
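/* entries[] above is an old-style flexible array member: the phys
 * section is read with load_section(), which allocates the full
 * phys_section_length * 512 bytes, leaving room for the max_pdes
 * entries declared in the header.
 */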
245
246 /* phys_disk_entry.type is a bitmap - bigendian remember */
247 #define DDF_Forced_PD_GUID 1
248 #define DDF_Active_in_VD 2
249 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
250 #define DDF_Spare 8 /* overrides Global_spare */
251 #define DDF_Foreign 16
252 #define DDF_Legacy 32 /* no DDF on this device */
253
254 #define DDF_Interface_mask 0xf00
255 #define DDF_Interface_SCSI 0x100
256 #define DDF_Interface_SAS 0x200
257 #define DDF_Interface_SATA 0x300
258 #define DDF_Interface_FC 0x400
259
260 /* phys_disk_entry.state is a bigendian bitmap */
261 #define DDF_Online 1
262 #define DDF_Failed 2 /* overrides 1,4,8 */
263 #define DDF_Rebuilding 4
264 #define DDF_Transition 8
265 #define DDF_SMART 16
266 #define DDF_ReadErrors 32
267 #define DDF_Missing 64
268
269 /* The content of the virt_section global scope */
270 struct virtual_disk {
271 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
272 be32 crc;
273 be16 populated_vdes;
274 be16 max_vdes;
275 __u8 pad[52];
276 struct virtual_entry {
277 char guid[DDF_GUID_LEN];
278 be16 unit;
279 __u16 pad0; /* 0xffff */
280 be16 guid_crc;
281 be16 type;
282 __u8 state;
283 __u8 init_state;
284 __u8 pad1[14];
285 char name[16];
286 } entries[0];
287 };
288
289 /* virtual_entry.type is a bitmap - bigendian */
290 #define DDF_Shared 1
291 #define DDF_Enforce_Groups 2
292 #define DDF_Unicode 4
293 #define DDF_Owner_Valid 8
294
295 /* virtual_entry.state is a bigendian bitmap */
296 #define DDF_state_mask 0x7
297 #define DDF_state_optimal 0x0
298 #define DDF_state_degraded 0x1
299 #define DDF_state_deleted 0x2
300 #define DDF_state_missing 0x3
301 #define DDF_state_failed 0x4
302 #define DDF_state_part_optimal 0x5
303
304 #define DDF_state_morphing 0x8
305 #define DDF_state_inconsistent 0x10
306
307 /* virtual_entry.init_state is a bigendian bitmap */
308 #define DDF_initstate_mask 0x03
309 #define DDF_init_not 0x00
310 #define DDF_init_quick 0x01 /* initialisation in progress,
311 * i.e. 'state_inconsistent' */
312 #define DDF_init_full 0x02
313
314 #define DDF_access_mask 0xc0
315 #define DDF_access_rw 0x00
316 #define DDF_access_ro 0x80
317 #define DDF_access_blocked 0xc0
318
319 /* The content of the config_section - local scope
320 * It has multiple records each config_record_len sectors
321 * They can be vd_config or spare_assign
322 */
323
324 struct vd_config {
325 be32 magic; /* DDF_VD_CONF_MAGIC */
326 be32 crc;
327 char guid[DDF_GUID_LEN];
328 be32 timestamp;
329 be32 seqnum;
330 __u8 pad0[24];
331 be16 prim_elmnt_count;
332 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
333 __u8 prl;
334 __u8 rlq;
335 __u8 sec_elmnt_count;
336 __u8 sec_elmnt_seq;
337 __u8 srl;
338 be64 blocks; /* blocks per component could be different
339 * on different component devices...(only
340 * for concat I hope) */
341 be64 array_blocks; /* blocks in array */
342 __u8 pad1[8];
343 be32 spare_refs[8];
344 __u8 cache_pol[8];
345 __u8 bg_rate;
346 __u8 pad2[3];
347 __u8 pad3[52];
348 __u8 pad4[192];
349 __u8 v0[32]; /* reserved- 0xff */
350 __u8 v1[32]; /* reserved- 0xff */
351 __u8 v2[16]; /* reserved- 0xff */
352 __u8 v3[16]; /* reserved- 0xff */
353 __u8 vendor[32];
354 be32 phys_refnum[0]; /* refnum of each disk in sequence */
355 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
356 bvd are always the same size */
357 };
358 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
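/* The tail of a vd_config holds two arrays of ->mppe elements each:
 * phys_refnum[] followed by the per-disk starting LBAs.  For
 * illustration, the start LBA of member 'i' of a BVD is
 *	be64_to_cpu(LBA_OFFSET(ddf, vd)[i])
 * while vd->phys_refnum[i] says which physical disk that member is.
 */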
359
360 /* vd_config.cache_pol[7] is a bitmap */
361 #define DDF_cache_writeback 1 /* else writethrough */
362 #define DDF_cache_wadaptive 2 /* only applies if writeback */
363 #define DDF_cache_readahead 4
364 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
365 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
366 #define DDF_cache_wallowed 32 /* enable write caching */
367 #define DDF_cache_rallowed 64 /* enable read caching */
368
369 struct spare_assign {
370 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
371 be32 crc;
372 be32 timestamp;
373 __u8 reserved[7];
374 __u8 type;
375 be16 populated; /* SAEs used */
376 be16 max; /* max SAEs */
377 __u8 pad[8];
378 struct spare_assign_entry {
379 char guid[DDF_GUID_LEN];
380 be16 secondary_element;
381 __u8 pad[6];
382 } spare_ents[0];
383 };
384 /* spare_assign.type is a bitmap */
385 #define DDF_spare_dedicated 0x1 /* else global */
386 #define DDF_spare_revertible 0x2 /* else committable */
387 #define DDF_spare_active 0x4 /* else not active */
388 #define DDF_spare_affinity 0x8 /* enclosure affinity */
389
390 /* The data_section contents - local scope */
391 struct disk_data {
392 be32 magic; /* DDF_PHYS_DATA_MAGIC */
393 be32 crc;
394 char guid[DDF_GUID_LEN];
395 be32 refnum; /* crc of some magic drive data ... */
396 __u8 forced_ref; /* set when above was not result of magic */
397 __u8 forced_guid; /* set if guid was forced rather than magic */
398 __u8 vendor[32];
399 __u8 pad[442];
400 };
401
402 /* bbm_section content */
403 struct bad_block_log {
404 be32 magic;
405 be32 crc;
406 be16 entry_count;
407 be32 spare_count;
408 __u8 pad[10];
409 be64 first_spare;
410 struct mapped_block {
411 be64 defective_start;
412 be32 replacement_start;
413 be16 remap_count;
414 __u8 pad[2];
415 } entries[0];
416 };
417
418 /* Struct for internally holding ddf structures */
419 /* The DDF structure stored on each device is potentially
420 * quite different, as some data is global and some is local.
421 * The global data is:
422 * - ddf header
423 * - controller_data
424 * - Physical disk records
425 * - Virtual disk records
426 * The local data is:
427 * - Configuration records
428 * - Physical Disk data section
429 * ( and Bad block and vendor which I don't care about yet).
430 *
431 * The local data is parsed into separate lists as it is read
432 * and reconstructed for writing. This means that we only need
433 * to make config changes once and they are automatically
434 * propagated to all devices.
435 * Note that the ddf_super has space for the conf and disk data
436 * for this disk and also for a list of all such data.
437 * The list is only used for the superblock that is being
438 * built in Create or Assemble to describe the whole array.
439 */
440 struct ddf_super {
441 struct ddf_header anchor, primary, secondary;
442 struct ddf_controller_data controller;
443 struct ddf_header *active;
444 struct phys_disk *phys;
445 struct virtual_disk *virt;
446 char *conf;
447 int pdsize, vdsize;
448 unsigned int max_part, mppe, conf_rec_len;
449 int currentdev;
450 int updates_pending;
451 struct vcl {
452 union {
453 char space[512];
454 struct {
455 struct vcl *next;
456 unsigned int vcnum; /* index into ->virt */
457 struct vd_config **other_bvds;
458 __u64 *block_sizes; /* NULL if all the same */
459 };
460 };
461 struct vd_config conf;
462 } *conflist, *currentconf;
463 struct dl {
464 union {
465 char space[512];
466 struct {
467 struct dl *next;
468 int major, minor;
469 char *devname;
470 int fd;
471 unsigned long long size; /* sectors */
472 be64 primary_lba; /* sectors */
473 be64 secondary_lba; /* sectors */
474 be64 workspace_lba; /* sectors */
475 int pdnum; /* index in ->phys */
476 struct spare_assign *spare;
477 void *mdupdate; /* hold metadata update */
478
479 /* These fields used by auto-layout */
480 int raiddisk; /* slot to fill in autolayout */
481 __u64 esize;
482 };
483 };
484 struct disk_data disk;
485 struct vcl *vlist[0]; /* max_part in size */
486 } *dlist, *add_list;
487 };
488
489 #ifndef offsetof
490 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
491 #endif
492
493 #if DEBUG
494 static int all_ff(const char *guid);
495 static void pr_state(struct ddf_super *ddf, const char *msg)
496 {
497 unsigned int i;
498 dprintf("%s/%s: ", __func__, msg);
499 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
500 if (all_ff(ddf->virt->entries[i].guid))
501 continue;
502 dprintf("%u(s=%02x i=%02x) ", i,
503 ddf->virt->entries[i].state,
504 ddf->virt->entries[i].init_state);
505 }
506 dprintf("\n");
507 }
508 #else
509 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
510 #endif
511
512 static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
513 {
514 if (ddf->updates_pending)
515 return;
516 ddf->updates_pending = 1;
517 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
518 pr_state(ddf, func);
519 }
520
521 #define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)
522
523 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
524 be32 refnum, unsigned int nmax,
525 const struct vd_config **bvd,
526 unsigned int *idx);
527
528 static be32 calc_crc(void *buf, int len)
529 {
530 /* crcs are always at the same place as in the ddf_header */
531 struct ddf_header *ddf = buf;
532 be32 oldcrc = ddf->crc;
533 __u32 newcrc;
534 ddf->crc = cpu_to_be32(0xffffffff);
535
536 newcrc = crc32(0, buf, len);
537 ddf->crc = oldcrc;
538 /* The crc is stored (like everything) bigendian, so convert
539 * here for simplicity
540 */
541 return cpu_to_be32(newcrc);
542 }
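/* For illustration, sections are verified by recomputing the CRC over
 * the whole buffer (with the crc field temporarily set to ~0 inside
 * calc_crc) and comparing with the stored value:
 *	if (!be32_eq(calc_crc(hdr, 512), hdr->crc))
 *		return 0;	-- reject, as in load_ddf_header()
 */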
543
544 #define DDF_INVALID_LEVEL 0xff
545 #define DDF_NO_SECONDARY 0xff
546 static int err_bad_md_layout(const mdu_array_info_t *array)
547 {
548 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
549 array->level, array->layout, array->raid_disks);
550 return -1;
551 }
552
553 static int layout_md2ddf(const mdu_array_info_t *array,
554 struct vd_config *conf)
555 {
556 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
557 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
558 __u8 sec_elmnt_count = 1;
559 __u8 srl = DDF_NO_SECONDARY;
560
561 switch (array->level) {
562 case LEVEL_LINEAR:
563 prl = DDF_CONCAT;
564 break;
565 case 0:
566 rlq = DDF_RAID0_SIMPLE;
567 prl = DDF_RAID0;
568 break;
569 case 1:
570 switch (array->raid_disks) {
571 case 2:
572 rlq = DDF_RAID1_SIMPLE;
573 break;
574 case 3:
575 rlq = DDF_RAID1_MULTI;
576 break;
577 default:
578 return err_bad_md_layout(array);
579 }
580 prl = DDF_RAID1;
581 break;
582 case 4:
583 if (array->layout != 0)
584 return err_bad_md_layout(array);
585 rlq = DDF_RAID4_N;
586 prl = DDF_RAID4;
587 break;
588 case 5:
589 switch (array->layout) {
590 case ALGORITHM_LEFT_ASYMMETRIC:
591 rlq = DDF_RAID5_N_RESTART;
592 break;
593 case ALGORITHM_RIGHT_ASYMMETRIC:
594 rlq = DDF_RAID5_0_RESTART;
595 break;
596 case ALGORITHM_LEFT_SYMMETRIC:
597 rlq = DDF_RAID5_N_CONTINUE;
598 break;
599 case ALGORITHM_RIGHT_SYMMETRIC:
600 /* not mentioned in standard */
601 default:
602 return err_bad_md_layout(array);
603 }
604 prl = DDF_RAID5;
605 break;
606 case 6:
607 switch (array->layout) {
608 case ALGORITHM_ROTATING_N_RESTART:
609 rlq = DDF_RAID5_N_RESTART;
610 break;
611 case ALGORITHM_ROTATING_ZERO_RESTART:
612 rlq = DDF_RAID6_0_RESTART;
613 break;
614 case ALGORITHM_ROTATING_N_CONTINUE:
615 rlq = DDF_RAID5_N_CONTINUE;
616 break;
617 default:
618 return err_bad_md_layout(array);
619 }
620 prl = DDF_RAID6;
621 break;
622 case 10:
623 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
624 rlq = DDF_RAID1_SIMPLE;
625 prim_elmnt_count = cpu_to_be16(2);
626 sec_elmnt_count = array->raid_disks / 2;
627 } else if (array->raid_disks % 3 == 0
628 && array->layout == 0x103) {
629 rlq = DDF_RAID1_MULTI;
630 prim_elmnt_count = cpu_to_be16(3);
631 sec_elmnt_count = array->raid_disks / 3;
632 } else
633 return err_bad_md_layout(array);
634 srl = DDF_2SPANNED;
635 prl = DDF_RAID1;
636 break;
637 default:
638 return err_bad_md_layout(array);
639 }
640 conf->prl = prl;
641 conf->prim_elmnt_count = prim_elmnt_count;
642 conf->rlq = rlq;
643 conf->srl = srl;
644 conf->sec_elmnt_count = sec_elmnt_count;
645 return 0;
646 }
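/* Worked example: a 4-disk md RAID10 with layout 0x102 (near=2) maps to
 * prl=DDF_RAID1, rlq=DDF_RAID1_SIMPLE, prim_elmnt_count=2,
 * sec_elmnt_count=2, srl=DDF_2SPANNED.  layout_ddf2md() below performs
 * the inverse mapping.
 */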
647
648 static int err_bad_ddf_layout(const struct vd_config *conf)
649 {
650 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
651 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
652 return -1;
653 }
654
655 static int layout_ddf2md(const struct vd_config *conf,
656 mdu_array_info_t *array)
657 {
658 int level = LEVEL_UNSUPPORTED;
659 int layout = 0;
660 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
661
662 if (conf->sec_elmnt_count > 1) {
663 /* see also check_secondary() */
664 if (conf->prl != DDF_RAID1 ||
665 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
666 pr_err("Unsupported secondary RAID level %u/%u\n",
667 conf->prl, conf->srl);
668 return -1;
669 }
670 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
671 layout = 0x102;
672 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
673 layout = 0x103;
674 else
675 return err_bad_ddf_layout(conf);
676 raiddisks *= conf->sec_elmnt_count;
677 level = 10;
678 goto good;
679 }
680
681 switch (conf->prl) {
682 case DDF_CONCAT:
683 level = LEVEL_LINEAR;
684 break;
685 case DDF_RAID0:
686 if (conf->rlq != DDF_RAID0_SIMPLE)
687 return err_bad_ddf_layout(conf);
688 level = 0;
689 break;
690 case DDF_RAID1:
691 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
692 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
693 return err_bad_ddf_layout(conf);
694 level = 1;
695 break;
696 case DDF_RAID4:
697 if (conf->rlq != DDF_RAID4_N)
698 return err_bad_ddf_layout(conf);
699 level = 4;
700 break;
701 case DDF_RAID5:
702 switch (conf->rlq) {
703 case DDF_RAID5_N_RESTART:
704 layout = ALGORITHM_LEFT_ASYMMETRIC;
705 break;
706 case DDF_RAID5_0_RESTART:
707 layout = ALGORITHM_RIGHT_ASYMMETRIC;
708 break;
709 case DDF_RAID5_N_CONTINUE:
710 layout = ALGORITHM_LEFT_SYMMETRIC;
711 break;
712 default:
713 return err_bad_ddf_layout(conf);
714 }
715 level = 5;
716 break;
717 case DDF_RAID6:
718 switch (conf->rlq) {
719 case DDF_RAID5_N_RESTART:
720 layout = ALGORITHM_ROTATING_N_RESTART;
721 break;
722 case DDF_RAID6_0_RESTART:
723 layout = ALGORITHM_ROTATING_ZERO_RESTART;
724 break;
725 case DDF_RAID5_N_CONTINUE:
726 layout = ALGORITHM_ROTATING_N_CONTINUE;
727 break;
728 default:
729 return err_bad_ddf_layout(conf);
730 }
731 level = 6;
732 break;
733 default:
734 return err_bad_ddf_layout(conf);
735 };
736
737 good:
738 array->level = level;
739 array->layout = layout;
740 array->raid_disks = raiddisks;
741 return 0;
742 }
743
744 static int load_ddf_header(int fd, unsigned long long lba,
745 unsigned long long size,
746 int type,
747 struct ddf_header *hdr, struct ddf_header *anchor)
748 {
749 /* read a ddf header (primary or secondary) from fd/lba
750 * and check that it is consistent with anchor
751 * Need to check:
752 * magic, crc, guid, rev, and LBA's header_type, and
753 * everything after header_type must be the same
754 */
755 if (lba >= size-1)
756 return 0;
757
758 if (lseek64(fd, lba<<9, 0) < 0)
759 return 0;
760
761 if (read(fd, hdr, 512) != 512)
762 return 0;
763
764 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
765 pr_err("%s: bad header magic\n", __func__);
766 return 0;
767 }
768 if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
769 pr_err("%s: bad CRC\n", __func__);
770 return 0;
771 }
772 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
773 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
774 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
775 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
776 hdr->type != type ||
777 memcmp(anchor->pad2, hdr->pad2, 512 -
778 offsetof(struct ddf_header, pad2)) != 0) {
779 pr_err("%s: header mismatch\n", __func__);
780 return 0;
781 }
782
783 /* Looks good enough to me... */
784 return 1;
785 }
786
787 static void *load_section(int fd, struct ddf_super *super, void *buf,
788 be32 offset_be, be32 len_be, int check)
789 {
790 unsigned long long offset = be32_to_cpu(offset_be);
791 unsigned long long len = be32_to_cpu(len_be);
792 int dofree = (buf == NULL);
793
794 if (check)
795 if (len != 2 && len != 8 && len != 32
796 && len != 128 && len != 512)
797 return NULL;
798
799 if (len > 1024)
800 return NULL;
801 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
802 buf = NULL;
803
804 if (!buf)
805 return NULL;
806
807 if (super->active->type == 1)
808 offset += be64_to_cpu(super->active->primary_lba);
809 else
810 offset += be64_to_cpu(super->active->secondary_lba);
811
812 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
813 if (dofree)
814 free(buf);
815 return NULL;
816 }
817 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
818 if (dofree)
819 free(buf);
820 return NULL;
821 }
822 return buf;
823 }
824
825 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
826 {
827 unsigned long long dsize;
828
829 get_dev_size(fd, NULL, &dsize);
830
831 if (lseek64(fd, dsize-512, 0) < 0) {
832 if (devname)
833 pr_err("Cannot seek to anchor block on %s: %s\n",
834 devname, strerror(errno));
835 return 1;
836 }
837 if (read(fd, &super->anchor, 512) != 512) {
838 if (devname)
839 pr_err("Cannot read anchor block on %s: %s\n",
840 devname, strerror(errno));
841 return 1;
842 }
843 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
844 if (devname)
845 pr_err("no DDF anchor found on %s\n",
846 devname);
847 return 2;
848 }
849 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
850 if (devname)
851 pr_err("bad CRC on anchor on %s\n",
852 devname);
853 return 2;
854 }
855 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
856 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
857 if (devname)
858 pr_err("can only support super revision"
859 " %.8s and earlier, not %.8s on %s\n",
860 DDF_REVISION_2, super->anchor.revision,devname);
861 return 2;
862 }
863 super->active = NULL;
864 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
865 dsize >> 9, 1,
866 &super->primary, &super->anchor) == 0) {
867 if (devname)
868 pr_err("Failed to load primary DDF header "
869 "on %s\n", devname);
870 } else
871 super->active = &super->primary;
872
873 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
874 dsize >> 9, 2,
875 &super->secondary, &super->anchor)) {
876 if (super->active == NULL
877 || (be32_to_cpu(super->primary.seq)
878 < be32_to_cpu(super->secondary.seq) &&
879 !super->secondary.openflag)
880 || (be32_to_cpu(super->primary.seq)
881 == be32_to_cpu(super->secondary.seq) &&
882 super->primary.openflag && !super->secondary.openflag)
883 )
884 super->active = &super->secondary;
885 } else if (devname &&
886 be64_to_cpu(super->anchor.secondary_lba) != ~(__u64)0)
887 pr_err("Failed to load secondary DDF header on %s\n",
888 devname);
889 if (super->active == NULL)
890 return 2;
891 return 0;
892 }
893
894 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
895 {
896 void *ok;
897 ok = load_section(fd, super, &super->controller,
898 super->active->controller_section_offset,
899 super->active->controller_section_length,
900 0);
901 super->phys = load_section(fd, super, NULL,
902 super->active->phys_section_offset,
903 super->active->phys_section_length,
904 1);
905 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
906
907 super->virt = load_section(fd, super, NULL,
908 super->active->virt_section_offset,
909 super->active->virt_section_length,
910 1);
911 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
912 if (!ok ||
913 !super->phys ||
914 !super->virt) {
915 free(super->phys);
916 free(super->virt);
917 super->phys = NULL;
918 super->virt = NULL;
919 return 2;
920 }
921 super->conflist = NULL;
922 super->dlist = NULL;
923
924 super->max_part = be16_to_cpu(super->active->max_partitions);
925 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
926 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
927 return 0;
928 }
929
930 #define DDF_UNUSED_BVD 0xff
931 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
932 {
933 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
934 unsigned int i, vdsize;
935 void *p;
936 if (n_vds == 0) {
937 vcl->other_bvds = NULL;
938 return 0;
939 }
940 vdsize = ddf->conf_rec_len * 512;
941 if (posix_memalign(&p, 512, n_vds *
942 (vdsize + sizeof(struct vd_config *))) != 0)
943 return -1;
944 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
945 for (i = 0; i < n_vds; i++) {
946 vcl->other_bvds[i] = p + i * vdsize;
947 memset(vcl->other_bvds[i], 0, vdsize);
948 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
949 }
950 return 0;
951 }
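/* Layout note: alloc_other_bvds() makes one allocation holding the
 * n_vds vd_config copies followed by the other_bvds[] pointer array,
 * so freeing other_bvds[0] releases everything (see free_super_ddf()).
 */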
952
953 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
954 unsigned int len)
955 {
956 int i;
957 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
958 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
959 break;
960
961 if (i < vcl->conf.sec_elmnt_count-1) {
962 if (be32_to_cpu(vd->seqnum) <=
963 be32_to_cpu(vcl->other_bvds[i]->seqnum))
964 return;
965 } else {
966 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
967 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
968 break;
969 if (i == vcl->conf.sec_elmnt_count-1) {
970 pr_err("no space for sec level config %u, count is %u\n",
971 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
972 return;
973 }
974 }
975 memcpy(vcl->other_bvds[i], vd, len);
976 }
977
978 static int load_ddf_local(int fd, struct ddf_super *super,
979 char *devname, int keep)
980 {
981 struct dl *dl;
982 struct stat stb;
983 char *conf;
984 unsigned int i;
985 unsigned int confsec;
986 int vnum;
987 unsigned int max_virt_disks = be16_to_cpu
988 (super->active->max_vd_entries);
989 unsigned long long dsize;
990
991 /* First the local disk info */
992 if (posix_memalign((void**)&dl, 512,
993 sizeof(*dl) +
994 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
995 pr_err("%s could not allocate disk info buffer\n",
996 __func__);
997 return 1;
998 }
999
1000 load_section(fd, super, &dl->disk,
1001 super->active->data_section_offset,
1002 super->active->data_section_length,
1003 0);
1004 dl->devname = devname ? xstrdup(devname) : NULL;
1005
1006 fstat(fd, &stb);
1007 dl->major = major(stb.st_rdev);
1008 dl->minor = minor(stb.st_rdev);
1009 dl->next = super->dlist;
1010 dl->fd = keep ? fd : -1;
1011
1012 dl->size = 0;
1013 if (get_dev_size(fd, devname, &dsize))
1014 dl->size = dsize >> 9;
1015 /* If the disks have different sizes, the LBAs will differ
1016 * between phys disks.
1017 * At this point, the values in super->active must be valid
1018 * for this phys disk. */
1019 dl->primary_lba = super->active->primary_lba;
1020 dl->secondary_lba = super->active->secondary_lba;
1021 dl->workspace_lba = super->active->workspace_lba;
1022 dl->spare = NULL;
1023 for (i = 0 ; i < super->max_part ; i++)
1024 dl->vlist[i] = NULL;
1025 super->dlist = dl;
1026 dl->pdnum = -1;
1027 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1028 if (memcmp(super->phys->entries[i].guid,
1029 dl->disk.guid, DDF_GUID_LEN) == 0)
1030 dl->pdnum = i;
1031
1032 /* Now the config list. */
1033 /* 'conf' is an array of config entries, some of which are
1034 * probably invalid. Those which are good need to be copied into
1035 * the conflist
1036 */
1037
1038 conf = load_section(fd, super, super->conf,
1039 super->active->config_section_offset,
1040 super->active->config_section_length,
1041 0);
1042 super->conf = conf;
1043 vnum = 0;
1044 for (confsec = 0;
1045 confsec < be32_to_cpu(super->active->config_section_length);
1046 confsec += super->conf_rec_len) {
1047 struct vd_config *vd =
1048 (struct vd_config *)((char*)conf + confsec*512);
1049 struct vcl *vcl;
1050
1051 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1052 if (dl->spare)
1053 continue;
1054 if (posix_memalign((void**)&dl->spare, 512,
1055 super->conf_rec_len*512) != 0) {
1056 pr_err("%s could not allocate spare info buf\n",
1057 __func__);
1058 return 1;
1059 }
1060
1061 memcpy(dl->spare, vd, super->conf_rec_len*512);
1062 continue;
1063 }
1064 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1065 continue;
1066 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1067 if (memcmp(vcl->conf.guid,
1068 vd->guid, DDF_GUID_LEN) == 0)
1069 break;
1070 }
1071
1072 if (vcl) {
1073 dl->vlist[vnum++] = vcl;
1074 if (vcl->other_bvds != NULL &&
1075 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1076 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1077 continue;
1078 }
1079 if (be32_to_cpu(vd->seqnum) <=
1080 be32_to_cpu(vcl->conf.seqnum))
1081 continue;
1082 } else {
1083 if (posix_memalign((void**)&vcl, 512,
1084 (super->conf_rec_len*512 +
1085 offsetof(struct vcl, conf))) != 0) {
1086 pr_err("%s could not allocate vcl buf\n",
1087 __func__);
1088 return 1;
1089 }
1090 vcl->next = super->conflist;
1091 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1092 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1093 if (alloc_other_bvds(super, vcl) != 0) {
1094 pr_err("%s could not allocate other bvds\n",
1095 __func__);
1096 free(vcl);
1097 return 1;
1098 };
1099 super->conflist = vcl;
1100 dl->vlist[vnum++] = vcl;
1101 }
1102 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1103 for (i=0; i < max_virt_disks ; i++)
1104 if (memcmp(super->virt->entries[i].guid,
1105 vcl->conf.guid, DDF_GUID_LEN)==0)
1106 break;
1107 if (i < max_virt_disks)
1108 vcl->vcnum = i;
1109 }
1110
1111 return 0;
1112 }
1113
1114 #ifndef MDASSEMBLE
1115 static int load_super_ddf_all(struct supertype *st, int fd,
1116 void **sbp, char *devname);
1117 #endif
1118
1119 static void free_super_ddf(struct supertype *st);
1120
1121 static int load_super_ddf(struct supertype *st, int fd,
1122 char *devname)
1123 {
1124 unsigned long long dsize;
1125 struct ddf_super *super;
1126 int rv;
1127
1128 if (get_dev_size(fd, devname, &dsize) == 0)
1129 return 1;
1130
1131 if (test_partition(fd))
1132 /* DDF is not allowed on partitions */
1133 return 1;
1134
1135 /* 32M is a lower bound */
1136 if (dsize <= 32*1024*1024) {
1137 if (devname)
1138 pr_err("%s is too small for ddf: "
1139 "size is %llu sectors.\n",
1140 devname, dsize>>9);
1141 return 1;
1142 }
1143 if (dsize & 511) {
1144 if (devname)
1145 pr_err("%s is an odd size for ddf: "
1146 "size is %llu bytes.\n",
1147 devname, dsize);
1148 return 1;
1149 }
1150
1151 free_super_ddf(st);
1152
1153 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1154 pr_err("malloc of %zu failed.\n",
1155 sizeof(*super));
1156 return 1;
1157 }
1158 memset(super, 0, sizeof(*super));
1159
1160 rv = load_ddf_headers(fd, super, devname);
1161 if (rv) {
1162 free(super);
1163 return rv;
1164 }
1165
1166 /* Have valid headers and have chosen the best. Let's read in the rest */
1167
1168 rv = load_ddf_global(fd, super, devname);
1169
1170 if (rv) {
1171 if (devname)
1172 pr_err("Failed to load all information "
1173 "sections on %s\n", devname);
1174 free(super);
1175 return rv;
1176 }
1177
1178 rv = load_ddf_local(fd, super, devname, 0);
1179
1180 if (rv) {
1181 if (devname)
1182 pr_err("Failed to load all information "
1183 "sections on %s\n", devname);
1184 free(super);
1185 return rv;
1186 }
1187
1188 /* Should possibly check the sections .... */
1189
1190 st->sb = super;
1191 if (st->ss == NULL) {
1192 st->ss = &super_ddf;
1193 st->minor_version = 0;
1194 st->max_devs = 512;
1195 }
1196 return 0;
1197
1198 }
1199
1200 static void free_super_ddf(struct supertype *st)
1201 {
1202 struct ddf_super *ddf = st->sb;
1203 if (ddf == NULL)
1204 return;
1205 free(ddf->phys);
1206 free(ddf->virt);
1207 free(ddf->conf);
1208 while (ddf->conflist) {
1209 struct vcl *v = ddf->conflist;
1210 ddf->conflist = v->next;
1211 if (v->block_sizes)
1212 free(v->block_sizes);
1213 if (v->other_bvds)
1214 /*
1215 v->other_bvds[0] points to beginning of buffer,
1216 see alloc_other_bvds()
1217 */
1218 free(v->other_bvds[0]);
1219 free(v);
1220 }
1221 while (ddf->dlist) {
1222 struct dl *d = ddf->dlist;
1223 ddf->dlist = d->next;
1224 if (d->fd >= 0)
1225 close(d->fd);
1226 if (d->spare)
1227 free(d->spare);
1228 free(d);
1229 }
1230 while (ddf->add_list) {
1231 struct dl *d = ddf->add_list;
1232 ddf->add_list = d->next;
1233 if (d->fd >= 0)
1234 close(d->fd);
1235 if (d->spare)
1236 free(d->spare);
1237 free(d);
1238 }
1239 free(ddf);
1240 st->sb = NULL;
1241 }
1242
1243 static struct supertype *match_metadata_desc_ddf(char *arg)
1244 {
1245 /* 'ddf' only supports containers */
1246 struct supertype *st;
1247 if (strcmp(arg, "ddf") != 0 &&
1248 strcmp(arg, "default") != 0
1249 )
1250 return NULL;
1251
1252 st = xcalloc(1, sizeof(*st));
1253 st->ss = &super_ddf;
1254 st->max_devs = 512;
1255 st->minor_version = 0;
1256 st->sb = NULL;
1257 return st;
1258 }
1259
1260 #ifndef MDASSEMBLE
1261
1262 static mapping_t ddf_state[] = {
1263 { "Optimal", 0},
1264 { "Degraded", 1},
1265 { "Deleted", 2},
1266 { "Missing", 3},
1267 { "Failed", 4},
1268 { "Partially Optimal", 5},
1269 { "-reserved-", 6},
1270 { "-reserved-", 7},
1271 { NULL, 0}
1272 };
1273
1274 static mapping_t ddf_init_state[] = {
1275 { "Not Initialised", 0},
1276 { "QuickInit in Progress", 1},
1277 { "Fully Initialised", 2},
1278 { "*UNKNOWN*", 3},
1279 { NULL, 0}
1280 };
1281 static mapping_t ddf_access[] = {
1282 { "Read/Write", 0},
1283 { "Reserved", 1},
1284 { "Read Only", 2},
1285 { "Blocked (no access)", 3},
1286 { NULL ,0}
1287 };
1288
1289 static mapping_t ddf_level[] = {
1290 { "RAID0", DDF_RAID0},
1291 { "RAID1", DDF_RAID1},
1292 { "RAID3", DDF_RAID3},
1293 { "RAID4", DDF_RAID4},
1294 { "RAID5", DDF_RAID5},
1295 { "RAID1E",DDF_RAID1E},
1296 { "JBOD", DDF_JBOD},
1297 { "CONCAT",DDF_CONCAT},
1298 { "RAID5E",DDF_RAID5E},
1299 { "RAID5EE",DDF_RAID5EE},
1300 { "RAID6", DDF_RAID6},
1301 { NULL, 0}
1302 };
1303 static mapping_t ddf_sec_level[] = {
1304 { "Striped", DDF_2STRIPED},
1305 { "Mirrored", DDF_2MIRRORED},
1306 { "Concat", DDF_2CONCAT},
1307 { "Spanned", DDF_2SPANNED},
1308 { NULL, 0}
1309 };
1310 #endif
1311
1312 static int all_ff(const char *guid)
1313 {
1314 int i;
1315 for (i = 0; i < DDF_GUID_LEN; i++)
1316 if (guid[i] != (char)0xff)
1317 return 0;
1318 return 1;
1319 }
1320
1321 static const char *guid_str(const char *guid)
1322 {
1323 static char buf[DDF_GUID_LEN*2+1];
1324 int i;
1325 char *p = buf;
1326 for (i = 0; i < DDF_GUID_LEN; i++) {
1327 unsigned char c = guid[i];
1328 if (c >= 32 && c < 127)
1329 p += sprintf(p, "%c", c);
1330 else
1331 p += sprintf(p, "%02x", c);
1332 }
1333 *p = '\0';
1334 return (const char *) buf;
1335 }
1336
1337 #ifndef MDASSEMBLE
1338 static void print_guid(char *guid, int tstamp)
1339 {
1340 /* GUIDs are partly (or wholly) ASCII and partly binary.
1341 * They tend to be space padded.
1342 * We print the GUID in HEX, then in parentheses add
1343 * any initial ASCII sequence, and a possible
1344 * time stamp from bytes 16-19
1345 */
1346 int l = DDF_GUID_LEN;
1347 int i;
1348
1349 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1350 if ((i&3)==0 && i != 0) printf(":");
1351 printf("%02X", guid[i]&255);
1352 }
1353
1354 printf("\n (");
1355 while (l && guid[l-1] == ' ')
1356 l--;
1357 for (i=0 ; i<l ; i++) {
1358 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1359 fputc(guid[i], stdout);
1360 else
1361 break;
1362 }
1363 if (tstamp) {
1364 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1365 char tbuf[100];
1366 struct tm *tm;
1367 tm = localtime(&then);
1368 strftime(tbuf, 100, " %D %T",tm);
1369 fputs(tbuf, stdout);
1370 }
1371 printf(")");
1372 }
1373
1374 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1375 {
1376 int crl = sb->conf_rec_len;
1377 struct vcl *vcl;
1378
1379 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1380 unsigned int i;
1381 struct vd_config *vc = &vcl->conf;
1382
1383 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1384 continue;
1385 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1386 continue;
1387
1388 /* Ok, we know about this VD, let's give more details */
1389 printf(" Raid Devices[%d] : %d (", n,
1390 be16_to_cpu(vc->prim_elmnt_count));
1391 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1392 int j;
1393 int cnt = be16_to_cpu(sb->phys->used_pdes);
1394 for (j=0; j<cnt; j++)
1395 if (be32_eq(vc->phys_refnum[i],
1396 sb->phys->entries[j].refnum))
1397 break;
1398 if (i) printf(" ");
1399 if (j < cnt)
1400 printf("%d", j);
1401 else
1402 printf("--");
1403 }
1404 printf(")\n");
1405 if (vc->chunk_shift != 255)
1406 printf(" Chunk Size[%d] : %d sectors\n", n,
1407 1 << vc->chunk_shift);
1408 printf(" Raid Level[%d] : %s\n", n,
1409 map_num(ddf_level, vc->prl)?:"-unknown-");
1410 if (vc->sec_elmnt_count != 1) {
1411 printf(" Secondary Position[%d] : %d of %d\n", n,
1412 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1413 printf(" Secondary Level[%d] : %s\n", n,
1414 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1415 }
1416 printf(" Device Size[%d] : %llu\n", n,
1417 be64_to_cpu(vc->blocks)/2);
1418 printf(" Array Size[%d] : %llu\n", n,
1419 be64_to_cpu(vc->array_blocks)/2);
1420 }
1421 }
1422
1423 static void examine_vds(struct ddf_super *sb)
1424 {
1425 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1426 unsigned int i;
1427 printf(" Virtual Disks : %d\n", cnt);
1428
1429 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1430 struct virtual_entry *ve = &sb->virt->entries[i];
1431 if (all_ff(ve->guid))
1432 continue;
1433 printf("\n");
1434 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1435 printf("\n");
1436 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1437 printf(" state[%d] : %s, %s%s\n", i,
1438 map_num(ddf_state, ve->state & 7),
1439 (ve->state & DDF_state_morphing) ? "Morphing, ": "",
1440 (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
1441 printf(" init state[%d] : %s\n", i,
1442 map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
1443 printf(" access[%d] : %s\n", i,
1444 map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
1445 printf(" Name[%d] : %.16s\n", i, ve->name);
1446 examine_vd(i, sb, ve->guid);
1447 }
1448 if (cnt) printf("\n");
1449 }
1450
1451 static void examine_pds(struct ddf_super *sb)
1452 {
1453 int cnt = be16_to_cpu(sb->phys->used_pdes);
1454 int i;
1455 struct dl *dl;
1456 printf(" Physical Disks : %d\n", cnt);
1457 printf(" Number RefNo Size Device Type/State\n");
1458
1459 for (i=0 ; i<cnt ; i++) {
1460 struct phys_disk_entry *pd = &sb->phys->entries[i];
1461 int type = be16_to_cpu(pd->type);
1462 int state = be16_to_cpu(pd->state);
1463
1464 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1465 //printf("\n");
1466 printf(" %3d %08x ", i,
1467 be32_to_cpu(pd->refnum));
1468 printf("%8lluK ",
1469 be64_to_cpu(pd->config_size)>>1);
1470 for (dl = sb->dlist; dl ; dl = dl->next) {
1471 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1472 char *dv = map_dev(dl->major, dl->minor, 0);
1473 if (dv) {
1474 printf("%-15s", dv);
1475 break;
1476 }
1477 }
1478 }
1479 if (!dl)
1480 printf("%15s","");
1481 printf(" %s%s%s%s%s",
1482 (type&2) ? "active":"",
1483 (type&4) ? "Global-Spare":"",
1484 (type&8) ? "spare" : "",
1485 (type&16)? ", foreign" : "",
1486 (type&32)? "pass-through" : "");
1487 if (state & DDF_Failed)
1488 /* This over-rides these three */
1489 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1490 printf("/%s%s%s%s%s%s%s",
1491 (state&1)? "Online": "Offline",
1492 (state&2)? ", Failed": "",
1493 (state&4)? ", Rebuilding": "",
1494 (state&8)? ", in-transition": "",
1495 (state&16)? ", SMART-errors": "",
1496 (state&32)? ", Unrecovered-Read-Errors": "",
1497 (state&64)? ", Missing" : "");
1498 printf("\n");
1499 }
1500 }
1501
1502 static void examine_super_ddf(struct supertype *st, char *homehost)
1503 {
1504 struct ddf_super *sb = st->sb;
1505
1506 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1507 printf(" Version : %.8s\n", sb->anchor.revision);
1508 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1509 printf("\n");
1510 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1511 printf("\n");
1512 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1513 printf(" Redundant hdr : %s\n", be32_eq(sb->secondary.magic,
1514 DDF_HEADER_MAGIC)
1515 ?"yes" : "no");
1516 examine_vds(sb);
1517 examine_pds(sb);
1518 }
1519
1520 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
1521
1522 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
1523 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
1524 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i);
1525
1526 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1527 {
1528 /*
1529 * Figure out the VD number for this supertype.
1530 * Returns DDF_CONTAINER for the container itself,
1531 * and DDF_NOTFOUND on error.
1532 */
1533 struct ddf_super *ddf = st->sb;
1534 struct mdinfo *sra;
1535 char *sub, *end;
1536 unsigned int vcnum;
1537
1538 if (*st->container_devnm == '\0')
1539 return DDF_CONTAINER;
1540
1541 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1542 if (!sra || sra->array.major_version != -1 ||
1543 sra->array.minor_version != -2 ||
1544 !is_subarray(sra->text_version))
1545 return DDF_NOTFOUND;
1546
1547 sub = strchr(sra->text_version + 1, '/');
1548 if (sub != NULL)
1549 vcnum = strtoul(sub + 1, &end, 10);
1550 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1551 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1552 return DDF_NOTFOUND;
1553
1554 return vcnum;
1555 }
1556
1557 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1558 {
1559 /* We just write a generic DDF ARRAY entry
1560 */
1561 struct mdinfo info;
1562 char nbuf[64];
1563 getinfo_super_ddf(st, &info, NULL);
1564 fname_from_uuid(st, &info, nbuf, ':');
1565
1566 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1567 }
1568
1569 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1570 {
1571 /* We just write a generic DDF ARRAY entry
1572 */
1573 struct ddf_super *ddf = st->sb;
1574 struct mdinfo info;
1575 unsigned int i;
1576 char nbuf[64];
1577 getinfo_super_ddf(st, &info, NULL);
1578 fname_from_uuid(st, &info, nbuf, ':');
1579
1580 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1581 struct virtual_entry *ve = &ddf->virt->entries[i];
1582 struct vcl vcl;
1583 char nbuf1[64];
1584 char namebuf[17];
1585 if (all_ff(ve->guid))
1586 continue;
1587 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1588 ddf->currentconf =&vcl;
1589 vcl.vcnum = i;
1590 uuid_from_super_ddf(st, info.uuid);
1591 fname_from_uuid(st, &info, nbuf1, ':');
1592 _ddf_array_name(namebuf, ddf, i);
1593 printf("ARRAY%s%s container=%s member=%d UUID=%s\n",
1594 namebuf[0] == '\0' ? "" : " /dev/md/", namebuf,
1595 nbuf+5, i, nbuf1+5);
1596 }
1597 }
1598
1599 static void export_examine_super_ddf(struct supertype *st)
1600 {
1601 struct mdinfo info;
1602 char nbuf[64];
1603 getinfo_super_ddf(st, &info, NULL);
1604 fname_from_uuid(st, &info, nbuf, ':');
1605 printf("MD_METADATA=ddf\n");
1606 printf("MD_LEVEL=container\n");
1607 printf("MD_UUID=%s\n", nbuf+5);
1608 printf("MD_DEVICES=%u\n",
1609 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1610 }
1611
1612 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1613 {
1614 void *buf;
1615 unsigned long long dsize, offset;
1616 int bytes;
1617 struct ddf_header *ddf;
1618 int written = 0;
1619
1620 /* The meta consists of an anchor, a primary, and a secondary.
1621 * This all lives at the end of the device.
1622 * So it is easiest to find the earliest of primary and
1623 * secondary, and copy everything from there.
1624 *
1625 * Anchor is 512 bytes from the end. It contains primary_lba and
1626 * secondary_lba; we choose the earlier of those.
1627 */
1628
1629 if (posix_memalign(&buf, 4096, 4096) != 0)
1630 return 1;
1631
1632 if (!get_dev_size(from, NULL, &dsize))
1633 goto err;
1634
1635 if (lseek64(from, dsize-512, 0) < 0)
1636 goto err;
1637 if (read(from, buf, 512) != 512)
1638 goto err;
1639 ddf = buf;
1640 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1641 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1642 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1643 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1644 goto err;
1645
1646 offset = dsize - 512;
1647 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1648 offset = be64_to_cpu(ddf->primary_lba) << 9;
1649 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1650 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1651
1652 bytes = dsize - offset;
1653
1654 if (lseek64(from, offset, 0) < 0 ||
1655 lseek64(to, offset, 0) < 0)
1656 goto err;
1657 while (written < bytes) {
1658 int n = bytes - written;
1659 if (n > 4096)
1660 n = 4096;
1661 if (read(from, buf, n) != n)
1662 goto err;
1663 if (write(to, buf, n) != n)
1664 goto err;
1665 written += n;
1666 }
1667 free(buf);
1668 return 0;
1669 err:
1670 free(buf);
1671 return 1;
1672 }
1673
1674 static void detail_super_ddf(struct supertype *st, char *homehost)
1675 {
1676 /* FIXME later
1677 * Could print DDF GUID
1678 * Need to find which array
1679 * If whole, briefly list all arrays
1680 * If one, give name
1681 */
1682 }
1683
1684 static const char *vendors_with_variable_volume_UUID[] = {
1685 "LSI ",
1686 };
1687
1688 static int volume_id_is_reliable(const struct ddf_super *ddf)
1689 {
1690 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1691 int i;
1692 for (i = 0; i < n; i++)
1693 if (!memcmp(ddf->controller.guid,
1694 vendors_with_variable_volume_UUID[i], 8))
1695 return 0;
1696 return 1;
1697 }
1698
1699 static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
1700 unsigned int vcnum, int uuid[4])
1701 {
1702 char buf[DDF_GUID_LEN+18], sha[20], *p;
1703 struct sha1_ctx ctx;
1704 if (volume_id_is_reliable(ddf)) {
1705 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
1706 return;
1707 }
1708 /*
1709 * Some fake RAID BIOSes (in particular, LSI ones) change the
1710 * VD GUID at every boot. These GUIDs are not suitable for
1711 * identifying an array. Luckily the header GUID appears to
1712 * remain constant.
1713 * We construct a pseudo-UUID from the header GUID and those
1714 * properties of the subarray that we expect to remain constant.
1715 */
1716 memset(buf, 0, sizeof(buf));
1717 p = buf;
1718 memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
1719 p += DDF_GUID_LEN;
1720 memcpy(p, ddf->virt->entries[vcnum].name, 16);
1721 p += 16;
1722 *((__u16 *) p) = vcnum;
1723 sha1_init_ctx(&ctx);
1724 sha1_process_bytes(buf, sizeof(buf), &ctx);
1725 sha1_finish_ctx(&ctx, sha);
1726 memcpy(uuid, sha, 4*4);
1727 }
1728
1729 static void brief_detail_super_ddf(struct supertype *st)
1730 {
1731 struct mdinfo info;
1732 char nbuf[64];
1733 struct ddf_super *ddf = st->sb;
1734 unsigned int vcnum = get_vd_num_of_subarray(st);
1735 if (vcnum == DDF_CONTAINER)
1736 uuid_from_super_ddf(st, info.uuid);
1737 else if (vcnum == DDF_NOTFOUND)
1738 return;
1739 else
1740 uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
1741 fname_from_uuid(st, &info, nbuf,':');
1742 printf(" UUID=%s", nbuf + 5);
1743 }
1744 #endif
1745
1746 static int match_home_ddf(struct supertype *st, char *homehost)
1747 {
1748 /* It matches 'this' host if the controller is a
1749 * Linux-MD controller with vendor_data matching
1750 * the hostname
1751 */
1752 struct ddf_super *ddf = st->sb;
1753 unsigned int len;
1754
1755 if (!homehost)
1756 return 0;
1757 len = strlen(homehost);
1758
1759 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1760 len < sizeof(ddf->controller.vendor_data) &&
1761 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1762 ddf->controller.vendor_data[len] == 0);
1763 }
1764
1765 #ifndef MDASSEMBLE
1766 static int find_index_in_bvd(const struct ddf_super *ddf,
1767 const struct vd_config *conf, unsigned int n,
1768 unsigned int *n_bvd)
1769 {
1770 /*
1771 * Find the index of the n-th valid physical disk in this BVD
1772 */
1773 unsigned int i, j;
1774 for (i = 0, j = 0; i < ddf->mppe &&
1775 j < be16_to_cpu(conf->prim_elmnt_count); i++) {
1776 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1777 if (n == j) {
1778 *n_bvd = i;
1779 return 1;
1780 }
1781 j++;
1782 }
1783 }
1784 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1785 __func__, n, be16_to_cpu(conf->prim_elmnt_count));
1786 return 0;
1787 }
1788
1789 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1790 unsigned int n,
1791 unsigned int *n_bvd, struct vcl **vcl)
1792 {
1793 struct vcl *v;
1794
1795 for (v = ddf->conflist; v; v = v->next) {
1796 unsigned int nsec, ibvd = 0;
1797 struct vd_config *conf;
1798 if (inst != v->vcnum)
1799 continue;
1800 conf = &v->conf;
1801 if (conf->sec_elmnt_count == 1) {
1802 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1803 *vcl = v;
1804 return conf;
1805 } else
1806 goto bad;
1807 }
1808 if (v->other_bvds == NULL) {
1809 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1810 __func__, conf->sec_elmnt_count);
1811 goto bad;
1812 }
1813 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1814 if (conf->sec_elmnt_seq != nsec) {
1815 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1816 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1817 == nsec)
1818 break;
1819 }
1820 if (ibvd == conf->sec_elmnt_count)
1821 goto bad;
1822 conf = v->other_bvds[ibvd-1];
1823 }
1824 if (!find_index_in_bvd(ddf, conf,
1825 n - nsec*conf->sec_elmnt_count, n_bvd))
1826 goto bad;
1827 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1828 , __func__, n, *n_bvd, ibvd, inst);
1829 *vcl = v;
1830 return conf;
1831 }
1832 bad:
1833 pr_err("%s: Could't find disk %d in array %u\n", __func__, n, inst);
1834 return NULL;
1835 }
1836 #endif
1837
1838 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1839 {
1840 /* Find the entry in phys_disk which has the given refnum
1841 * and return its index
1842 */
1843 unsigned int i;
1844 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1845 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1846 return i;
1847 return -1;
1848 }
1849
1850 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1851 {
1852 char buf[20];
1853 struct sha1_ctx ctx;
1854 sha1_init_ctx(&ctx);
1855 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1856 sha1_finish_ctx(&ctx, buf);
1857 memcpy(uuid, buf, 4*4);
1858 }
1859
1860 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1861 {
1862 /* The uuid returned here is used for:
1863 * uuid to put into bitmap file (Create, Grow)
1864 * uuid for backup header when saving critical section (Grow)
1865 * comparing uuids when re-adding a device into an array
1866 * In these cases the uuid required is that of the data-array,
1867 * not the device-set.
1868 * uuid to recognise same set when adding a missing device back
1869 * to an array. This is a uuid for the device-set.
1870 *
1871 * For each of these we can make do with a truncated
1872 * or hashed uuid rather than the original, as long as
1873 * everyone agrees.
1874 * In the case of SVD we assume the BVD is of interest,
1875 * though that might not be the case if a bitmap were made for
1876 * a mirrored SVD - worry about that later.
1877 * So we need to find the VD configuration record for the
1878 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1879 * The first 16 bytes of the sha1 of these is used.
1880 */
1881 struct ddf_super *ddf = st->sb;
1882 struct vcl *vcl = ddf->currentconf;
1883
1884 if (vcl)
1885 uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
1886 else
1887 uuid_from_ddf_guid(ddf->anchor.guid, uuid);
1888 }
1889
1890 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
1891
1892 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1893 {
1894 struct ddf_super *ddf = st->sb;
1895 int map_disks = info->array.raid_disks;
1896 __u32 *cptr;
1897
1898 if (ddf->currentconf) {
1899 getinfo_super_ddf_bvd(st, info, map);
1900 return;
1901 }
1902 memset(info, 0, sizeof(*info));
1903
1904 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1905 info->array.level = LEVEL_CONTAINER;
1906 info->array.layout = 0;
1907 info->array.md_minor = -1;
1908 cptr = (__u32 *)(ddf->anchor.guid + 16);
1909 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1910
1911 info->array.utime = 0;
1912 info->array.chunk_size = 0;
1913 info->container_enough = 1;
1914
1915 info->disk.major = 0;
1916 info->disk.minor = 0;
1917 if (ddf->dlist) {
1918 struct phys_disk_entry *pde = NULL;
1919 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1920 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1921
1922 info->data_offset = be64_to_cpu(ddf->phys->
1923 entries[info->disk.raid_disk].
1924 config_size);
1925 info->component_size = ddf->dlist->size - info->data_offset;
1926 if (info->disk.raid_disk >= 0)
1927 pde = ddf->phys->entries + info->disk.raid_disk;
1928 if (pde &&
1929 !(be16_to_cpu(pde->state) & DDF_Failed))
1930 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1931 else
1932 info->disk.state = 1 << MD_DISK_FAULTY;
1933
1934 info->events = be32_to_cpu(ddf->active->seq);
1935 } else {
1936 info->disk.number = -1;
1937 info->disk.raid_disk = -1;
1938 // info->disk.raid_disk = find refnum in the table and use index;
1939 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1940 }
1941
1942 info->recovery_start = MaxSector;
1943 info->reshape_active = 0;
1944 info->recovery_blocked = 0;
1945 info->name[0] = 0;
1946
1947 info->array.major_version = -1;
1948 info->array.minor_version = -2;
1949 strcpy(info->text_version, "ddf");
1950 info->safe_mode_delay = 0;
1951
1952 uuid_from_super_ddf(st, info->uuid);
1953
1954 if (map) {
1955 int i;
1956 for (i = 0 ; i < map_disks; i++) {
1957 if (i < info->array.raid_disks &&
1958 !(be16_to_cpu(ddf->phys->entries[i].state)
1959 & DDF_Failed))
1960 map[i] = 1;
1961 else
1962 map[i] = 0;
1963 }
1964 }
1965 }
1966
1967 /* size of name must be at least 17 bytes! */
1968 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i)
1969 {
1970 int j;
1971 memcpy(name, ddf->virt->entries[i].name, 16);
1972 name[16] = 0;
1973 for(j = 0; j < 16; j++)
1974 if (name[j] == ' ')
1975 name[j] = 0;
1976 }
1977
1978 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
1979 {
1980 struct ddf_super *ddf = st->sb;
1981 struct vcl *vc = ddf->currentconf;
1982 int cd = ddf->currentdev;
1983 int n_prim;
1984 int j;
1985 struct dl *dl;
1986 int map_disks = info->array.raid_disks;
1987 __u32 *cptr;
1988 struct vd_config *conf;
1989
1990 memset(info, 0, sizeof(*info));
1991 if (layout_ddf2md(&vc->conf, &info->array) == -1)
1992 return;
1993 info->array.md_minor = -1;
1994 cptr = (__u32 *)(vc->conf.guid + 16);
1995 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1996 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
1997 info->array.chunk_size = 512 << vc->conf.chunk_shift;
1998 info->custom_array_size = 0;
1999
2000 conf = &vc->conf;
2001 n_prim = be16_to_cpu(conf->prim_elmnt_count);
2002 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
2003 int ibvd = cd / n_prim - 1;
2004 cd %= n_prim;
2005 conf = vc->other_bvds[ibvd];
2006 }
2007
2008 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
2009 info->data_offset =
2010 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
2011 if (vc->block_sizes)
2012 info->component_size = vc->block_sizes[cd];
2013 else
2014 info->component_size = be64_to_cpu(conf->blocks);
2015 }
2016
2017 for (dl = ddf->dlist; dl ; dl = dl->next)
2018 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
2019 break;
2020
2021 info->disk.major = 0;
2022 info->disk.minor = 0;
2023 info->disk.state = 0;
2024 if (dl) {
2025 info->disk.major = dl->major;
2026 info->disk.minor = dl->minor;
2027 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2028 * be16_to_cpu(conf->prim_elmnt_count);
2029 info->disk.number = dl->pdnum;
2030 info->disk.state = 0;
2031 if (info->disk.number >= 0 &&
2032 (be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Online) &&
2033 !(be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Failed))
2034 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2035 info->events = be32_to_cpu(ddf->active->seq);
2036 }
2037
2038 info->container_member = ddf->currentconf->vcnum;
2039
2040 info->recovery_start = MaxSector;
2041 info->resync_start = 0;
2042 info->reshape_active = 0;
2043 info->recovery_blocked = 0;
2044 if (!(ddf->virt->entries[info->container_member].state
2045 & DDF_state_inconsistent) &&
2046 (ddf->virt->entries[info->container_member].init_state
2047 & DDF_initstate_mask)
2048 == DDF_init_full)
2049 info->resync_start = MaxSector;
2050
2051 uuid_from_super_ddf(st, info->uuid);
2052
2053 info->array.major_version = -1;
2054 info->array.minor_version = -2;
2055 sprintf(info->text_version, "/%s/%d",
2056 st->container_devnm,
2057 info->container_member);
2058 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2059
2060 _ddf_array_name(info->name, ddf, info->container_member);
2061
2062 if (map)
2063 for (j = 0; j < map_disks; j++) {
2064 map[j] = 0;
2065 if (j < info->array.raid_disks) {
2066 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2067 if (i >= 0 &&
2068 (be16_to_cpu(ddf->phys->entries[i].state)
2069 & DDF_Online) &&
2070 !(be16_to_cpu(ddf->phys->entries[i].state)
2071 & DDF_Failed))
2072 map[j] = 1;
2073 }
2074 }
2075 }
2076
2077 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
2078 char *update,
2079 char *devname, int verbose,
2080 int uuid_set, char *homehost)
2081 {
2082 /* For 'assemble' and 'force' we need to return non-zero if any
2083 * change was made. For others, the return value is ignored.
2084 * Update options are:
2085 * force-one : This device looks a bit old but needs to be included,
2086 * update age info appropriately.
2087 * assemble: clear any 'faulty' flag to allow this device to
2088 * be assembled.
2089 * force-array: Array is degraded but being forced, mark it clean
2090 * if that will be needed to assemble it.
2091 *
2092 * newdev: not used ????
2093 * grow: Array has gained a new device - this is currently for
2094 * linear only
2095 * resync: mark as dirty so a resync will happen.
2096 * uuid: Change the uuid of the array to match what is given
2097 * homehost: update the recorded homehost
2098 * name: update the name - preserving the homehost
2099 * _reshape_progress: record new reshape_progress position.
2100 *
2101 * Following are not relevant for this version:
2102 * sparc2.2 : update from old dodgy metadata
2103 * super-minor: change the preferred_minor number
2104 * summaries: update redundant counters.
2105 */
2106 int rv = 0;
2107 // struct ddf_super *ddf = st->sb;
2108 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
2109 // struct virtual_entry *ve = find_ve(ddf);
2110
2111 /* We don't need to handle "force-*" or "assemble" as
2112 * there is no need to 'trick' the kernel. When the metadata is
2113 * first updated to activate the array, all the implied modifications
2114 * will just happen.
2115 */
2116
2117 if (strcmp(update, "grow") == 0) {
2118 /* FIXME */
2119 } else if (strcmp(update, "resync") == 0) {
2120 // info->resync_checkpoint = 0;
2121 } else if (strcmp(update, "homehost") == 0) {
2122 /* homehost is stored in controller->vendor_data,
2123 * or it is when we are the vendor
2124 */
2125 // if (info->vendor_is_local)
2126 // strcpy(ddf->controller.vendor_data, homehost);
2127 rv = -1;
2128 } else if (strcmp(update, "name") == 0) {
2129 /* name is stored in virtual_entry->name */
2130 // memset(ve->name, ' ', 16);
2131 // strncpy(ve->name, info->name, 16);
2132 rv = -1;
2133 } else if (strcmp(update, "_reshape_progress") == 0) {
2134 /* We don't support reshape yet */
2135 } else if (strcmp(update, "assemble") == 0 ) {
2136 /* Do nothing, just succeed */
2137 rv = 0;
2138 } else
2139 rv = -1;
2140
2141 // update_all_csum(ddf);
2142
2143 return rv;
2144 }
2145
2146 static void make_header_guid(char *guid)
2147 {
2148 be32 stamp;
2149 /* Create a DDF Header or Virtual Disk GUID */
2150
2151 /* 24 bytes of fiction required.
2152 * first 8 are a 'vendor-id' - "Linux-MD"
2153 * next 8 are controller type.. how about 0xDEADBEEF 00000000
2154 * Remaining 8 are a timestamp plus a random number
2155 */
2156 memcpy(guid, T10, sizeof(T10));
2157 stamp = cpu_to_be32(0xdeadbeef);
2158 memcpy(guid+8, &stamp, 4);
2159 stamp = cpu_to_be32(0);
2160 memcpy(guid+12, &stamp, 4);
2161 stamp = cpu_to_be32(time(0) - DECADE);
2162 memcpy(guid+16, &stamp, 4);
2163 stamp._v32 = random32();
2164 memcpy(guid+20, &stamp, 4);
2165 }
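/* Worked example of the resulting 24-byte GUID layout (a sketch based on
 * the code above, not normative DDF documentation):
 *
 *	bytes  0- 7  "Linux-MD"           (T10 pseudo vendor-id)
 *	bytes  8-11  0xdeadbeef           (fake controller type)
 *	bytes 12-15  0x00000000
 *	bytes 16-19  time(0) - DECADE     (big-endian creation time)
 *	bytes 20-23  random32()
 *
 * getinfo_super_ddf() relies on this layout when it recovers the creation
 * time as DECADE + __be32_to_cpu(*(__u32 *)(guid + 16)).
 */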
2166
2167 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2168 {
2169 unsigned int i;
2170 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2171 if (all_ff(ddf->virt->entries[i].guid))
2172 return i;
2173 }
2174 return DDF_NOTFOUND;
2175 }
2176
2177 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2178 const char *name)
2179 {
2180 unsigned int i;
2181 if (name == NULL)
2182 return DDF_NOTFOUND;
2183 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2184 if (all_ff(ddf->virt->entries[i].guid))
2185 continue;
2186 if (!strncmp(name, ddf->virt->entries[i].name,
2187 sizeof(ddf->virt->entries[i].name)))
2188 return i;
2189 }
2190 return DDF_NOTFOUND;
2191 }
2192
2193 #ifndef MDASSEMBLE
2194 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2195 const char *guid)
2196 {
2197 unsigned int i;
2198 if (guid == NULL || all_ff(guid))
2199 return DDF_NOTFOUND;
2200 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2201 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2202 return i;
2203 return DDF_NOTFOUND;
2204 }
2205 #endif
2206
2207 static int init_super_ddf_bvd(struct supertype *st,
2208 mdu_array_info_t *info,
2209 unsigned long long size,
2210 char *name, char *homehost,
2211 int *uuid, unsigned long long data_offset);
2212
2213 static int init_super_ddf(struct supertype *st,
2214 mdu_array_info_t *info,
2215 unsigned long long size, char *name, char *homehost,
2216 int *uuid, unsigned long long data_offset)
2217 {
2218 /* This is primarily called by Create when creating a new array.
2219 * We will then get add_to_super called for each component, and then
2220 * write_init_super called to write it out to each device.
2221 * For DDF, Create can create on fresh devices or on a pre-existing
2222 * array.
2223 * To create on a pre-existing array a different method will be called.
2224 * This one is just for fresh drives.
2225 *
2226 * We need to create the entire 'ddf' structure which includes:
2227 * DDF headers - these are easy.
2228 * Controller data - a Sector describing this controller .. not that
2229 * this is a controller exactly.
2230 * Physical Disk Record - one entry per device, so
2231 * leave plenty of space.
2232 * Virtual Disk Records - again, just leave plenty of space.
2233 * This just lists VDs, doesn't give details
2234 * Config records - describes the VDs that use this disk
2235 * DiskData - describes 'this' device.
2236 * BadBlockManagement - empty
2237 * Diag Space - empty
2238 * Vendor Logs - Could we put bitmaps here?
2239 *
2240 */
2241 struct ddf_super *ddf;
2242 char hostname[17];
2243 int hostlen;
2244 int max_phys_disks, max_virt_disks;
2245 unsigned long long sector;
2246 int clen;
2247 int i;
2248 int pdsize, vdsize;
2249 struct phys_disk *pd;
2250 struct virtual_disk *vd;
2251
2252 if (data_offset != INVALID_SECTORS) {
2253 pr_err("data-offset not supported by DDF\n");
2254 return 0;
2255 }
2256
2257 if (st->sb)
2258 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2259 data_offset);
2260
2261 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2262 pr_err("%s could not allocate superblock\n", __func__);
2263 return 0;
2264 }
2265 memset(ddf, 0, sizeof(*ddf));
2266 ddf->dlist = NULL; /* no physical disks yet */
2267 ddf->conflist = NULL; /* No virtual disks yet */
2268 st->sb = ddf;
2269
2270 if (info == NULL) {
2271 /* zeroing superblock */
2272 return 0;
2273 }
2274
2275 /* At least 32MB *must* be reserved for the ddf. So let's just
2276 * start 32MB from the end, and put the primary header there.
2277 * Don't do secondary for now.
2278 * We don't know exactly where that will be yet as it could be
2279 * different on each device. So just set up the lengths.
2280 *
2281 */
2282
2283 ddf->anchor.magic = DDF_HEADER_MAGIC;
2284 make_header_guid(ddf->anchor.guid);
2285
2286 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2287 ddf->anchor.seq = cpu_to_be32(1);
2288 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2289 ddf->anchor.openflag = 0xFF;
2290 ddf->anchor.foreignflag = 0;
2291 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2292 ddf->anchor.pad0 = 0xff;
2293 memset(ddf->anchor.pad1, 0xff, 12);
2294 memset(ddf->anchor.header_ext, 0xff, 32);
2295 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2296 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2297 ddf->anchor.type = DDF_HEADER_ANCHOR;
2298 memset(ddf->anchor.pad2, 0xff, 3);
2299 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2300 /* Put this at bottom of 32M reserved.. */
2301 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2302 max_phys_disks = 1023; /* Should be enough */
2303 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2304 max_virt_disks = 255;
2305 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks); /* ?? */
2306 ddf->anchor.max_partitions = cpu_to_be16(64); /* ?? */
2307 ddf->max_part = 64;
2308 ddf->mppe = 256;
2309 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
2310 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2311 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2312 memset(ddf->anchor.pad3, 0xff, 54);
2313 /* controller sections is one sector long immediately
2314 * after the ddf header */
2315 sector = 1;
2316 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2317 ddf->anchor.controller_section_length = cpu_to_be32(1);
2318 sector += 1;
2319
2320 /* phys is 8 sectors after that */
2321 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2322 sizeof(struct phys_disk_entry)*max_phys_disks,
2323 512);
2324 switch(pdsize/512) {
2325 case 2: case 8: case 32: case 128: case 512: break;
2326 default: abort();
2327 }
2328 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2329 ddf->anchor.phys_section_length =
2330 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2331 sector += pdsize/512;
2332
2333 /* virt is another 32 sectors */
2334 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2335 sizeof(struct virtual_entry) * max_virt_disks,
2336 512);
2337 switch(vdsize/512) {
2338 case 2: case 8: case 32: case 128: case 512: break;
2339 default: abort();
2340 }
2341 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2342 ddf->anchor.virt_section_length =
2343 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2344 sector += vdsize/512;
2345
2346 clen = ddf->conf_rec_len * (ddf->max_part+1);
2347 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2348 ddf->anchor.config_section_length = cpu_to_be32(clen);
2349 sector += clen;
2350
2351 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2352 ddf->anchor.data_section_length = cpu_to_be32(1);
2353 sector += 1;
2354
2355 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2356 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2357 ddf->anchor.diag_space_length = cpu_to_be32(0);
2358 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2359 ddf->anchor.vendor_length = cpu_to_be32(0);
2360 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2361
2362 memset(ddf->anchor.pad4, 0xff, 256);
2363
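/* At this point the section layout of the new DDF structure looks like
 * this (sector offsets relative to the header, as accumulated above; a
 * summary sketch, not a dump of real metadata):
 *
 *	0                      DDF header (1 sector)
 *	1                      controller data (1 sector)
 *	2                      phys disk records (pdsize/512 sectors)
 *	2 + pdsize/512         virtual disk records (vdsize/512 sectors)
 *	... + vdsize/512       config records, conf_rec_len * (max_part+1)
 *	... + clen             disk data (1 sector)
 *
 * bbm, diag and vendor sections are left empty (offset 0xFFFFFFFF).
 */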
2364 memcpy(&ddf->primary, &ddf->anchor, 512);
2365 memcpy(&ddf->secondary, &ddf->anchor, 512);
2366
2367 ddf->primary.openflag = 1; /* I guess.. */
2368 ddf->primary.type = DDF_HEADER_PRIMARY;
2369
2370 ddf->secondary.openflag = 1; /* I guess.. */
2371 ddf->secondary.type = DDF_HEADER_SECONDARY;
2372
2373 ddf->active = &ddf->primary;
2374
2375 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2376
2377 /* 24 more bytes of fiction required.
2378 * first 8 are a 'vendor-id' - "Linux-MD"
2379 * Remaining 16 are serial number.... maybe a hostname would do?
2380 */
2381 memcpy(ddf->controller.guid, T10, sizeof(T10));
2382 gethostname(hostname, sizeof(hostname));
2383 hostname[sizeof(hostname) - 1] = 0;
2384 hostlen = strlen(hostname);
2385 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2386 for (i = strlen(T10) ; i+hostlen < 24; i++)
2387 ddf->controller.guid[i] = ' ';
2388
2389 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2390 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2391 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2392 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2393 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2394 memset(ddf->controller.pad, 0xff, 8);
2395 memset(ddf->controller.vendor_data, 0xff, 448);
2396 if (homehost && strlen(homehost) < 440)
2397 strcpy((char*)ddf->controller.vendor_data, homehost);
2398
2399 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2400 pr_err("%s could not allocate pd\n", __func__);
2401 return 0;
2402 }
2403 ddf->phys = pd;
2404 ddf->pdsize = pdsize;
2405
2406 memset(pd, 0xff, pdsize);
2407 memset(pd, 0, sizeof(*pd));
2408 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2409 pd->used_pdes = cpu_to_be16(0);
2410 pd->max_pdes = cpu_to_be16(max_phys_disks);
2411 memset(pd->pad, 0xff, 52);
2412 for (i = 0; i < max_phys_disks; i++)
2413 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2414
2415 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2416 pr_err("%s could not allocate vd\n", __func__);
2417 return 0;
2418 }
2419 ddf->virt = vd;
2420 ddf->vdsize = vdsize;
2421 memset(vd, 0, vdsize);
2422 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2423 vd->populated_vdes = cpu_to_be16(0);
2424 vd->max_vdes = cpu_to_be16(max_virt_disks);
2425 memset(vd->pad, 0xff, 52);
2426
2427 for (i=0; i<max_virt_disks; i++)
2428 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2429
2430 st->sb = ddf;
2431 ddf_set_updates_pending(ddf);
2432 return 1;
2433 }
2434
2435 static int chunk_to_shift(int chunksize)
2436 {
2437 return ffs(chunksize/512)-1;
2438 }
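/* Quick worked example (illustration only): a 64KiB chunk gives
 * chunk_to_shift(65536) == ffs(65536/512) - 1 == ffs(128) - 1 == 7,
 * and getinfo_super_ddf_bvd() reverses it with 512 << 7 == 65536.
 */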
2439
2440 #ifndef MDASSEMBLE
2441 struct extent {
2442 unsigned long long start, size;
2443 };
2444 static int cmp_extent(const void *av, const void *bv)
2445 {
2446 const struct extent *a = av;
2447 const struct extent *b = bv;
2448 if (a->start < b->start)
2449 return -1;
2450 if (a->start > b->start)
2451 return 1;
2452 return 0;
2453 }
2454
2455 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2456 {
2457 /* find a list of used extents on the given physical device
2458 * (dl) of the given ddf.
2459 * Return a malloced array of 'struct extent'
2460
2461 * FIXME ignore DDF_Legacy devices?
2462
2463 */
2464 struct extent *rv;
2465 int n = 0;
2466 unsigned int i;
2467 __u16 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2468
2469 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2470 return NULL;
2471
2472 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2473
2474 for (i = 0; i < ddf->max_part; i++) {
2475 const struct vd_config *bvd;
2476 unsigned int ibvd;
2477 struct vcl *v = dl->vlist[i];
2478 if (v == NULL ||
2479 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2480 &bvd, &ibvd) == DDF_NOTFOUND)
2481 continue;
2482 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2483 rv[n].size = be64_to_cpu(bvd->blocks);
2484 n++;
2485 }
2486 qsort(rv, n, sizeof(*rv), cmp_extent);
2487
2488 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2489 rv[n].size = 0;
2490 return rv;
2491 }
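/* Note on how callers walk this list (a sketch of the idiom used in
 * add_to_super_ddf_bvd(), reserve_space() and validate_geometry_ddf_bvd(),
 * not a separate helper in this file): the final entry acts as a sentinel
 * at config_size with size 0, so the free gaps can be scanned with
 *
 *	unsigned long long pos = 0, esize;
 *	int i = 0;
 *	do {
 *		esize = e[i].start - pos;	// free space before extent i
 *		pos = e[i].start + e[i].size;
 *		i++;
 *	} while (e[i-1].size);			// stops after the sentinel
 */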
2492 #endif
2493
2494 static int init_super_ddf_bvd(struct supertype *st,
2495 mdu_array_info_t *info,
2496 unsigned long long size,
2497 char *name, char *homehost,
2498 int *uuid, unsigned long long data_offset)
2499 {
2500 /* We are creating a BVD inside a pre-existing container,
2501 * so st->sb is already set.
2502 * We need to create a new vd_config and a new virtual_entry
2503 */
2504 struct ddf_super *ddf = st->sb;
2505 unsigned int venum, i;
2506 struct virtual_entry *ve;
2507 struct vcl *vcl;
2508 struct vd_config *vc;
2509
2510 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2511 pr_err("This ddf already has an array called %s\n", name);
2512 return 0;
2513 }
2514 venum = find_unused_vde(ddf);
2515 if (venum == DDF_NOTFOUND) {
2516 pr_err("Cannot find spare slot for virtual disk\n");
2517 return 0;
2518 }
2519 ve = &ddf->virt->entries[venum];
2520
2521 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2522 * timestamp, random number
2523 */
2524 make_header_guid(ve->guid);
2525 ve->unit = cpu_to_be16(info->md_minor);
2526 ve->pad0 = 0xFFFF;
2527 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2528 DDF_GUID_LEN);
2529 ve->type = cpu_to_be16(0);
2530 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2531 if (info->state & 1) /* clean */
2532 ve->init_state = DDF_init_full;
2533 else
2534 ve->init_state = DDF_init_not;
2535
2536 memset(ve->pad1, 0xff, 14);
2537 memset(ve->name, ' ', 16);
2538 if (name)
2539 strncpy(ve->name, name, 16);
2540 ddf->virt->populated_vdes =
2541 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2542
2543 /* Now create a new vd_config */
2544 if (posix_memalign((void**)&vcl, 512,
2545 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2546 pr_err("%s could not allocate vd_config\n", __func__);
2547 return 0;
2548 }
2549 vcl->vcnum = venum;
2550 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2551 vc = &vcl->conf;
2552
2553 vc->magic = DDF_VD_CONF_MAGIC;
2554 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2555 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2556 vc->seqnum = cpu_to_be32(1);
2557 memset(vc->pad0, 0xff, 24);
2558 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2559 if (layout_md2ddf(info, vc) == -1 ||
2560 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2561 pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
2562 __func__, info->level, info->layout, info->raid_disks);
2563 free(vcl);
2564 return 0;
2565 }
2566 vc->sec_elmnt_seq = 0;
2567 if (alloc_other_bvds(ddf, vcl) != 0) {
2568 pr_err("%s could not allocate other bvds\n",
2569 __func__);
2570 free(vcl);
2571 return 0;
2572 }
2573 vc->blocks = cpu_to_be64(info->size * 2);
2574 vc->array_blocks = cpu_to_be64(
2575 calc_array_size(info->level, info->raid_disks, info->layout,
2576 info->chunk_size, info->size*2));
2577 memset(vc->pad1, 0xff, 8);
2578 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2579 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2580 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2581 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2582 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2583 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2584 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2585 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2586 memset(vc->cache_pol, 0, 8);
2587 vc->bg_rate = 0x80;
2588 memset(vc->pad2, 0xff, 3);
2589 memset(vc->pad3, 0xff, 52);
2590 memset(vc->pad4, 0xff, 192);
2591 memset(vc->v0, 0xff, 32);
2592 memset(vc->v1, 0xff, 32);
2593 memset(vc->v2, 0xff, 16);
2594 memset(vc->v3, 0xff, 16);
2595 memset(vc->vendor, 0xff, 32);
2596
2597 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2598 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2599
2600 for (i = 1; i < vc->sec_elmnt_count; i++) {
2601 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2602 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2603 }
2604
2605 vcl->next = ddf->conflist;
2606 ddf->conflist = vcl;
2607 ddf->currentconf = vcl;
2608 ddf_set_updates_pending(ddf);
2609 return 1;
2610 }
2611
2612
2613 #ifndef MDASSEMBLE
2614 static int get_svd_state(const struct ddf_super *, const struct vcl *);
2615
2616 static void add_to_super_ddf_bvd(struct supertype *st,
2617 mdu_disk_info_t *dk, int fd, char *devname)
2618 {
2619 /* fd and devname identify a device within the ddf container (st).
2620 * dk identifies a location in the new BVD.
2621 * We need to find suitable free space in that device and update
2622 * the phys_refnum and lba_offset for the newly created vd_config.
2623 * We might also want to update the type in the phys_disk
2624 * section.
2625 *
2626 * Alternatively: fd == -1 and we have already chosen which device to
2627 * use, recording it in dl->raiddisk.
2628 */
2629 struct dl *dl;
2630 struct ddf_super *ddf = st->sb;
2631 struct vd_config *vc;
2632 unsigned int i;
2633 unsigned long long blocks, pos, esize;
2634 struct extent *ex;
2635 unsigned int raid_disk = dk->raid_disk;
2636
2637 if (fd == -1) {
2638 for (dl = ddf->dlist; dl ; dl = dl->next)
2639 if (dl->raiddisk == dk->raid_disk)
2640 break;
2641 } else {
2642 for (dl = ddf->dlist; dl ; dl = dl->next)
2643 if (dl->major == dk->major &&
2644 dl->minor == dk->minor)
2645 break;
2646 }
2647 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
2648 return;
2649
2650 vc = &ddf->currentconf->conf;
2651 if (vc->sec_elmnt_count > 1) {
2652 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2653 if (raid_disk >= n)
2654 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2655 raid_disk %= n;
2656 }
2657
2658 ex = get_extents(ddf, dl);
2659 if (!ex)
2660 return;
2661
2662 i = 0; pos = 0;
2663 blocks = be64_to_cpu(vc->blocks);
2664 if (ddf->currentconf->block_sizes)
2665 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2666
2667 do {
2668 esize = ex[i].start - pos;
2669 if (esize >= blocks)
2670 break;
2671 pos = ex[i].start + ex[i].size;
2672 i++;
2673 } while (ex[i-1].size);
2674
2675 free(ex);
2676 if (esize < blocks)
2677 return;
2678
2679 ddf->currentdev = dk->raid_disk;
2680 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2681 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2682
2683 for (i = 0; i < ddf->max_part ; i++)
2684 if (dl->vlist[i] == NULL)
2685 break;
2686 if (i == ddf->max_part)
2687 return;
2688 dl->vlist[i] = ddf->currentconf;
2689
2690 if (fd >= 0)
2691 dl->fd = fd;
2692 if (devname)
2693 dl->devname = devname;
2694
2695 /* Check if we can mark array as optimal yet */
2696 i = ddf->currentconf->vcnum;
2697 ddf->virt->entries[i].state =
2698 (ddf->virt->entries[i].state & ~DDF_state_mask)
2699 | get_svd_state(ddf, ddf->currentconf);
2700 be16_clear(ddf->phys->entries[dl->pdnum].type,
2701 cpu_to_be16(DDF_Global_Spare));
2702 be16_set(ddf->phys->entries[dl->pdnum].type,
2703 cpu_to_be16(DDF_Active_in_VD));
2704 dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
2705 __func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
2706 ddf->currentconf->vcnum, guid_str(vc->guid),
2707 dk->raid_disk);
2708 ddf_set_updates_pending(ddf);
2709 }
2710
2711 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2712 {
2713 unsigned int i;
2714 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2715 if (all_ff(ddf->phys->entries[i].guid))
2716 return i;
2717 }
2718 return DDF_NOTFOUND;
2719 }
2720
2721 static void _set_config_size(struct phys_disk_entry *pde, const struct dl *dl)
2722 {
2723 __u64 cfs, t;
2724 cfs = min(dl->size - 32*1024*2ULL, be64_to_cpu(dl->primary_lba));
2725 t = be64_to_cpu(dl->secondary_lba);
2726 if (t != ~(__u64)0)
2727 cfs = min(cfs, t);
2728 /*
2729 * Some vendor DDF structures interpret workspace_lba
2730 * very differently than we do. Make a sanity check on the value.
2731 */
2732 t = be64_to_cpu(dl->workspace_lba);
2733 if (t < cfs) {
2734 __u64 wsp = cfs - t;
2735 if (wsp > 1024*1024*2ULL && wsp > dl->size / 16) {
2736 pr_err("%s: %x:%x: workspace size 0x%llx too big, ignoring\n",
2737 __func__, dl->major, dl->minor, wsp);
2738 } else
2739 cfs = t;
2740 }
2741 pde->config_size = cpu_to_be64(cfs);
2742 dprintf("%s: %x:%x config_size %llx, DDF structure is %llx blocks\n",
2743 __func__, dl->major, dl->minor, cfs, dl->size-cfs);
2744 }
2745
2746 /* add a device to a container, either while creating it or while
2747 * expanding a pre-existing container
2748 */
2749 static int add_to_super_ddf(struct supertype *st,
2750 mdu_disk_info_t *dk, int fd, char *devname,
2751 unsigned long long data_offset)
2752 {
2753 struct ddf_super *ddf = st->sb;
2754 struct dl *dd;
2755 time_t now;
2756 struct tm *tm;
2757 unsigned long long size;
2758 struct phys_disk_entry *pde;
2759 unsigned int n, i;
2760 struct stat stb;
2761 __u32 *tptr;
2762
2763 if (ddf->currentconf) {
2764 add_to_super_ddf_bvd(st, dk, fd, devname);
2765 return 0;
2766 }
2767
2768 /* This is device numbered dk->number. We need to create
2769 * a phys_disk entry and a more detailed disk_data entry.
2770 */
2771 fstat(fd, &stb);
2772 n = find_unused_pde(ddf);
2773 if (n == DDF_NOTFOUND) {
2774 pr_err("%s: No free slot in array, cannot add disk\n",
2775 __func__);
2776 return 1;
2777 }
2778 pde = &ddf->phys->entries[n];
2779 get_dev_size(fd, NULL, &size);
2780 if (size <= 32*1024*1024) {
2781 pr_err("%s: device size must be at least 32MB\n",
2782 __func__);
2783 return 1;
2784 }
2785 size >>= 9;
2786
2787 if (posix_memalign((void**)&dd, 512,
2788 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2789 pr_err("%s could not allocate buffer for new disk, aborting\n",
2790 __func__);
2791 return 1;
2792 }
2793 dd->major = major(stb.st_rdev);
2794 dd->minor = minor(stb.st_rdev);
2795 dd->devname = devname;
2796 dd->fd = fd;
2797 dd->spare = NULL;
2798
2799 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2800 now = time(0);
2801 tm = localtime(&now);
2802 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2803 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2804 tptr = (__u32 *)(dd->disk.guid + 16);
2805 *tptr++ = random32();
2806 *tptr = random32();
2807
2808 do {
2809 /* Cannot be bothered finding a CRC of some irrelevant details*/
2810 dd->disk.refnum._v32 = random32();
2811 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2812 i > 0; i--)
2813 if (be32_eq(ddf->phys->entries[i-1].refnum,
2814 dd->disk.refnum))
2815 break;
2816 } while (i > 0);
2817
2818 dd->disk.forced_ref = 1;
2819 dd->disk.forced_guid = 1;
2820 memset(dd->disk.vendor, ' ', 32);
2821 memcpy(dd->disk.vendor, "Linux", 5);
2822 memset(dd->disk.pad, 0xff, 442);
2823 for (i = 0; i < ddf->max_part ; i++)
2824 dd->vlist[i] = NULL;
2825
2826 dd->pdnum = n;
2827
2828 if (st->update_tail) {
2829 int len = (sizeof(struct phys_disk) +
2830 sizeof(struct phys_disk_entry));
2831 struct phys_disk *pd;
2832
2833 pd = xmalloc(len);
2834 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2835 pd->used_pdes = cpu_to_be16(n);
2836 pde = &pd->entries[0];
2837 dd->mdupdate = pd;
2838 } else
2839 ddf->phys->used_pdes = cpu_to_be16(
2840 1 + be16_to_cpu(ddf->phys->used_pdes));
2841
2842 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2843 pde->refnum = dd->disk.refnum;
2844 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2845 pde->state = cpu_to_be16(DDF_Online);
2846 dd->size = size;
2847 /*
2848 * If there is already a device in dlist, try to reserve the same
2849 * amount of workspace. Otherwise, use 32MB.
2850 * We checked disk size above already.
2851 */
2852 #define __calc_lba(new, old, lba, mb) do { \
2853 unsigned long long dif; \
2854 if ((old) != NULL) \
2855 dif = (old)->size - be64_to_cpu((old)->lba); \
2856 else \
2857 dif = (new)->size; \
2858 if ((new)->size > dif) \
2859 (new)->lba = cpu_to_be64((new)->size - dif); \
2860 else \
2861 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2862 } while (0)
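/* Worked example for __calc_lba (hypothetical numbers, for illustration
 * only): if the existing dlist entry has size 2097152 sectors and its
 * workspace_lba is 2031616, then dif = 65536, so a new 4194304-sector
 * disk gets workspace_lba = 4194304 - 65536 = 4128768, i.e. the same
 * 32MiB of reserved workspace. With no existing disk, the fallback of
 * (mb * 1024 * 2) sectors from the end is used instead.
 */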
2863 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2864 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2865 if (ddf->dlist == NULL ||
2866 be64_to_cpu(ddf->dlist->secondary_lba) != ~(__u64)0)
2867 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2868 _set_config_size(pde, dd);
2869
2870 sprintf(pde->path, "%17.17s", "Information: nil");
2871 memset(pde->pad, 0xff, 6);
2872
2873 if (st->update_tail) {
2874 dd->next = ddf->add_list;
2875 ddf->add_list = dd;
2876 } else {
2877 dd->next = ddf->dlist;
2878 ddf->dlist = dd;
2879 ddf_set_updates_pending(ddf);
2880 }
2881
2882 return 0;
2883 }
2884
2885 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2886 {
2887 struct ddf_super *ddf = st->sb;
2888 struct dl *dl;
2889
2890 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2891 * disappeared from the container.
2892 * We need to arrange that it disappears from the metadata and
2893 * internal data structures too.
2894 * Most of the work is done by ddf_process_update which edits
2895 * the metadata, closes the file handle, and attaches the memory
2896 * so that free_updates will free it.
2897 */
2898 for (dl = ddf->dlist; dl ; dl = dl->next)
2899 if (dl->major == dk->major &&
2900 dl->minor == dk->minor)
2901 break;
2902 if (!dl)
2903 return -1;
2904
2905 if (st->update_tail) {
2906 int len = (sizeof(struct phys_disk) +
2907 sizeof(struct phys_disk_entry));
2908 struct phys_disk *pd;
2909
2910 pd = xmalloc(len);
2911 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2912 pd->used_pdes = cpu_to_be16(dl->pdnum);
2913 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2914 append_metadata_update(st, pd, len);
2915 }
2916 return 0;
2917 }
2918 #endif
2919
2920 /*
2921 * This is the write_init_super method for a ddf container. It is
2922 * called when creating a container or adding another device to a
2923 * container.
2924 */
2925 #define NULL_CONF_SZ 4096
2926
2927 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
2928 {
2929 unsigned long long sector;
2930 struct ddf_header *header;
2931 int fd, i, n_config, conf_size, buf_size;
2932 int ret = 0;
2933 char *conf;
2934
2935 fd = d->fd;
2936
2937 switch (type) {
2938 case DDF_HEADER_PRIMARY:
2939 header = &ddf->primary;
2940 sector = be64_to_cpu(header->primary_lba);
2941 break;
2942 case DDF_HEADER_SECONDARY:
2943 header = &ddf->secondary;
2944 sector = be64_to_cpu(header->secondary_lba);
2945 break;
2946 default:
2947 return 0;
2948 }
2949 if (sector == ~(__u64)0)
2950 return 0;
2951
2952 header->type = type;
2953 header->openflag = 1;
2954 header->crc = calc_crc(header, 512);
2955
2956 lseek64(fd, sector<<9, 0);
2957 if (write(fd, header, 512) < 0)
2958 goto out;
2959
2960 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2961 if (write(fd, &ddf->controller, 512) < 0)
2962 goto out;
2963
2964 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2965 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2966 goto out;
2967 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2968 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2969 goto out;
2970
2971 /* Now write lots of config records. */
2972 n_config = ddf->max_part;
2973 conf_size = ddf->conf_rec_len * 512;
2974 conf = ddf->conf;
2975 buf_size = conf_size * (n_config + 1);
2976 if (!conf) {
2977 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
2978 goto out;
2979 ddf->conf = conf;
2980 }
2981 for (i = 0 ; i <= n_config ; i++) {
2982 struct vcl *c;
2983 struct vd_config *vdc = NULL;
2984 if (i == n_config) {
2985 c = (struct vcl *)d->spare;
2986 if (c)
2987 vdc = &c->conf;
2988 } else {
2989 unsigned int dummy;
2990 c = d->vlist[i];
2991 if (c)
2992 get_pd_index_from_refnum(
2993 c, d->disk.refnum,
2994 ddf->mppe,
2995 (const struct vd_config **)&vdc,
2996 &dummy);
2997 }
2998 if (c) {
2999 dprintf("writing conf record %i on disk %08x for %s/%u\n",
3000 i, be32_to_cpu(d->disk.refnum),
3001 guid_str(vdc->guid),
3002 vdc->sec_elmnt_seq);
3003 vdc->seqnum = header->seq;
3004 vdc->crc = calc_crc(vdc, conf_size);
3005 memcpy(conf + i*conf_size, vdc, conf_size);
3006 } else
3007 memset(conf + i*conf_size, 0xff, conf_size);
3008 }
3009 if (write(fd, conf, buf_size) != buf_size)
3010 goto out;
3011
3012 d->disk.crc = calc_crc(&d->disk, 512);
3013 if (write(fd, &d->disk, 512) < 0)
3014 goto out;
3015
3016 ret = 1;
3017 out:
3018 header->openflag = 0;
3019 header->crc = calc_crc(header, 512);
3020
3021 lseek64(fd, sector<<9, 0);
3022 if (write(fd, header, 512) < 0)
3023 ret = 0;
3024
3025 return ret;
3026 }
3027
3028 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
3029 {
3030 unsigned long long size;
3031 int fd = d->fd;
3032 if (fd < 0)
3033 return 0;
3034
3035 /* We need to fill in the primary, (secondary) and workspace
3036 * lba's in the headers and set their checksums,
3037 * and also checksum phys, virt, ....
3038 *
3039 * Then write everything out, finally the anchor is written.
3040 */
3041 get_dev_size(fd, NULL, &size);
3042 size /= 512;
3043 if (be64_to_cpu(d->workspace_lba) != 0ULL)
3044 ddf->anchor.workspace_lba = d->workspace_lba;
3045 else
3046 ddf->anchor.workspace_lba =
3047 cpu_to_be64(size - 32*1024*2);
3048 if (be64_to_cpu(d->primary_lba) != 0ULL)
3049 ddf->anchor.primary_lba = d->primary_lba;
3050 else
3051 ddf->anchor.primary_lba =
3052 cpu_to_be64(size - 16*1024*2);
3053 if (be64_to_cpu(d->secondary_lba) != 0ULL)
3054 ddf->anchor.secondary_lba = d->secondary_lba;
3055 else
3056 ddf->anchor.secondary_lba =
3057 cpu_to_be64(size - 32*1024*2);
3058 ddf->anchor.seq = ddf->active->seq;
3059 memcpy(&ddf->primary, &ddf->anchor, 512);
3060 memcpy(&ddf->secondary, &ddf->anchor, 512);
3061
3062 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
3063 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
3064 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
3065
3066 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
3067 return 0;
3068
3069 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
3070 return 0;
3071
3072 lseek64(fd, (size-1)*512, SEEK_SET);
3073 if (write(fd, &ddf->anchor, 512) < 0)
3074 return 0;
3075
3076 return 1;
3077 }
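/* With the defaults chosen above, a freshly initialised disk therefore
 * ends up with (sizes in 512-byte sectors; a summary sketch, assuming no
 * pre-existing lba values were loaded from the device):
 *
 *	workspace_lba  = size - 65536   (last 32MiB, reserved workspace)
 *	primary_lba    = size - 32768   (primary DDF header + sections)
 *	secondary_lba  = size - 65536   (secondary copy, if written)
 *	anchor         = size - 1       (always the very last sector)
 */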
3078
3079 #ifndef MDASSEMBLE
3080 static int __write_init_super_ddf(struct supertype *st)
3081 {
3082 struct ddf_super *ddf = st->sb;
3083 struct dl *d;
3084 int attempts = 0;
3085 int successes = 0;
3086
3087 pr_state(ddf, __func__);
3088
3089 /* try to write updated metadata,
3090 * if we catch a failure move on to the next disk
3091 */
3092 for (d = ddf->dlist; d; d=d->next) {
3093 attempts++;
3094 successes += _write_super_to_disk(ddf, d);
3095 }
3096
3097 return attempts != successes;
3098 }
3099
3100 static int write_init_super_ddf(struct supertype *st)
3101 {
3102 struct ddf_super *ddf = st->sb;
3103 struct vcl *currentconf = ddf->currentconf;
3104
3105 /* we are done with currentconf; reset it so that st points at the container */
3106 ddf->currentconf = NULL;
3107
3108 if (st->update_tail) {
3109 /* queue the virtual_disk and vd_config as metadata updates */
3110 struct virtual_disk *vd;
3111 struct vd_config *vc;
3112 int len, tlen;
3113 unsigned int i;
3114
3115 if (!currentconf) {
3116 int len = (sizeof(struct phys_disk) +
3117 sizeof(struct phys_disk_entry));
3118
3119 /* adding a disk to the container. */
3120 if (!ddf->add_list)
3121 return 0;
3122
3123 append_metadata_update(st, ddf->add_list->mdupdate, len);
3124 ddf->add_list->mdupdate = NULL;
3125 return 0;
3126 }
3127
3128 /* Newly created VD */
3129
3130 /* First the virtual disk. We have a slightly fake header */
3131 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3132 vd = xmalloc(len);
3133 *vd = *ddf->virt;
3134 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3135 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3136 append_metadata_update(st, vd, len);
3137
3138 /* Then the vd_config */
3139 len = ddf->conf_rec_len * 512;
3140 tlen = len * currentconf->conf.sec_elmnt_count;
3141 vc = xmalloc(tlen);
3142 memcpy(vc, &currentconf->conf, len);
3143 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3144 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3145 len);
3146 append_metadata_update(st, vc, tlen);
3147
3148 /* FIXME I need to close the fds! */
3149 return 0;
3150 } else {
3151 struct dl *d;
3152 if (!currentconf)
3153 for (d = ddf->dlist; d; d=d->next)
3154 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3155 return __write_init_super_ddf(st);
3156 }
3157 }
3158
3159 #endif
3160
3161 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3162 unsigned long long data_offset)
3163 {
3164 /* We must reserve the last 32Meg */
3165 if (devsize <= 32*1024*2)
3166 return 0;
3167 return devsize - 32*1024*2;
3168 }
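/* devsize here is in 512-byte sectors, so 32*1024*2 == 65536 sectors is
 * the 32MiB DDF reservation; e.g. a 2TiB member (4294967296 sectors)
 * would report 4294967296 - 65536 usable sectors. (Illustrative numbers
 * only.)
 */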
3169
3170 #ifndef MDASSEMBLE
3171
3172 static int reserve_space(struct supertype *st, int raiddisks,
3173 unsigned long long size, int chunk,
3174 unsigned long long *freesize)
3175 {
3176 /* Find 'raiddisks' spare extents at least 'size' big (but
3177 * only caring about multiples of 'chunk') and remember
3178 * them.
3179 * If they cannot be found, fail.
3180 */
3181 struct dl *dl;
3182 struct ddf_super *ddf = st->sb;
3183 int cnt = 0;
3184
3185 for (dl = ddf->dlist; dl ; dl=dl->next) {
3186 dl->raiddisk = -1;
3187 dl->esize = 0;
3188 }
3189 /* Now find largest extent on each device */
3190 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3191 struct extent *e = get_extents(ddf, dl);
3192 unsigned long long pos = 0;
3193 int i = 0;
3194 int found = 0;
3195 unsigned long long minsize = size;
3196
3197 if (size == 0)
3198 minsize = chunk;
3199
3200 if (!e)
3201 continue;
3202 do {
3203 unsigned long long esize;
3204 esize = e[i].start - pos;
3205 if (esize >= minsize) {
3206 found = 1;
3207 minsize = esize;
3208 }
3209 pos = e[i].start + e[i].size;
3210 i++;
3211 } while (e[i-1].size);
3212 if (found) {
3213 cnt++;
3214 dl->esize = minsize;
3215 }
3216 free(e);
3217 }
3218 if (cnt < raiddisks) {
3219 pr_err("not enough devices with space to create array.\n");
3220 return 0; /* Not enough free extents large enough */
3221 }
3222 if (size == 0) {
3223 /* choose the largest size of which there are at least 'raiddisks' */
3224 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3225 struct dl *dl2;
3226 if (dl->esize <= size)
3227 continue;
3228 /* This is bigger than 'size', see if there are enough */
3229 cnt = 0;
3230 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3231 if (dl2->esize >= dl->esize)
3232 cnt++;
3233 if (cnt >= raiddisks)
3234 size = dl->esize;
3235 }
3236 if (chunk) {
3237 size = size / chunk;
3238 size *= chunk;
3239 }
3240 *freesize = size;
3241 if (size < 32) {
3242 pr_err("not enough spare devices to create array.\n");
3243 return 0;
3244 }
3245 }
3246 /* We have a 'size' for which there are enough free extents.
3247 * We simply do a first-fit */
3248 cnt = 0;
3249 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3250 if (dl->esize < size)
3251 continue;
3252
3253 dl->raiddisk = cnt;
3254 cnt++;
3255 }
3256 return 1;
3257 }
3258
3259 static int
3260 validate_geometry_ddf_container(struct supertype *st,
3261 int level, int layout, int raiddisks,
3262 int chunk, unsigned long long size,
3263 unsigned long long data_offset,
3264 char *dev, unsigned long long *freesize,
3265 int verbose);
3266
3267 static int validate_geometry_ddf_bvd(struct supertype *st,
3268 int level, int layout, int raiddisks,
3269 int *chunk, unsigned long long size,
3270 unsigned long long data_offset,
3271 char *dev, unsigned long long *freesize,
3272 int verbose);
3273
3274 static int validate_geometry_ddf(struct supertype *st,
3275 int level, int layout, int raiddisks,
3276 int *chunk, unsigned long long size,
3277 unsigned long long data_offset,
3278 char *dev, unsigned long long *freesize,
3279 int verbose)
3280 {
3281 int fd;
3282 struct mdinfo *sra;
3283 int cfd;
3284
3285 /* ddf potentially supports lots of things, but it depends on
3286 * what devices are offered (and maybe kernel version?)
3287 * If given unused devices, we will make a container.
3288 * If given devices in a container, we will make a BVD.
3289 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3290 */
3291
3292 if (*chunk == UnSet)
3293 *chunk = DEFAULT_CHUNK;
3294
3295 if (level == -1000000) level = LEVEL_CONTAINER;
3296 if (level == LEVEL_CONTAINER) {
3297 /* Must be a fresh device to add to a container */
3298 return validate_geometry_ddf_container(st, level, layout,
3299 raiddisks, *chunk,
3300 size, data_offset, dev,
3301 freesize,
3302 verbose);
3303 }
3304
3305 if (!dev) {
3306 mdu_array_info_t array = {
3307 .level = level, .layout = layout,
3308 .raid_disks = raiddisks
3309 };
3310 struct vd_config conf;
3311 if (layout_md2ddf(&array, &conf) == -1) {
3312 if (verbose)
3313 pr_err("DDF does not support level %d/layout %d arrays with %d disks\n",
3314 level, layout, raiddisks);
3315 return 0;
3316 }
3317 /* Should check layout? etc */
3318
3319 if (st->sb && freesize) {
3320 /* --create was given a container to create in.
3321 * So we need to check that there are enough
3322 * free spaces and return the amount of space.
3323 * We may as well remember which drives were
3324 * chosen so that add_to_super/getinfo_super
3325 * can return them.
3326 */
3327 return reserve_space(st, raiddisks, size, *chunk, freesize);
3328 }
3329 return 1;
3330 }
3331
3332 if (st->sb) {
3333 /* A container has already been opened, so we are
3334 * creating in there. Maybe a BVD, maybe an SVD.
3335 * Should make a distinction one day.
3336 */
3337 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3338 chunk, size, data_offset, dev,
3339 freesize,
3340 verbose);
3341 }
3342 /* This is the first device for the array.
3343 * If it is a container, we read it in and do automagic allocations,
3344 * no other devices should be given.
3345 * Otherwise it must be a member device of a container, and we
3346 * do manual allocation.
3347 * Later we should check for a BVD and make an SVD.
3348 */
3349 fd = open(dev, O_RDONLY|O_EXCL, 0);
3350 if (fd >= 0) {
3351 sra = sysfs_read(fd, NULL, GET_VERSION);
3352 close(fd);
3353 if (sra && sra->array.major_version == -1 &&
3354 strcmp(sra->text_version, "ddf") == 0) {
3355
3356 /* load super */
3357 /* find space for 'n' devices. */
3358 /* remember the devices */
3359 /* Somehow return the fact that we have enough */
3360 }
3361
3362 if (verbose)
3363 pr_err("ddf: Cannot create this array "
3364 "on device %s - a container is required.\n",
3365 dev);
3366 return 0;
3367 }
3368 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3369 if (verbose)
3370 pr_err("ddf: Cannot open %s: %s\n",
3371 dev, strerror(errno));
3372 return 0;
3373 }
3374 /* Well, it is in use by someone, maybe a 'ddf' container. */
3375 cfd = open_container(fd);
3376 if (cfd < 0) {
3377 close(fd);
3378 if (verbose)
3379 pr_err("ddf: Cannot use %s: %s\n",
3380 dev, strerror(EBUSY));
3381 return 0;
3382 }
3383 sra = sysfs_read(cfd, NULL, GET_VERSION);
3384 close(fd);
3385 if (sra && sra->array.major_version == -1 &&
3386 strcmp(sra->text_version, "ddf") == 0) {
3387 /* This is a member of a ddf container. Load the container
3388 * and try to create a bvd
3389 */
3390 struct ddf_super *ddf;
3391 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3392 st->sb = ddf;
3393 strcpy(st->container_devnm, fd2devnm(cfd));
3394 close(cfd);
3395 return validate_geometry_ddf_bvd(st, level, layout,
3396 raiddisks, chunk, size,
3397 data_offset,
3398 dev, freesize,
3399 verbose);
3400 }
3401 close(cfd);
3402 } else /* device may belong to a different container */
3403 return 0;
3404
3405 return 1;
3406 }
3407
3408 static int
3409 validate_geometry_ddf_container(struct supertype *st,
3410 int level, int layout, int raiddisks,
3411 int chunk, unsigned long long size,
3412 unsigned long long data_offset,
3413 char *dev, unsigned long long *freesize,
3414 int verbose)
3415 {
3416 int fd;
3417 unsigned long long ldsize;
3418
3419 if (level != LEVEL_CONTAINER)
3420 return 0;
3421 if (!dev)
3422 return 1;
3423
3424 fd = open(dev, O_RDONLY|O_EXCL, 0);
3425 if (fd < 0) {
3426 if (verbose)
3427 pr_err("ddf: Cannot open %s: %s\n",
3428 dev, strerror(errno));
3429 return 0;
3430 }
3431 if (!get_dev_size(fd, dev, &ldsize)) {
3432 close(fd);
3433 return 0;
3434 }
3435 close(fd);
3436
3437 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3438 if (*freesize == 0)
3439 return 0;
3440
3441 return 1;
3442 }
3443
3444 static int validate_geometry_ddf_bvd(struct supertype *st,
3445 int level, int layout, int raiddisks,
3446 int *chunk, unsigned long long size,
3447 unsigned long long data_offset,
3448 char *dev, unsigned long long *freesize,
3449 int verbose)
3450 {
3451 struct stat stb;
3452 struct ddf_super *ddf = st->sb;
3453 struct dl *dl;
3454 unsigned long long pos = 0;
3455 unsigned long long maxsize;
3456 struct extent *e;
3457 int i;
3458 /* ddf/bvd supports lots of things, but not containers */
3459 if (level == LEVEL_CONTAINER) {
3460 if (verbose)
3461 pr_err("DDF cannot create a container within a container\n");
3462 return 0;
3463 }
3464 /* We must have the container info already read in. */
3465 if (!ddf)
3466 return 0;
3467
3468 if (!dev) {
3469 /* General test: make sure there is space for
3470 * 'raiddisks' device extents of size 'size'.
3471 */
3472 unsigned long long minsize = size;
3473 int dcnt = 0;
3474 if (minsize == 0)
3475 minsize = 8;
3476 for (dl = ddf->dlist; dl ; dl = dl->next)
3477 {
3478 int found = 0;
3479 pos = 0;
3480
3481 i = 0;
3482 e = get_extents(ddf, dl);
3483 if (!e) continue;
3484 do {
3485 unsigned long long esize;
3486 esize = e[i].start - pos;
3487 if (esize >= minsize)
3488 found = 1;
3489 pos = e[i].start + e[i].size;
3490 i++;
3491 } while (e[i-1].size);
3492 if (found)
3493 dcnt++;
3494 free(e);
3495 }
3496 if (dcnt < raiddisks) {
3497 if (verbose)
3498 pr_err("ddf: Not enough devices with "
3499 "space for this array (%d < %d)\n",
3500 dcnt, raiddisks);
3501 return 0;
3502 }
3503 return 1;
3504 }
3505 /* This device must be a member of the set */
3506 if (stat(dev, &stb) < 0)
3507 return 0;
3508 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3509 return 0;
3510 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3511 if (dl->major == (int)major(stb.st_rdev) &&
3512 dl->minor == (int)minor(stb.st_rdev))
3513 break;
3514 }
3515 if (!dl) {
3516 if (verbose)
3517 pr_err("ddf: %s is not in the "
3518 "same DDF set\n",
3519 dev);
3520 return 0;
3521 }
3522 e = get_extents(ddf, dl);
3523 maxsize = 0;
3524 i = 0;
3525 if (e) do {
3526 unsigned long long esize;
3527 esize = e[i].start - pos;
3528 if (esize >= maxsize)
3529 maxsize = esize;
3530 pos = e[i].start + e[i].size;
3531 i++;
3532 } while (e[i-1].size);
3533 *freesize = maxsize;
3534 // FIXME here I am
3535
3536 return 1;
3537 }
3538
3539 static int load_super_ddf_all(struct supertype *st, int fd,
3540 void **sbp, char *devname)
3541 {
3542 struct mdinfo *sra;
3543 struct ddf_super *super;
3544 struct mdinfo *sd, *best = NULL;
3545 int bestseq = 0;
3546 int seq;
3547 char nm[20];
3548 int dfd;
3549
3550 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3551 if (!sra)
3552 return 1;
3553 if (sra->array.major_version != -1 ||
3554 sra->array.minor_version != -2 ||
3555 strcmp(sra->text_version, "ddf") != 0)
3556 return 1;
3557
3558 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3559 return 1;
3560 memset(super, 0, sizeof(*super));
3561
3562 /* first, try each device, and choose the best ddf */
3563 for (sd = sra->devs ; sd ; sd = sd->next) {
3564 int rv;
3565 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3566 dfd = dev_open(nm, O_RDONLY);
3567 if (dfd < 0)
3568 return 2;
3569 rv = load_ddf_headers(dfd, super, NULL);
3570 close(dfd);
3571 if (rv == 0) {
3572 seq = be32_to_cpu(super->active->seq);
3573 if (super->active->openflag)
3574 seq--;
3575 if (!best || seq > bestseq) {
3576 bestseq = seq;
3577 best = sd;
3578 }
3579 }
3580 }
3581 if (!best)
3582 return 1;
3583 /* OK, load this ddf */
3584 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3585 dfd = dev_open(nm, O_RDONLY);
3586 if (dfd < 0)
3587 return 1;
3588 load_ddf_headers(dfd, super, NULL);
3589 load_ddf_global(dfd, super, NULL);
3590 close(dfd);
3591 /* Now we need the device-local bits */
3592 for (sd = sra->devs ; sd ; sd = sd->next) {
3593 int rv;
3594
3595 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3596 dfd = dev_open(nm, O_RDWR);
3597 if (dfd < 0)
3598 return 2;
3599 rv = load_ddf_headers(dfd, super, NULL);
3600 if (rv == 0)
3601 rv = load_ddf_local(dfd, super, NULL, 1);
3602 if (rv)
3603 return 1;
3604 }
3605
3606 *sbp = super;
3607 if (st->ss == NULL) {
3608 st->ss = &super_ddf;
3609 st->minor_version = 0;
3610 st->max_devs = 512;
3611 }
3612 strcpy(st->container_devnm, fd2devnm(fd));
3613 return 0;
3614 }
3615
3616 static int load_container_ddf(struct supertype *st, int fd,
3617 char *devname)
3618 {
3619 return load_super_ddf_all(st, fd, &st->sb, devname);
3620 }
3621
3622 #endif /* MDASSEMBLE */
3623
3624 static int check_secondary(const struct vcl *vc)
3625 {
3626 const struct vd_config *conf = &vc->conf;
3627 int i;
3628
3629 /* The only DDF secondary RAID level md can support is
3630 * RAID 10, if the stripe sizes and Basic volume sizes
3631 * are all equal.
3632 * Other configurations could in theory be supported by exposing
3633 * the BVDs to user space and using device mapper for the secondary
3634 * mapping. So far we don't support that.
3635 */
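/* Concrete example (a sketch of one layout these checks accept, not a
 * statement about any particular vendor's metadata): a 4-disk RAID10
 * would appear as sec_elmnt_count == 2 mirrored BVDs, each with
 * prl == DDF_RAID1 and prim_elmnt_count == 2, combined with
 * srl == DDF_2STRIPED and identical chunk_shift and array_blocks.
 */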
3636
3637 __u64 sec_elements[4] = {0, 0, 0, 0};
3638 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1ULL<<((n)&63)))
3639 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1ULL<<((n)&63))) != 0)
3640
3641 if (vc->other_bvds == NULL) {
3642 pr_err("No BVDs for secondary RAID found\n");
3643 return -1;
3644 }
3645 if (conf->prl != DDF_RAID1) {
3646 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3647 return -1;
3648 }
3649 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3650 pr_err("Secondary RAID level %d is unsupported\n",
3651 conf->srl);
3652 return -1;
3653 }
3654 __set_sec_seen(conf->sec_elmnt_seq);
3655 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3656 const struct vd_config *bvd = vc->other_bvds[i];
3657 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3658 continue;
3659 if (bvd->srl != conf->srl) {
3660 pr_err("Inconsistent secondary RAID level across BVDs\n");
3661 return -1;
3662 }
3663 if (bvd->prl != conf->prl) {
3664 pr_err("Different RAID levels for BVDs are unsupported\n");
3665 return -1;
3666 }
3667 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3668 pr_err("All BVDs must have the same number of primary elements\n");
3669 return -1;
3670 }
3671 if (bvd->chunk_shift != conf->chunk_shift) {
3672 pr_err("Different strip sizes for BVDs are unsupported\n");
3673 return -1;
3674 }
3675 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3676 pr_err("Different BVD sizes are unsupported\n");
3677 return -1;
3678 }
3679 __set_sec_seen(bvd->sec_elmnt_seq);
3680 }
3681 for (i = 0; i < conf->sec_elmnt_count; i++) {
3682 if (!__was_sec_seen(i)) {
3683 pr_err("BVD %d is missing\n", i);
3684 return -1;
3685 }
3686 }
3687 return 0;
3688 }
3689
3690 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3691 be32 refnum, unsigned int nmax,
3692 const struct vd_config **bvd,
3693 unsigned int *idx)
3694 {
3695 unsigned int i, j, n, sec, cnt;
3696
3697 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3698 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3699
3700 for (i = 0, j = 0 ; i < nmax ; i++) {
3701 /* j counts valid entries for this BVD */
3702 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3703 j++;
3704 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3705 *bvd = &vc->conf;
3706 *idx = i;
3707 return sec * cnt + j - 1;
3708 }
3709 }
3710 if (vc->other_bvds == NULL)
3711 goto bad;
3712
3713 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3714 struct vd_config *vd = vc->other_bvds[n-1];
3715 sec = vd->sec_elmnt_seq;
3716 if (sec == DDF_UNUSED_BVD)
3717 continue;
3718 for (i = 0, j = 0 ; i < nmax ; i++) {
3719 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3720 j++;
3721 if (be32_eq(vd->phys_refnum[i], refnum)) {
3722 *bvd = vd;
3723 *idx = i;
3724 return sec * cnt + j - 1;
3725 }
3726 }
3727 }
3728 bad:
3729 *bvd = NULL;
3730 return DDF_NOTFOUND;
3731 }
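/* Example of the index returned above (hypothetical values, purely for
 * illustration): with prim_elmnt_count == 2 and refnum matching the
 * second valid phys_refnum slot of the BVD whose sec_elmnt_seq is 1,
 * the function returns 1 * 2 + 2 - 1 == 3, i.e. the third and fourth
 * member disks of a 2x2 RAID10 live in the second BVD.
 */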
3732
3733 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3734 {
3735 /* Given a container loaded by load_super_ddf_all,
3736 * extract information about all the arrays into
3737 * an mdinfo tree.
3738 *
3739 * For each vcl in conflist: create an mdinfo, fill it in,
3740 * then look for matching devices (phys_refnum) in dlist
3741 * and create appropriate device mdinfo.
3742 */
3743 struct ddf_super *ddf = st->sb;
3744 struct mdinfo *rest = NULL;
3745 struct vcl *vc;
3746
3747 for (vc = ddf->conflist ; vc ; vc=vc->next)
3748 {
3749 unsigned int i;
3750 struct mdinfo *this;
3751 char *ep;
3752 __u32 *cptr;
3753 unsigned int pd;
3754
3755 if (subarray &&
3756 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3757 *ep != '\0'))
3758 continue;
3759
3760 if (vc->conf.sec_elmnt_count > 1) {
3761 if (check_secondary(vc) != 0)
3762 continue;
3763 }
3764
3765 this = xcalloc(1, sizeof(*this));
3766 this->next = rest;
3767 rest = this;
3768
3769 if (layout_ddf2md(&vc->conf, &this->array))
3770 continue;
3771 this->array.md_minor = -1;
3772 this->array.major_version = -1;
3773 this->array.minor_version = -2;
3774 this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
3775 cptr = (__u32 *)(vc->conf.guid + 16);
3776 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3777 this->array.utime = DECADE +
3778 be32_to_cpu(vc->conf.timestamp);
3779 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3780
3781 i = vc->vcnum;
3782 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3783 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3784 DDF_init_full) {
3785 this->array.state = 0;
3786 this->resync_start = 0;
3787 } else {
3788 this->array.state = 1;
3789 this->resync_start = MaxSector;
3790 }
3791 _ddf_array_name(this->name, ddf, i);
3792 memset(this->uuid, 0, sizeof(this->uuid));
3793 this->component_size = be64_to_cpu(vc->conf.blocks);
3794 this->array.size = this->component_size / 2;
3795 this->container_member = i;
3796
3797 ddf->currentconf = vc;
3798 uuid_from_super_ddf(st, this->uuid);
3799 if (!subarray)
3800 ddf->currentconf = NULL;
3801
3802 sprintf(this->text_version, "/%s/%d",
3803 st->container_devnm, this->container_member);
3804
3805 for (pd = 0; pd < be16_to_cpu(ddf->phys->used_pdes); pd++) {
3806 struct mdinfo *dev;
3807 struct dl *d;
3808 const struct vd_config *bvd;
3809 unsigned int iphys;
3810 int stt;
3811
3812 if (be32_to_cpu(ddf->phys->entries[pd].refnum)
3813 == 0xFFFFFFFF)
3814 continue;
3815
3816 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3817 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3818 != DDF_Online)
3819 continue;
3820
3821 i = get_pd_index_from_refnum(
3822 vc, ddf->phys->entries[pd].refnum,
3823 ddf->mppe, &bvd, &iphys);
3824 if (i == DDF_NOTFOUND)
3825 continue;
3826
3827 this->array.working_disks++;
3828
3829 for (d = ddf->dlist; d ; d=d->next)
3830 if (be32_eq(d->disk.refnum,
3831 ddf->phys->entries[pd].refnum))
3832 break;
3833 if (d == NULL)
3834 /* Haven't found that one yet, maybe there are others */
3835 continue;
3836
3837 dev = xcalloc(1, sizeof(*dev));
3838 dev->next = this->devs;
3839 this->devs = dev;
3840
3841 dev->disk.number = be32_to_cpu(d->disk.refnum);
3842 dev->disk.major = d->major;
3843 dev->disk.minor = d->minor;
3844 dev->disk.raid_disk = i;
3845 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3846 dev->recovery_start = MaxSector;
3847
3848 dev->events = be32_to_cpu(ddf->active->seq);
3849 dev->data_offset =
3850 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3851 dev->component_size = be64_to_cpu(bvd->blocks);
3852 if (d->devname)
3853 strcpy(dev->name, d->devname);
3854 }
3855 }
3856 return rest;
3857 }
3858
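/* Descriptive comment added editorially (not in the original source):
 * write DDF metadata to the device open on 'fd'.  If a superblock has
 * been loaded (dlist/conflist populated), find the 'dl' entry matching
 * the device's major/minor and write the full metadata there via
 * _write_super_to_disk(); otherwise zero the 512-byte anchor block at
 * the end of the device.  Returns 0 on success, 1 on error.
 */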
3859 static int store_super_ddf(struct supertype *st, int fd)
3860 {
3861 struct ddf_super *ddf = st->sb;
3862 unsigned long long dsize;
3863 void *buf;
3864 int rc;
3865
3866 if (!ddf)
3867 return 1;
3868
3869 if (!get_dev_size(fd, NULL, &dsize))
3870 return 1;
3871
3872 if (ddf->dlist || ddf->conflist) {
3873 struct stat sta;
3874 struct dl *dl;
3875 int ofd, ret;
3876
3877 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3878 pr_err("%s: file descriptor for invalid device\n",
3879 __func__);
3880 return 1;
3881 }
3882 for (dl = ddf->dlist; dl; dl = dl->next)
3883 if (dl->major == (int)major(sta.st_rdev) &&
3884 dl->minor == (int)minor(sta.st_rdev))
3885 break;
3886 if (!dl) {
3887 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3888 (int)major(sta.st_rdev),
3889 (int)minor(sta.st_rdev));
3890 return 1;
3891 }
3892 ofd = dl->fd;
3893 dl->fd = fd;
3894 ret = (_write_super_to_disk(ddf, dl) != 1);
3895 dl->fd = ofd;
3896 return ret;
3897 }
3898
3899 if (posix_memalign(&buf, 512, 512) != 0)
3900 return 1;
3901 memset(buf, 0, 512);
3902
3903 lseek64(fd, dsize-512, 0);
3904 rc = write(fd, buf, 512);
3905 free(buf);
3906 if (rc < 0)
3907 return 1;
3908 return 0;
3909 }
3910
3911 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3912 {
3913 /*
3914 * return:
3915 * 0 same, or first was empty, and second was copied
3916 * 1 second had wrong number
3917 * 2 wrong uuid
3918 * 3 wrong other info
3919 */
3920 struct ddf_super *first = st->sb;
3921 struct ddf_super *second = tst->sb;
3922 struct dl *dl1, *dl2;
3923 struct vcl *vl1, *vl2;
3924 unsigned int max_vds, max_pds, pd, vd;
3925
3926 if (!first) {
3927 st->sb = tst->sb;
3928 tst->sb = NULL;
3929 return 0;
3930 }
3931
3932 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3933 return 2;
3934
3935 if (first->max_part != second->max_part ||
3936 !be16_eq(first->phys->used_pdes, second->phys->used_pdes) ||
3937 !be16_eq(first->virt->populated_vdes,
3938 second->virt->populated_vdes)) {
3939 dprintf("%s: PD/VD number mismatch\n", __func__);
3940 return 3;
3941 }
3942
3943 max_pds = be16_to_cpu(first->phys->used_pdes);
3944 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3945 for (pd = 0; pd < max_pds; pd++)
3946 if (be32_eq(first->phys->entries[pd].refnum,
3947 dl2->disk.refnum))
3948 break;
3949 if (pd == max_pds) {
3950 dprintf("%s: no match for disk %08x\n", __func__,
3951 be32_to_cpu(dl2->disk.refnum));
3952 return 3;
3953 }
3954 }
3955
3956 max_vds = be16_to_cpu(first->active->max_vd_entries);
3957 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3958 if (!be32_eq(vl2->conf.magic, DDF_VD_CONF_MAGIC))
3959 continue;
3960 for (vd = 0; vd < max_vds; vd++)
3961 if (!memcmp(first->virt->entries[vd].guid,
3962 vl2->conf.guid, DDF_GUID_LEN))
3963 break;
3964 if (vd == max_vds) {
3965 dprintf("%s: no match for VD config\n", __func__);
3966 return 3;
3967 }
3968 }
3969 /* FIXME should I look at anything else? */
3970
3971 /*
3972 At this point we are fairly sure that the metadata matches.
3973 But the new disk may contain additional local data.
3974 Add it to the superblock.
3975 */
3976 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3977 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3978 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3979 DDF_GUID_LEN))
3980 break;
3981 if (vl1) {
3982 if (vl1->other_bvds != NULL &&
3983 vl1->conf.sec_elmnt_seq !=
3984 vl2->conf.sec_elmnt_seq) {
3985 dprintf("%s: adding BVD %u\n", __func__,
3986 vl2->conf.sec_elmnt_seq);
3987 add_other_bvd(vl1, &vl2->conf,
3988 first->conf_rec_len*512);
3989 }
3990 continue;
3991 }
3992
3993 if (posix_memalign((void **)&vl1, 512,
3994 (first->conf_rec_len*512 +
3995 offsetof(struct vcl, conf))) != 0) {
3996 pr_err("%s could not allocate vcl buf\n",
3997 __func__);
3998 return 3;
3999 }
4000
4001 vl1->next = first->conflist;
4002 vl1->block_sizes = NULL;
4003 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
4004 if (alloc_other_bvds(first, vl1) != 0) {
4005 pr_err("%s could not allocate other bvds\n",
4006 __func__);
4007 free(vl1);
4008 return 3;
4009 }
4010 for (vd = 0; vd < max_vds; vd++)
4011 if (!memcmp(first->virt->entries[vd].guid,
4012 vl1->conf.guid, DDF_GUID_LEN))
4013 break;
4014 vl1->vcnum = vd;
4015 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
4016 first->conflist = vl1;
4017 }
4018
4019 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
4020 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
4021 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
4022 break;
4023 if (dl1)
4024 continue;
4025
4026 if (posix_memalign((void **)&dl1, 512,
4027 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
4028 != 0) {
4029 pr_err("%s could not allocate disk info buffer\n",
4030 __func__);
4031 return 3;
4032 }
4033 memcpy(dl1, dl2, sizeof(*dl1));
4034 dl1->mdupdate = NULL;
4035 dl1->next = first->dlist;
4036 dl1->fd = -1;
4037 for (pd = 0; pd < max_pds; pd++)
4038 if (be32_eq(first->phys->entries[pd].refnum,
4039 dl1->disk.refnum))
4040 break;
4041 dl1->pdnum = pd;
4042 if (dl2->spare) {
4043 if (posix_memalign((void **)&dl1->spare, 512,
4044 first->conf_rec_len*512) != 0) {
4045 pr_err("%s could not allocate spare info buf\n",
4046 __func__);
4047 return 3;
4048 }
4049 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
4050 }
4051 for (vd = 0 ; vd < first->max_part ; vd++) {
4052 if (!dl2->vlist[vd]) {
4053 dl1->vlist[vd] = NULL;
4054 continue;
4055 }
4056 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
4057 if (!memcmp(vl1->conf.guid,
4058 dl2->vlist[vd]->conf.guid,
4059 DDF_GUID_LEN))
4060 break;
4061 dl1->vlist[vd] = vl1;
4062 }
4063 }
4064 first->dlist = dl1;
4065 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
4066 be32_to_cpu(dl1->disk.refnum));
4067 }
4068
4069 return 0;
4070 }
4071
4072 #ifndef MDASSEMBLE
4073 /*
4074 * A new array 'a' has been started which claims to be instance 'inst'
4075 * within container 'c'.
4076 * We need to confirm that the array matches the metadata in 'c' so
4077 * that we don't corrupt any metadata.
4078 */
4079 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
4080 {
4081 struct ddf_super *ddf = c->sb;
4082 int n = atoi(inst);
4083 struct mdinfo *dev;
4084 struct dl *dl;
4085 static const char faulty[] = "faulty";
4086
4087 if (all_ff(ddf->virt->entries[n].guid)) {
4088 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
4089 return -ENODEV;
4090 }
4091 dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
4092 guid_str(ddf->virt->entries[n].guid));
4093 for (dev = a->info.devs; dev; dev = dev->next) {
4094 for (dl = ddf->dlist; dl; dl = dl->next)
4095 if (dl->major == dev->disk.major &&
4096 dl->minor == dev->disk.minor)
4097 break;
4098 if (!dl) {
4099 pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
4100 __func__, dev->disk.major, dev->disk.minor, n);
4101 return -1;
4102 }
4103 if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
4104 (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
4105 pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
4106 __func__, n, dl->major, dl->minor,
4107 be16_to_cpu(
4108 ddf->phys->entries[dl->pdnum].state));
4109 if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
4110 sizeof(faulty) - 1)
4111 pr_err("Write to state_fd failed\n");
4112 dev->curr_state = DS_FAULTY;
4113 }
4114 }
4115 a->info.container_member = n;
4116 return 0;
4117 }
4118
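/* Editorial note (not in the original source): a BVD slot whose
 * backing device cannot be found in 'dlist' is recorded as missing by
 * clearing its reference number, i.e.
 *
 *	vc->phys_refnum[n_bvd] = cpu_to_be32(0);
 */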
4119 static void handle_missing(struct ddf_super *ddf, int inst)
4120 {
4121 /* This member array is being activated. If any devices
4122 * are missing they must now be marked as failed.
4123 */
4124 struct vd_config *vc;
4125 unsigned int n_bvd;
4126 struct vcl *vcl;
4127 struct dl *dl;
4128 int n;
4129
4130 for (n = 0; ; n++) {
4131 vc = find_vdcr(ddf, inst, n, &n_bvd, &vcl);
4132 if (!vc)
4133 break;
4134 for (dl = ddf->dlist; dl; dl = dl->next)
4135 if (be32_eq(dl->disk.refnum, vc->phys_refnum[n_bvd]))
4136 break;
4137 if (dl)
4138 /* Found this disk, so not missing */
4139 continue;
4140 vc->phys_refnum[n_bvd] = cpu_to_be32(0);
4141 }
4142 }
4143
4144 /*
4145 * The array 'a' is to be marked clean in the metadata.
4146 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4147 * clean up to that point (in sectors). If that cannot be recorded in the
4148 * metadata, leave it marked dirty.
4149 *
4150 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4151 * !global! virtual_disk.virtual_entry structure.
4152 */
4153 static int ddf_set_array_state(struct active_array *a, int consistent)
4154 {
4155 struct ddf_super *ddf = a->container->sb;
4156 int inst = a->info.container_member;
4157 int old = ddf->virt->entries[inst].state;
4158 if (consistent == 2) {
4159 handle_missing(ddf, inst);
4160 /* Should check if a recovery should be started FIXME */
4161 consistent = 1;
4162 if (!is_resync_complete(&a->info))
4163 consistent = 0;
4164 }
4165 if (consistent)
4166 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4167 else
4168 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4169 if (old != ddf->virt->entries[inst].state)
4170 ddf_set_updates_pending(ddf);
4171
4172 old = ddf->virt->entries[inst].init_state;
4173 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4174 if (is_resync_complete(&a->info))
4175 ddf->virt->entries[inst].init_state |= DDF_init_full;
4176 else if (a->info.resync_start == 0)
4177 ddf->virt->entries[inst].init_state |= DDF_init_not;
4178 else
4179 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4180 if (old != ddf->virt->entries[inst].init_state)
4181 ddf_set_updates_pending(ddf);
4182
4183 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4184 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4185 consistent?"clean":"dirty",
4186 a->info.resync_start);
4187 return consistent;
4188 }
4189
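/* Worked example (editorial, not from the original source): for a
 * RAID5 BVD with prim_elmnt_count == 4, the mapping below gives
 *	working == 4  ->  DDF_state_optimal
 *	working == 3  ->  DDF_state_degraded
 *	working <= 2  ->  DDF_state_failed
 */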
4190 static int get_bvd_state(const struct ddf_super *ddf,
4191 const struct vd_config *vc)
4192 {
4193 unsigned int i, n_bvd, working = 0;
4194 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4195 int pd, st, state;
4196 for (i = 0; i < n_prim; i++) {
4197 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4198 continue;
4199 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4200 if (pd < 0)
4201 continue;
4202 st = be16_to_cpu(ddf->phys->entries[pd].state);
4203 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4204 == DDF_Online)
4205 working++;
4206 }
4207
4208 state = DDF_state_degraded;
4209 if (working == n_prim)
4210 state = DDF_state_optimal;
4211 else
4212 switch (vc->prl) {
4213 case DDF_RAID0:
4214 case DDF_CONCAT:
4215 case DDF_JBOD:
4216 state = DDF_state_failed;
4217 break;
4218 case DDF_RAID1:
4219 if (working == 0)
4220 state = DDF_state_failed;
4221 else if (working >= 2)
4222 state = DDF_state_part_optimal;
4223 break;
4224 case DDF_RAID4:
4225 case DDF_RAID5:
4226 if (working < n_prim - 1)
4227 state = DDF_state_failed;
4228 break;
4229 case DDF_RAID6:
4230 if (working < n_prim - 2)
4231 state = DDF_state_failed;
4232 else if (working == n_prim - 1)
4233 state = DDF_state_part_optimal;
4234 break;
4235 }
4236 return state;
4237 }
4238
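/* Worked example (editorial, not from the original source): with one
 * leg optimal and the other degraded, a DDF_2MIRRORED secondary level
 * yields DDF_state_part_optimal, while any other secondary level
 * yields DDF_state_degraded.
 */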
4239 static int secondary_state(int state, int other, int seclevel)
4240 {
4241 if (state == DDF_state_optimal && other == DDF_state_optimal)
4242 return DDF_state_optimal;
4243 if (seclevel == DDF_2MIRRORED) {
4244 if (state == DDF_state_optimal || other == DDF_state_optimal)
4245 return DDF_state_part_optimal;
4246 if (state == DDF_state_failed && other == DDF_state_failed)
4247 return DDF_state_failed;
4248 return DDF_state_degraded;
4249 } else {
4250 if (state == DDF_state_failed || other == DDF_state_failed)
4251 return DDF_state_failed;
4252 if (state == DDF_state_degraded || other == DDF_state_degraded)
4253 return DDF_state_degraded;
4254 return DDF_state_part_optimal;
4255 }
4256 }
4257
4258 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4259 {
4260 int state = get_bvd_state(ddf, &vcl->conf);
4261 unsigned int i;
4262 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4263 state = secondary_state(
4264 state,
4265 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4266 vcl->conf.srl);
4267 }
4268 return state;
4269 }
4270
4271 /*
4272 * The state of each disk is stored in the global phys_disk structure
4273 * in phys_disk.entries[n].state.
4274 * This makes various combinations awkward.
4275 * - When a device fails in any array, it must be failed in all arrays
4276 * that include a part of this device.
4277 * - When a component is rebuilding, we cannot include it officially in the
4278 * array unless this is the only array that uses the device.
4279 *
4280 * So: when transitioning:
4281 * Online -> failed, just set the failed flag; the monitor will propagate it.
4282 * spare -> online, the device might need to be added to the array.
4283 * spare -> failed, just set failed. Don't worry whether it is in an array or not.
4284 */
4285 static void ddf_set_disk(struct active_array *a, int n, int state)
4286 {
4287 struct ddf_super *ddf = a->container->sb;
4288 unsigned int inst = a->info.container_member, n_bvd;
4289 struct vcl *vcl;
4290 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4291 &n_bvd, &vcl);
4292 int pd;
4293 struct mdinfo *mdi;
4294 struct dl *dl;
4295
4296 dprintf("%s: %d to %x\n", __func__, n, state);
4297 if (vc == NULL) {
4298 dprintf("ddf: cannot find instance %d!!\n", inst);
4299 return;
4300 }
4301 /* Find the matching slot in 'info'. */
4302 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4303 if (mdi->disk.raid_disk == n)
4304 break;
4305 if (!mdi) {
4306 pr_err("%s: cannot find raid disk %d\n",
4307 __func__, n);
4308 return;
4309 }
4310
4311 /* and find the 'dl' entry corresponding to that. */
4312 for (dl = ddf->dlist; dl; dl = dl->next)
4313 if (mdi->state_fd >= 0 &&
4314 mdi->disk.major == dl->major &&
4315 mdi->disk.minor == dl->minor)
4316 break;
4317 if (!dl) {
4318 pr_err("%s: cannot find raid disk %d (%d/%d)\n",
4319 __func__, n,
4320 mdi->disk.major, mdi->disk.minor);
4321 return;
4322 }
4323
4324 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4325 if (pd < 0 || pd != dl->pdnum) {
4326 /* disk doesn't currently exist or has changed.
4327 * If it is now in_sync, insert it. */
4328 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4329 __func__, dl->pdnum, dl->major, dl->minor,
4330 be32_to_cpu(dl->disk.refnum));
4331 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4332 __func__, inst, n_bvd,
4333 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4334 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4335 pd = dl->pdnum; /* FIXME: is this really correct ? */
4336 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4337 LBA_OFFSET(ddf, vc)[n_bvd] =
4338 cpu_to_be64(mdi->data_offset);
4339 be16_clear(ddf->phys->entries[pd].type,
4340 cpu_to_be16(DDF_Global_Spare));
4341 be16_set(ddf->phys->entries[pd].type,
4342 cpu_to_be16(DDF_Active_in_VD));
4343 ddf_set_updates_pending(ddf);
4344 }
4345 } else {
4346 be16 old = ddf->phys->entries[pd].state;
4347 if (state & DS_FAULTY)
4348 be16_set(ddf->phys->entries[pd].state,
4349 cpu_to_be16(DDF_Failed));
4350 if (state & DS_INSYNC) {
4351 be16_set(ddf->phys->entries[pd].state,
4352 cpu_to_be16(DDF_Online));
4353 be16_clear(ddf->phys->entries[pd].state,
4354 cpu_to_be16(DDF_Rebuilding));
4355 }
4356 if (!be16_eq(old, ddf->phys->entries[pd].state))
4357 ddf_set_updates_pending(ddf);
4358 }
4359
4360 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4361 be32_to_cpu(dl->disk.refnum), state,
4362 be16_to_cpu(ddf->phys->entries[pd].state));
4363
4364 /* Now we need to check the state of the array and update
4365 * virtual_disk.entries[n].state.
4366 * It needs to be one of "optimal", "degraded", "failed".
4367 * I don't understand 'deleted' or 'missing'.
4368 */
4369 state = get_svd_state(ddf, vcl);
4370
4371 if (ddf->virt->entries[inst].state !=
4372 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4373 | state)) {
4374
4375 ddf->virt->entries[inst].state =
4376 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4377 | state;
4378 ddf_set_updates_pending(ddf);
4379 }
4380
4381 }
4382
4383 static void ddf_sync_metadata(struct supertype *st)
4384 {
4385
4386 /*
4387 * Write all data to all devices.
4388 * Later, we might be able to track whether only local changes
4389 * have been made, or whether any global data has been changed,
4390 * but ddf is sufficiently weird that it probably always
4391 * changes global data ....
4392 */
4393 struct ddf_super *ddf = st->sb;
4394 if (!ddf->updates_pending)
4395 return;
4396 ddf->updates_pending = 0;
4397 __write_init_super_ddf(st);
4398 dprintf("ddf: sync_metadata\n");
4399 }
4400
4401 static int del_from_conflist(struct vcl **list, const char *guid)
4402 {
4403 struct vcl **p;
4404 int found = 0;
4405 for (p = list; p && *p; p = &((*p)->next))
4406 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4407 found = 1;
4408 *p = (*p)->next;
4409 }
4410 return found;
4411 }
4412
4413 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4414 {
4415 struct dl *dl;
4416 unsigned int vdnum, i;
4417 vdnum = find_vde_by_guid(ddf, guid);
4418 if (vdnum == DDF_NOTFOUND) {
4419 pr_err("%s: could not find VD %s\n", __func__,
4420 guid_str(guid));
4421 return -1;
4422 }
4423 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4424 pr_err("%s: could not find conf %s\n", __func__,
4425 guid_str(guid));
4426 return -1;
4427 }
4428 for (dl = ddf->dlist; dl; dl = dl->next)
4429 for (i = 0; i < ddf->max_part; i++)
4430 if (dl->vlist[i] != NULL &&
4431 !memcmp(dl->vlist[i]->conf.guid, guid,
4432 DDF_GUID_LEN))
4433 dl->vlist[i] = NULL;
4434 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4435 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4436 return 0;
4437 }
4438
4439 static int kill_subarray_ddf(struct supertype *st)
4440 {
4441 struct ddf_super *ddf = st->sb;
4442 /*
4443 * currentconf is set in container_content_ddf,
4444 * called with subarray arg
4445 */
4446 struct vcl *victim = ddf->currentconf;
4447 struct vd_config *conf;
4448 ddf->currentconf = NULL;
4449 unsigned int vdnum;
4450 if (!victim) {
4451 pr_err("%s: nothing to kill\n", __func__);
4452 return -1;
4453 }
4454 conf = &victim->conf;
4455 vdnum = find_vde_by_guid(ddf, conf->guid);
4456 if (vdnum == DDF_NOTFOUND) {
4457 pr_err("%s: could not find VD %s\n", __func__,
4458 guid_str(conf->guid));
4459 return -1;
4460 }
4461 if (st->update_tail) {
4462 struct virtual_disk *vd;
4463 int len = sizeof(struct virtual_disk)
4464 + sizeof(struct virtual_entry);
4465 vd = xmalloc(len);
4466 if (vd == NULL) {
4467 pr_err("%s: failed to allocate %d bytes\n", __func__,
4468 len);
4469 return -1;
4470 }
4471 memset(vd, 0 , len);
4472 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4473 vd->populated_vdes = cpu_to_be16(0);
4474 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4475 /* we use DDF_state_deleted as a marker */
4476 vd->entries[0].state = DDF_state_deleted;
4477 append_metadata_update(st, vd, len);
4478 } else {
4479 _kill_subarray_ddf(ddf, conf->guid);
4480 ddf_set_updates_pending(ddf);
4481 ddf_sync_metadata(st);
4482 }
4483 return 0;
4484 }
4485
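/* Descriptive comment added editorially (not in the original source):
 * copy the per-disk mapping (phys_refnum plus the LBA offsets that
 * follow it) from the BVD in 'update' whose sec_elmnt_seq matches
 * 'conf' into 'conf' itself.
 */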
4486 static void copy_matching_bvd(struct ddf_super *ddf,
4487 struct vd_config *conf,
4488 const struct metadata_update *update)
4489 {
4490 unsigned int mppe =
4491 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4492 unsigned int len = ddf->conf_rec_len * 512;
4493 char *p;
4494 struct vd_config *vc;
4495 for (p = update->buf; p < update->buf + update->len; p += len) {
4496 vc = (struct vd_config *) p;
4497 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4498 memcpy(conf->phys_refnum, vc->phys_refnum,
4499 mppe * (sizeof(__u32) + sizeof(__u64)));
4500 return;
4501 }
4502 }
4503 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4504 conf->sec_elmnt_seq, guid_str(conf->guid));
4505 }
4506
4507 static void ddf_process_update(struct supertype *st,
4508 struct metadata_update *update)
4509 {
4510 /* Apply this update to the metadata.
4511 * The first 4 bytes are a DDF_*_MAGIC which guides
4512 * our actions.
4513 * Possible updates are:
4514 * DDF_PHYS_RECORDS_MAGIC
4515 * Add a new physical device or remove an old one.
4516 * Changes to this record only happen implicitly.
4517 * used_pdes is the device number.
4518 * DDF_VIRT_RECORDS_MAGIC
4519 * Add a new VD. Possibly also change the 'access' bits.
4520 * populated_vdes is the entry number.
4521 * DDF_VD_CONF_MAGIC
4522 * New or updated VD. The VIRT_RECORD must already
4523 * exist. For an update, phys_refnum and lba_offset
4524 * (at least) are updated, and the VD_CONF must
4525 * be written to precisely those devices listed with
4526 * a phys_refnum.
4527 * DDF_SPARE_ASSIGN_MAGIC
4528 * replacement Spare Assignment Record... but for which device?
4529 *
4530 * So, e.g.:
4531 * - to create a new array, we send a VIRT_RECORD and
4532 * a VD_CONF. Then assemble and start the array.
4533 * - to activate a spare we send a VD_CONF to add the phys_refnum
4534 * and offset. This will also mark the spare as active with
4535 * a spare-assignment record.
4536 */
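/* Editorial sketch (not part of the original source) of how the
 * VIRT_RECORD half of a "create new array" update might be queued
 * from managemon, mirroring the pattern used in kill_subarray_ddf():
 *
 *	int len = sizeof(struct virtual_disk) +
 *		  sizeof(struct virtual_entry);
 *	struct virtual_disk *vd = xcalloc(1, len);
 *	vd->magic = DDF_VIRT_RECORDS_MAGIC;
 *	memcpy(vd->entries[0].guid, new_guid, DDF_GUID_LEN);
 *	append_metadata_update(st, vd, len);
 *
 * Here 'new_guid' is a hypothetical buffer holding the new VD's GUID;
 * a real creation path fills in the remaining virtual_entry fields too.
 */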
4537 struct ddf_super *ddf = st->sb;
4538 be32 *magic = (be32 *)update->buf;
4539 struct phys_disk *pd;
4540 struct virtual_disk *vd;
4541 struct vd_config *vc;
4542 struct vcl *vcl;
4543 struct dl *dl;
4544 unsigned int ent;
4545 unsigned int pdnum, pd2, len;
4546
4547 dprintf("Process update %x\n", be32_to_cpu(*magic));
4548
4549 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4550
4551 if (update->len != (sizeof(struct phys_disk) +
4552 sizeof(struct phys_disk_entry)))
4553 return;
4554 pd = (struct phys_disk*)update->buf;
4555
4556 ent = be16_to_cpu(pd->used_pdes);
4557 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4558 return;
4559 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4560 struct dl **dlp;
4561 /* removing this disk. */
4562 be16_set(ddf->phys->entries[ent].state,
4563 cpu_to_be16(DDF_Missing));
4564 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4565 struct dl *dl = *dlp;
4566 if (dl->pdnum == (signed)ent) {
4567 close(dl->fd);
4568 dl->fd = -1;
4569 /* FIXME this doesn't free
4570 * dl->devname */
4571 update->space = dl;
4572 *dlp = dl->next;
4573 break;
4574 }
4575 }
4576 ddf_set_updates_pending(ddf);
4577 return;
4578 }
4579 if (!all_ff(ddf->phys->entries[ent].guid))
4580 return;
4581 ddf->phys->entries[ent] = pd->entries[0];
4582 ddf->phys->used_pdes = cpu_to_be16
4583 (1 + be16_to_cpu(ddf->phys->used_pdes));
4584 ddf_set_updates_pending(ddf);
4585 if (ddf->add_list) {
4586 struct active_array *a;
4587 struct dl *al = ddf->add_list;
4588 ddf->add_list = al->next;
4589
4590 al->next = ddf->dlist;
4591 ddf->dlist = al;
4592
4593 /* As a device has been added, we should check
4594 * for any degraded devices that might make
4595 * use of this spare */
4596 for (a = st->arrays ; a; a=a->next)
4597 a->check_degraded = 1;
4598 }
4599 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4600
4601 if (update->len != (sizeof(struct virtual_disk) +
4602 sizeof(struct virtual_entry)))
4603 return;
4604 vd = (struct virtual_disk*)update->buf;
4605
4606 if (vd->entries[0].state == DDF_state_deleted) {
4607 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4608 return;
4609 } else {
4610
4611 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4612 if (ent != DDF_NOTFOUND) {
4613 dprintf("%s: VD %s exists already in slot %d\n",
4614 __func__, guid_str(vd->entries[0].guid),
4615 ent);
4616 return;
4617 }
4618 ent = find_unused_vde(ddf);
4619 if (ent == DDF_NOTFOUND)
4620 return;
4621 ddf->virt->entries[ent] = vd->entries[0];
4622 ddf->virt->populated_vdes =
4623 cpu_to_be16(
4624 1 + be16_to_cpu(
4625 ddf->virt->populated_vdes));
4626 dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
4627 __func__, guid_str(vd->entries[0].guid), ent,
4628 ddf->virt->entries[ent].state,
4629 ddf->virt->entries[ent].init_state);
4630 }
4631 ddf_set_updates_pending(ddf);
4632 }
4633
4634 else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4635 vc = (struct vd_config*)update->buf;
4636 len = ddf->conf_rec_len * 512;
4637 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4638 pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
4639 __func__, guid_str(vc->guid), update->len,
4640 vc->sec_elmnt_count);
4641 return;
4642 }
4643 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4644 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4645 break;
4646 dprintf("%s: conf update for %s (%s)\n", __func__,
4647 guid_str(vc->guid), (vcl ? "old" : "new"));
4648 if (vcl) {
4649 /* An update, just copy the phys_refnum and lba_offset
4650 * fields
4651 */
4652 unsigned int i;
4653 unsigned int k;
4654 copy_matching_bvd(ddf, &vcl->conf, update);
4655 for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
4656 dprintf("BVD %u has %08x at %llu\n", 0,
4657 be32_to_cpu(vcl->conf.phys_refnum[k]),
4658 be64_to_cpu(LBA_OFFSET(ddf,
4659 &vcl->conf)[k]));
4660 for (i = 1; i < vc->sec_elmnt_count; i++) {
4661 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4662 update);
4663 for (k = 0; k < be16_to_cpu(
4664 vc->prim_elmnt_count); k++)
4665 dprintf("BVD %u has %08x at %llu\n", i,
4666 be32_to_cpu
4667 (vcl->other_bvds[i-1]->
4668 phys_refnum[k]),
4669 be64_to_cpu
4670 (LBA_OFFSET
4671 (ddf,
4672 vcl->other_bvds[i-1])[k]));
4673 }
4674 } else {
4675 /* A new VD_CONF */
4676 unsigned int i;
4677 if (!update->space)
4678 return;
4679 vcl = update->space;
4680 update->space = NULL;
4681 vcl->next = ddf->conflist;
4682 memcpy(&vcl->conf, vc, len);
4683 ent = find_vde_by_guid(ddf, vc->guid);
4684 if (ent == DDF_NOTFOUND)
4685 return;
4686 vcl->vcnum = ent;
4687 ddf->conflist = vcl;
4688 for (i = 1; i < vc->sec_elmnt_count; i++)
4689 memcpy(vcl->other_bvds[i-1],
4690 update->buf + len * i, len);
4691 }
4692 /* Set DDF_Transition on all Failed devices - to help
4693 * us detect those that are no longer in use
4694 */
4695 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4696 pdnum++)
4697 if (be16_and(ddf->phys->entries[pdnum].state,
4698 cpu_to_be16(DDF_Failed)))
4699 be16_set(ddf->phys->entries[pdnum].state,
4700 cpu_to_be16(DDF_Transition));
4701 /* Now make sure vlist is correct for each dl. */
4702 for (dl = ddf->dlist; dl; dl = dl->next) {
4703 unsigned int vn = 0;
4704 int in_degraded = 0;
4705 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4706 unsigned int dn, ibvd;
4707 const struct vd_config *conf;
4708 int vstate;
4709 dn = get_pd_index_from_refnum(vcl,
4710 dl->disk.refnum,
4711 ddf->mppe,
4712 &conf, &ibvd);
4713 if (dn == DDF_NOTFOUND)
4714 continue;
4715 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4716 dl->pdnum,
4717 be32_to_cpu(dl->disk.refnum),
4718 guid_str(conf->guid),
4719 conf->sec_elmnt_seq, vn);
4720 /* Clear the Transition flag */
4721 if (be16_and
4722 (ddf->phys->entries[dl->pdnum].state,
4723 cpu_to_be16(DDF_Failed)))
4724 be16_clear(ddf->phys
4725 ->entries[dl->pdnum].state,
4726 cpu_to_be16(DDF_Transition));
4727 dl->vlist[vn++] = vcl;
4728 vstate = ddf->virt->entries[vcl->vcnum].state
4729 & DDF_state_mask;
4730 if (vstate == DDF_state_degraded ||
4731 vstate == DDF_state_part_optimal)
4732 in_degraded = 1;
4733 }
4734 while (vn < ddf->max_part)
4735 dl->vlist[vn++] = NULL;
4736 if (dl->vlist[0]) {
4737 be16_clear(ddf->phys->entries[dl->pdnum].type,
4738 cpu_to_be16(DDF_Global_Spare));
4739 if (!be16_and(ddf->phys
4740 ->entries[dl->pdnum].type,
4741 cpu_to_be16(DDF_Active_in_VD))) {
4742 be16_set(ddf->phys
4743 ->entries[dl->pdnum].type,
4744 cpu_to_be16(DDF_Active_in_VD));
4745 if (in_degraded)
4746 be16_set(ddf->phys
4747 ->entries[dl->pdnum]
4748 .state,
4749 cpu_to_be16
4750 (DDF_Rebuilding));
4751 }
4752 }
4753 if (dl->spare) {
4754 be16_clear(ddf->phys->entries[dl->pdnum].type,
4755 cpu_to_be16(DDF_Global_Spare));
4756 be16_set(ddf->phys->entries[dl->pdnum].type,
4757 cpu_to_be16(DDF_Spare));
4758 }
4759 if (!dl->vlist[0] && !dl->spare) {
4760 be16_set(ddf->phys->entries[dl->pdnum].type,
4761 cpu_to_be16(DDF_Global_Spare));
4762 be16_clear(ddf->phys->entries[dl->pdnum].type,
4763 cpu_to_be16(DDF_Spare));
4764 be16_clear(ddf->phys->entries[dl->pdnum].type,
4765 cpu_to_be16(DDF_Active_in_VD));
4766 }
4767 }
4768
4769 /* Now remove any 'Failed' devices that are not part
4770 * of any VD. They will have the Transition flag set.
4771 * Once done, we need to update all dl->pdnum numbers.
4772 */
4773 pd2 = 0;
4774 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
4775 pdnum++) {
4776 if (be16_and(ddf->phys->entries[pdnum].state,
4777 cpu_to_be16(DDF_Failed))
4778 && be16_and(ddf->phys->entries[pdnum].state,
4779 cpu_to_be16(DDF_Transition))) {
4780 /* skip this one unless it is in dlist */
4781 for (dl = ddf->dlist; dl; dl = dl->next)
4782 if (dl->pdnum == (int)pdnum)
4783 break;
4784 if (!dl)
4785 continue;
4786 }
4787 if (pdnum == pd2)
4788 pd2++;
4789 else {
4790 ddf->phys->entries[pd2] =
4791 ddf->phys->entries[pdnum];
4792 for (dl = ddf->dlist; dl; dl = dl->next)
4793 if (dl->pdnum == (int)pdnum)
4794 dl->pdnum = pd2;
4795 pd2++;
4796 }
4797 }
4798 ddf->phys->used_pdes = cpu_to_be16(pd2);
4799 while (pd2 < pdnum) {
4800 memset(ddf->phys->entries[pd2].guid, 0xff,
4801 DDF_GUID_LEN);
4802 pd2++;
4803 }
4804
4805 ddf_set_updates_pending(ddf);
4806 }
4807 /* case DDF_SPARE_ASSIGN_MAGIC */
4808 }
4809
4810 static void ddf_prepare_update(struct supertype *st,
4811 struct metadata_update *update)
4812 {
4813 /* This update arrived at managemon.
4814 * We are about to pass it to monitor.
4815 * If a malloc is needed, do it here.
4816 */
4817 struct ddf_super *ddf = st->sb;
4818 be32 *magic = (be32 *)update->buf;
4819 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4820 struct vcl *vcl;
4821 struct vd_config *conf = (struct vd_config *) update->buf;
4822 if (posix_memalign(&update->space, 512,
4823 offsetof(struct vcl, conf)
4824 + ddf->conf_rec_len * 512) != 0) {
4825 update->space = NULL;
4826 return;
4827 }
4828 vcl = update->space;
4829 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4830 if (alloc_other_bvds(ddf, vcl) != 0) {
4831 free(update->space);
4832 update->space = NULL;
4833 }
4834 }
4835 }
4836
4837 /*
4838 * Check degraded state of a RAID10.
4839 * Returns 2 for good, 1 for degraded, 0 for failed, and -1 for error.
4840 */
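/* Worked example (editorial, not from the original source): with md
 * layout 0x102 (two near copies), n_prim == 2, so an 8-device array
 * splits into 4 BVDs of 2 devices each.  A BVD with no working member
 * fails the whole array (return 0); a BVD with only one working member
 * leaves it degraded (return 1); otherwise the array is good (return 2).
 */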
4841 static int raid10_degraded(struct mdinfo *info)
4842 {
4843 int n_prim, n_bvds;
4844 int i;
4845 struct mdinfo *d;
4846 char *found;
4847 int ret = -1;
4848
4849 n_prim = info->array.layout & ~0x100;
4850 n_bvds = info->array.raid_disks / n_prim;
4851 found = xmalloc(n_bvds);
4852 if (found == NULL)
4853 return ret;
4854 memset(found, 0, n_bvds);
4855 for (d = info->devs; d; d = d->next) {
4856 i = d->disk.raid_disk / n_prim;
4857 if (i >= n_bvds) {
4858 pr_err("%s: BUG: invalid raid disk\n", __func__);
4859 goto out;
4860 }
4861 if (d->state_fd > 0)
4862 found[i]++;
4863 }
4864 ret = 2;
4865 for (i = 0; i < n_bvds; i++)
4866 if (!found[i]) {
4867 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4868 ret = 0;
4869 goto out;
4870 } else if (found[i] < n_prim) {
4871 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4872 n_bvds);
4873 ret = 1;
4874 }
4875 out:
4876 free(found);
4877 return ret;
4878 }
4879
4880 /*
4881 * Check if the array 'a' is degraded but not failed.
4882 * If it is, find as many spares as are available and needed and
4883 * arrange for their inclusion.
4884 * We only choose devices which are not already in the array,
4885 * and prefer those with a spare-assignment to this array.
4886 * Otherwise we choose global spares, always assuming that
4887 * there is enough room.
4888 * For each spare that we assign, we return an 'mdinfo' which
4889 * describes the position for the device in the array.
4890 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4891 * the new phys_refnum and lba_offset values.
4892 *
4893 * Only worry about BVDs at the moment.
4894 */
4895 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4896 struct metadata_update **updates)
4897 {
4898 int working = 0;
4899 struct mdinfo *d;
4900 struct ddf_super *ddf = a->container->sb;
4901 int global_ok = 0;
4902 struct mdinfo *rv = NULL;
4903 struct mdinfo *di;
4904 struct metadata_update *mu;
4905 struct dl *dl;
4906 int i;
4907 unsigned int j;
4908 struct vcl *vcl;
4909 struct vd_config *vc;
4910 unsigned int n_bvd;
4911
4912 for (d = a->info.devs ; d ; d = d->next) {
4913 if ((d->curr_state & DS_FAULTY) &&
4914 d->state_fd >= 0)
4915 /* wait for Removal to happen */
4916 return NULL;
4917 if (d->state_fd >= 0)
4918 working ++;
4919 }
4920
4921 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4922 a->info.array.raid_disks,
4923 a->info.array.level);
4924 if (working == a->info.array.raid_disks)
4925 return NULL; /* array not degraded */
4926 switch (a->info.array.level) {
4927 case 1:
4928 if (working == 0)
4929 return NULL; /* failed */
4930 break;
4931 case 4:
4932 case 5:
4933 if (working < a->info.array.raid_disks - 1)
4934 return NULL; /* failed */
4935 break;
4936 case 6:
4937 if (working < a->info.array.raid_disks - 2)
4938 return NULL; /* failed */
4939 break;
4940 case 10:
4941 if (raid10_degraded(&a->info) < 1)
4942 return NULL;
4943 break;
4944 default: /* concat or stripe */
4945 return NULL; /* failed */
4946 }
4947
4948 /* For each slot, if it is not working, find a spare */
4949 dl = ddf->dlist;
4950 for (i = 0; i < a->info.array.raid_disks; i++) {
4951 for (d = a->info.devs ; d ; d = d->next)
4952 if (d->disk.raid_disk == i)
4953 break;
4954 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4955 if (d && (d->state_fd >= 0))
4956 continue;
4957
4958 /* OK, this device needs recovery. Find a spare */
4959 again:
4960 for ( ; dl ; dl = dl->next) {
4961 unsigned long long esize;
4962 unsigned long long pos;
4963 struct mdinfo *d2;
4964 int is_global = 0;
4965 int is_dedicated = 0;
4966 struct extent *ex;
4967 unsigned int j;
4968 be16 state = ddf->phys->entries[dl->pdnum].state;
4969 if (be16_and(state,
4970 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4971 !be16_and(state,
4972 cpu_to_be16(DDF_Online)))
4973 continue;
4974
4975 /* If in this array, skip */
4976 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4977 if (d2->state_fd >= 0 &&
4978 d2->disk.major == dl->major &&
4979 d2->disk.minor == dl->minor) {
4980 dprintf("%x:%x (%08x) already in array\n",
4981 dl->major, dl->minor,
4982 be32_to_cpu(dl->disk.refnum));
4983 break;
4984 }
4985 if (d2)
4986 continue;
4987 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4988 cpu_to_be16(DDF_Spare))) {
4989 /* Check spare assign record */
4990 if (dl->spare) {
4991 if (dl->spare->type & DDF_spare_dedicated) {
4992 /* check spare_ents for guid */
4993 for (j = 0 ;
4994 j < be16_to_cpu
4995 (dl->spare
4996 ->populated);
4997 j++) {
4998 if (memcmp(dl->spare->spare_ents[j].guid,
4999 ddf->virt->entries[a->info.container_member].guid,
5000 DDF_GUID_LEN) == 0)
5001 is_dedicated = 1;
5002 }
5003 } else
5004 is_global = 1;
5005 }
5006 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
5007 cpu_to_be16(DDF_Global_Spare))) {
5008 is_global = 1;
5009 } else if (!be16_and(ddf->phys
5010 ->entries[dl->pdnum].state,
5011 cpu_to_be16(DDF_Failed))) {
5012 /* we can possibly use some of this */
5013 is_global = 1;
5014 }
5015 if ( ! (is_dedicated ||
5016 (is_global && global_ok))) {
5017 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
5018 is_dedicated, is_global);
5019 continue;
5020 }
5021
5022 /* We are allowed to use this device - is there space?
5023 * We need a->info.component_size sectors */
5024 ex = get_extents(ddf, dl);
5025 if (!ex) {
5026 dprintf("cannot get extents\n");
5027 continue;
5028 }
5029 j = 0; pos = 0;
5030 esize = 0;
5031
5032 do {
5033 esize = ex[j].start - pos;
5034 if (esize >= a->info.component_size)
5035 break;
5036 pos = ex[j].start + ex[j].size;
5037 j++;
5038 } while (ex[j-1].size);
5039
5040 free(ex);
5041 if (esize < a->info.component_size) {
5042 dprintf("%x:%x has no room: %llu %llu\n",
5043 dl->major, dl->minor,
5044 esize, a->info.component_size);
5045 /* No room */
5046 continue;
5047 }
5048
5049 /* Cool, we have a device with some space at pos */
5050 di = xcalloc(1, sizeof(*di));
5051 di->disk.number = i;
5052 di->disk.raid_disk = i;
5053 di->disk.major = dl->major;
5054 di->disk.minor = dl->minor;
5055 di->disk.state = 0;
5056 di->recovery_start = 0;
5057 di->data_offset = pos;
5058 di->component_size = a->info.component_size;
5059 di->container_member = dl->pdnum;
5060 di->next = rv;
5061 rv = di;
5062 dprintf("%x:%x (%08x) to be %d at %llu\n",
5063 dl->major, dl->minor,
5064 be32_to_cpu(dl->disk.refnum), i, pos);
5065
5066 break;
5067 }
5068 if (!dl && ! global_ok) {
5069 /* not enough dedicated spares, try global */
5070 global_ok = 1;
5071 dl = ddf->dlist;
5072 goto again;
5073 }
5074 }
5075
5076 if (!rv)
5077 /* No spares found */
5078 return rv;
5079 /* Now 'rv' has a list of devices to return.
5080 * Create a metadata_update record to update the
5081 * phys_refnum and lba_offset values
5082 */
5083 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5084 &n_bvd, &vcl);
5085 if (vc == NULL)
5086 return NULL;
5087
5088 mu = xmalloc(sizeof(*mu));
5089 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5090 free(mu);
5091 return NULL;
5092 }
5093
5094 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5095 mu->buf = xmalloc(mu->len);
5096 mu->space = NULL;
5097 mu->space_list = NULL;
5098 mu->next = *updates;
5099 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5100 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5101 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5102 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5103
5104 vc = (struct vd_config*)mu->buf;
5105 for (di = rv ; di ; di = di->next) {
5106 unsigned int i_sec, i_prim;
5107 i_sec = di->disk.raid_disk
5108 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5109 i_prim = di->disk.raid_disk
5110 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5111 vc = (struct vd_config *)(mu->buf
5112 + i_sec * ddf->conf_rec_len * 512);
5113 for (dl = ddf->dlist; dl; dl = dl->next)
5114 if (dl->major == di->disk.major
5115 && dl->minor == di->disk.minor)
5116 break;
5117 if (!dl) {
5118 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
5119 __func__, di->disk.raid_disk,
5120 di->disk.major, di->disk.minor);
5121 return NULL;
5122 }
5123 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5124 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5125 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5126 be32_to_cpu(vc->phys_refnum[i_prim]),
5127 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5128 }
5129 *updates = mu;
5130 return rv;
5131 }
5132 #endif /* MDASSEMBLE */
5133
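/* Descriptive comment added editorially (not in the original source):
 * map an md RAID level to the default md layout used for DDF arrays,
 * e.g. level 5 -> ALGORITHM_LEFT_SYMMETRIC, level 6 ->
 * ALGORITHM_ROTATING_N_CONTINUE, level 10 -> 0x102 (near-2 copies).
 */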
5134 static int ddf_level_to_layout(int level)
5135 {
5136 switch(level) {
5137 case 0:
5138 case 1:
5139 return 0;
5140 case 5:
5141 return ALGORITHM_LEFT_SYMMETRIC;
5142 case 6:
5143 return ALGORITHM_ROTATING_N_CONTINUE;
5144 case 10:
5145 return 0x102;
5146 default:
5147 return UnSet;
5148 }
5149 }
5150
5151 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5152 {
5153 if (level && *level == UnSet)
5154 *level = LEVEL_CONTAINER;
5155
5156 if (level && layout && *layout == UnSet)
5157 *layout = ddf_level_to_layout(*level);
5158 }
5159
5160 struct superswitch super_ddf = {
5161 #ifndef MDASSEMBLE
5162 .examine_super = examine_super_ddf,
5163 .brief_examine_super = brief_examine_super_ddf,
5164 .brief_examine_subarrays = brief_examine_subarrays_ddf,
5165 .export_examine_super = export_examine_super_ddf,
5166 .detail_super = detail_super_ddf,
5167 .brief_detail_super = brief_detail_super_ddf,
5168 .validate_geometry = validate_geometry_ddf,
5169 .write_init_super = write_init_super_ddf,
5170 .add_to_super = add_to_super_ddf,
5171 .remove_from_super = remove_from_super_ddf,
5172 .load_container = load_container_ddf,
5173 .copy_metadata = copy_metadata_ddf,
5174 .kill_subarray = kill_subarray_ddf,
5175 #endif
5176 .match_home = match_home_ddf,
5177 .uuid_from_super= uuid_from_super_ddf,
5178 .getinfo_super = getinfo_super_ddf,
5179 .update_super = update_super_ddf,
5180
5181 .avail_size = avail_size_ddf,
5182
5183 .compare_super = compare_super_ddf,
5184
5185 .load_super = load_super_ddf,
5186 .init_super = init_super_ddf,
5187 .store_super = store_super_ddf,
5188 .free_super = free_super_ddf,
5189 .match_metadata_desc = match_metadata_desc_ddf,
5190 .container_content = container_content_ddf,
5191 .default_geometry = default_geometry_ddf,
5192
5193 .external = 1,
5194
5195 #ifndef MDASSEMBLE
5196 /* for mdmon */
5197 .open_new = ddf_open_new,
5198 .set_array_state= ddf_set_array_state,
5199 .set_disk = ddf_set_disk,
5200 .sync_metadata = ddf_sync_metadata,
5201 .process_update = ddf_process_update,
5202 .prepare_update = ddf_prepare_update,
5203 .activate_spare = ddf_activate_spare,
5204 #endif
5205 .name = "ddf",
5206 };