/*
 * super-ddf.c — part of mdadm (thirdparty/mdadm.git)
 * Commit context: "DDF: Fix assorted typos and do some reformatting."
 */
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2014 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add
38 * second-in-decade-of-seventies to convert to linux timestamps.
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* Default for safe_mode_delay. Same value as for IMSM.
51 */
52 static const int DDF_SAFE_MODE_DELAY = 4000;
53
54 /* The DDF metadata handling.
55 * DDF metadata lives at the end of the device.
56 * The last 512 byte block provides an 'anchor' which is used to locate
57 * the rest of the metadata which usually lives immediately behind the anchor.
58 *
59 * Note:
60 * - all multibyte numeric fields are bigendian.
61 * - all strings are space padded.
62 *
63 */
64
/* Wrapper types for bigendian on-disk fields.  Wrapping the raw
 * integer in a single-member struct means the compiler rejects any
 * accidental mixing of CPU-order and disk-order values; all access
 * goes through the helper macros/functions below.
 */
typedef struct __be16 {
	__u16 _v16;
} be16;
/* Equality and bit operations work on the raw (bigendian) value, so
 * both operands must already be in disk order. */
#define be16_eq(x, y) ((x)._v16 == (y)._v16)
#define be16_and(x, y) ((x)._v16 & (y)._v16)
#define be16_or(x, y) ((x)._v16 | (y)._v16)
#define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
#define be16_set(x, y) ((x)._v16 |= (y)._v16)

typedef struct __be32 {
	__u32 _v32;
} be32;
#define be32_eq(x, y) ((x)._v32 == (y)._v32)

typedef struct __be64 {
	__u64 _v64;
} be64;
#define be64_eq(x, y) ((x)._v64 == (y)._v64)
83
84 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
85 static inline be16 cpu_to_be16(__u16 x)
86 {
87 be16 be = { ._v16 = __cpu_to_be16(x) };
88 return be;
89 }
90
91 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
92 static inline be32 cpu_to_be32(__u32 x)
93 {
94 be32 be = { ._v32 = __cpu_to_be32(x) };
95 return be;
96 }
97
98 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
99 static inline be64 cpu_to_be64(__u64 x)
100 {
101 be64 be = { ._v64 = __cpu_to_be64(x) };
102 return be;
103 }
104
105 /* Primary Raid Level (PRL) */
106 #define DDF_RAID0 0x00
107 #define DDF_RAID1 0x01
108 #define DDF_RAID3 0x03
109 #define DDF_RAID4 0x04
110 #define DDF_RAID5 0x05
111 #define DDF_RAID1E 0x11
112 #define DDF_JBOD 0x0f
113 #define DDF_CONCAT 0x1f
114 #define DDF_RAID5E 0x15
115 #define DDF_RAID5EE 0x25
116 #define DDF_RAID6 0x06
117
118 /* Raid Level Qualifier (RLQ) */
119 #define DDF_RAID0_SIMPLE 0x00
120 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
121 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
122 #define DDF_RAID3_0 0x00 /* parity in first extent */
123 #define DDF_RAID3_N 0x01 /* parity in last extent */
124 #define DDF_RAID4_0 0x00 /* parity in first extent */
125 #define DDF_RAID4_N 0x01 /* parity in last extent */
126 /* these apply to raid5e and raid5ee as well */
127 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
128 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
129 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
130 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
131
132 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
133 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
134
135 /* Secondary RAID Level (SRL) */
136 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
137 #define DDF_2MIRRORED 0x01
138 #define DDF_2CONCAT 0x02
139 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
140
141 /* Magic numbers */
142 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
143 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
144 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
145 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
146 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
147 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
148 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
149 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
150 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
151 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
152
153 #define DDF_GUID_LEN 24
154 #define DDF_REVISION_0 "01.00.00"
155 #define DDF_REVISION_2 "01.02.00"
156
/* The 512-byte DDF header.  The same layout is used for the anchor
 * (last sector of the device) and for the primary and secondary
 * headers it points to; 'type' distinguishes them.
 * All multibyte fields are bigendian (see the be* wrappers above).
 */
struct ddf_header {
	be32	magic;		/* DDF_HEADER_MAGIC */
	be32	crc;		/* crc of whole 512 bytes, see calc_crc() */
	char	guid[DDF_GUID_LEN];
	char	revision[8];	/* 01.02.00 */
	be32	seq;		/* starts at '1' */
	be32	timestamp;
	__u8	openflag;
	__u8	foreignflag;
	__u8	enforcegroups;
	__u8	pad0;		/* 0xff */
	__u8	pad1[12];	/* 12 * 0xff */
	/* 64 bytes so far */
	__u8	header_ext[32];	/* reserved: fill with 0xff */
	be64	primary_lba;
	be64	secondary_lba;
	__u8	type;		/* DDF_HEADER_{ANCHOR,PRIMARY,SECONDARY} */
	__u8	pad2[3];	/* 0xff */
	be32	workspace_len;	/* sectors for vendor space -
				 * at least 32768(sectors) */
	be64	workspace_lba;
	be16	max_pd_entries;	/* one of 15, 63, 255, 1023, 4095 */
	be16	max_vd_entries;	/* 2^(4,6,8,10,12)-1 : i.e. as above */
	be16	max_partitions;	/* i.e. max num of configuration
				   record entries per disk */
	be16	config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
				    *12/512) */
	be16	max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
	__u8	pad3[54];	/* 0xff */
	/* 192 bytes so far */
	/* The section offsets below are in sectors, relative to the
	 * primary (or secondary) header location - see load_section(). */
	be32	controller_section_offset;
	be32	controller_section_length;
	be32	phys_section_offset;
	be32	phys_section_length;
	be32	virt_section_offset;
	be32	virt_section_length;
	be32	config_section_offset;
	be32	config_section_length;
	be32	data_section_offset;
	be32	data_section_length;
	be32	bbm_section_offset;
	be32	bbm_section_length;
	be32	diag_space_offset;
	be32	diag_space_length;
	be32	vendor_offset;
	be32	vendor_length;
	/* 256 bytes so far */
	__u8	pad4[256];	/* 0xff */
};
206
207 /* type field */
208 #define DDF_HEADER_ANCHOR 0x00
209 #define DDF_HEADER_PRIMARY 0x01
210 #define DDF_HEADER_SECONDARY 0x02
211
212 /* The content of the 'controller section' - global scope */
/* Content of the 'controller section' - global scope.
 * Identifies the controller that last wrote the metadata.
 */
struct ddf_controller_data {
	be32	magic;			/* DDF_CONTROLLER_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	struct controller_type {
		/* PCI-style identification of the controller */
		be16 vendor_id;
		be16 device_id;
		be16 sub_vendor_id;
		be16 sub_device_id;
	} type;
	char	product_id[16];
	__u8	pad[8];		/* 0xff */
	__u8	vendor_data[448];
};
227
228 /* The content of phys_section - global scope */
/* Content of the phys_section - global scope.
 * One entry per physical disk known to the DDF container.
 */
struct phys_disk {
	be32	magic;		/* DDF_PHYS_RECORDS_MAGIC */
	be32	crc;
	be16	used_pdes;	/* number of entries in use */
	be16	max_pdes;
	__u8	pad[52];
	struct phys_disk_entry {
		char	guid[DDF_GUID_LEN];
		be32	refnum;	/* unique reference, matched against
				 * disk_data.refnum and
				 * vd_config.phys_refnum */
		be16	type;	/* DDF_Forced_PD_GUID etc. bitmap below */
		be16	state;	/* DDF_Online etc. bitmap below */
		be64	config_size; /* DDF structures must be after here */
		char	path[18];	/* Another horrible structure really
					 * but is "used for information
					 * purposes only" */
		__u8	pad[6];
	} entries[0];		/* trailing variable-length array */
};
247
248 /* phys_disk_entry.type is a bitmap - bigendian remember */
249 #define DDF_Forced_PD_GUID 1
250 #define DDF_Active_in_VD 2
251 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
252 #define DDF_Spare 8 /* overrides Global_spare */
253 #define DDF_Foreign 16
254 #define DDF_Legacy 32 /* no DDF on this device */
255
256 #define DDF_Interface_mask 0xf00
257 #define DDF_Interface_SCSI 0x100
258 #define DDF_Interface_SAS 0x200
259 #define DDF_Interface_SATA 0x300
260 #define DDF_Interface_FC 0x400
261
262 /* phys_disk_entry.state is a bigendian bitmap */
263 #define DDF_Online 1
264 #define DDF_Failed 2 /* overrides 1,4,8 */
265 #define DDF_Rebuilding 4
266 #define DDF_Transition 8
267 #define DDF_SMART 16
268 #define DDF_ReadErrors 32
269 #define DDF_Missing 64
270
271 /* The content of the virt_section global scope */
/* Content of the virt_section - global scope.
 * One entry per virtual disk (array) defined in the container.
 */
struct virtual_disk {
	be32	magic;		/* DDF_VIRT_RECORDS_MAGIC */
	be32	crc;
	be16	populated_vdes;	/* number of entries in use */
	be16	max_vdes;
	__u8	pad[52];
	struct virtual_entry {
		char	guid[DDF_GUID_LEN];
		be16	unit;
		__u16	pad0;	/* 0xffff */
		be16	guid_crc;
		be16	type;	/* DDF_Shared etc. bitmap below */
		__u8	state;	/* DDF_state_* values/bits below */
		__u8	init_state; /* DDF_init_* / DDF_access_* below */
		__u8	pad1[14];
		char	name[16];
	} entries[0];		/* trailing variable-length array */
};
290
291 /* virtual_entry.type is a bitmap - bigendian */
292 #define DDF_Shared 1
293 #define DDF_Enforce_Groups 2
294 #define DDF_Unicode 4
295 #define DDF_Owner_Valid 8
296
297 /* virtual_entry.state is a bigendian bitmap */
298 #define DDF_state_mask 0x7
299 #define DDF_state_optimal 0x0
300 #define DDF_state_degraded 0x1
301 #define DDF_state_deleted 0x2
302 #define DDF_state_missing 0x3
303 #define DDF_state_failed 0x4
304 #define DDF_state_part_optimal 0x5
305
306 #define DDF_state_morphing 0x8
307 #define DDF_state_inconsistent 0x10
308
309 /* virtual_entry.init_state is a bigendian bitmap */
310 #define DDF_initstate_mask 0x03
311 #define DDF_init_not 0x00
#define DDF_init_quick 0x01 /* initialisation in progress,
				 * i.e. 'state_inconsistent' */
314 #define DDF_init_full 0x02
315
316 #define DDF_access_mask 0xc0
317 #define DDF_access_rw 0x00
318 #define DDF_access_ro 0x80
319 #define DDF_access_blocked 0xc0
320
321 /* The content of the config_section - local scope
322 * It has multiple records each config_record_len sectors
323 * They can be vd_config or spare_assign
324 */
325
/* A configuration record describing one virtual disk (or one BVD of
 * a two-level array).  Lives in the config_section - local scope.
 * The record is config_record_len sectors long; the fixed part below
 * is followed by 'mppe' phys_refnum entries and then the matching
 * LBA offsets (see LBA_OFFSET()).
 */
struct vd_config {
	be32	magic;		/* DDF_VD_CONF_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	be32	timestamp;
	be32	seqnum;		/* higher seqnum wins when records conflict */
	__u8	pad0[24];
	be16	prim_elmnt_count;
	__u8	chunk_shift;	/* 0 == 512, 1==1024 etc */
	__u8	prl;		/* Primary RAID Level, DDF_RAID* above */
	__u8	rlq;		/* RAID Level Qualifier, refines prl */
	__u8	sec_elmnt_count; /* >1 means a two-level array */
	__u8	sec_elmnt_seq;	/* which BVD this record describes */
	__u8	srl;		/* Secondary RAID Level, DDF_2* above */
	be64	blocks;		/* blocks per component could be different
				 * on different component devices...(only
				 * for concat I hope) */
	be64	array_blocks;	/* blocks in array */
	__u8	pad1[8];
	be32	spare_refs[8];
	__u8	cache_pol[8];	/* see DDF_cache_* bitmap below */
	__u8	bg_rate;
	__u8	pad2[3];
	__u8	pad3[52];
	__u8	pad4[192];
	__u8	v0[32];	/* reserved- 0xff */
	__u8	v1[32];	/* reserved- 0xff */
	__u8	v2[16];	/* reserved- 0xff */
	__u8	v3[16];	/* reserved- 0xff */
	__u8	vendor[32];
	be32	phys_refnum[0];	/* refnum of each disk in sequence */
	/*__u64	lba_offset[0];  LBA offset in each phys.  Note extents in a
				bvd are always the same size */
};
/* Locate the lba_offset array that follows the mppe phys_refnum slots */
#define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
361
362 /* vd_config.cache_pol[7] is a bitmap */
363 #define DDF_cache_writeback 1 /* else writethrough */
364 #define DDF_cache_wadaptive 2 /* only applies if writeback */
365 #define DDF_cache_readahead 4
366 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
367 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
368 #define DDF_cache_wallowed 32 /* enable write caching */
369 #define DDF_cache_rallowed 64 /* enable read caching */
370
/* A spare-assignment record in the config_section - local scope.
 * Lists the virtual disks this physical disk is a spare for.
 */
struct spare_assign {
	be32	magic;		/* DDF_SPARE_ASSIGN_MAGIC */
	be32	crc;
	be32	timestamp;
	__u8	reserved[7];
	__u8	type;		/* DDF_spare_* bitmap below */
	be16	populated;	/* SAEs used */
	be16	max;		/* max SAEs */
	__u8	pad[8];
	struct spare_assign_entry {
		char	guid[DDF_GUID_LEN];	/* of the protected VD */
		be16	secondary_element;
		__u8	pad[6];
	} spare_ents[0];	/* trailing variable-length array */
};
386 /* spare_assign.type is a bitmap */
387 #define DDF_spare_dedicated 0x1 /* else global */
388 #define DDF_spare_revertible 0x2 /* else committable */
389 #define DDF_spare_active 0x4 /* else not active */
390 #define DDF_spare_affinity 0x8 /* enclosure affinity */
391
392 /* The data_section contents - local scope */
/* The data_section contents - local scope.
 * Per-disk identity record; 'refnum' ties this disk to its
 * phys_disk_entry and to vd_config.phys_refnum slots.
 */
struct disk_data {
	be32	magic;		/* DDF_PHYS_DATA_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	be32	refnum;		/* crc of some magic drive data ... */
	__u8	forced_ref;	/* set when above was not result of magic */
	__u8	forced_guid;	/* set if guid was forced rather than magic */
	__u8	vendor[32];
	__u8	pad[442];
};
403
404 /* bbm_section content */
/* bbm_section content - the bad block management log.
 * Maps ranges of defective sectors to replacement sectors in the
 * spare area starting at 'first_spare'.
 */
struct bad_block_log {
	be32	magic;		/* DDF_BBM_LOG_MAGIC */
	be32	crc;
	be16	entry_count;
	be32	spare_count;
	__u8	pad[10];
	be64	first_spare;
	struct mapped_block {
		be64	defective_start;
		be32	replacement_start;
		be16	remap_count;
		__u8	pad[2];
	} entries[0];		/* trailing variable-length array */
};
419
420 /* Struct for internally holding ddf structures */
421 /* The DDF structure stored on each device is potentially
422 * quite different, as some data is global and some is local.
423 * The global data is:
424 * - ddf header
425 * - controller_data
426 * - Physical disk records
427 * - Virtual disk records
428 * The local data is:
429 * - Configuration records
430 * - Physical Disk data section
431 * ( and Bad block and vendor which I don't care about yet).
432 *
433 * The local data is parsed into separate lists as it is read
434 * and reconstructed for writing. This means that we only need
435 * to make config changes once and they are automatically
436 * propagated to all devices.
437 * The global (config and disk data) records are each in a list
438 * of separate data structures. When writing we find the entry
439 * or entries applicable to the particular device.
440 */
/* In-memory representation of the DDF metadata for one container:
 * the global records (headers, controller, phys, virt) plus per-disk
 * local data on 'dlist' and per-array config records on 'conflist'.
 */
struct ddf_super {
	struct ddf_header anchor, primary, secondary;
	struct ddf_controller_data controller;
	struct ddf_header *active;	/* &primary or &secondary */
	struct phys_disk *phys;
	struct virtual_disk *virt;
	char *conf;			/* raw config section buffer */
	int pdsize, vdsize;		/* byte sizes of phys/virt sections */
	unsigned int max_part, mppe, conf_rec_len; /* cached header limits */
	int currentdev;
	int updates_pending;
	/* One vcl per virtual disk, linked on 'conflist'. */
	struct vcl {
		union {
			/* pad the bookkeeping fields to a full sector so
			 * 'conf' below starts 512-byte aligned */
			char space[512];
			struct {
				struct vcl *next;
				unsigned int vcnum; /* index into ->virt */
				/* For an array with a secondary level there are
				 * multiple vd_config structures, all with the same
				 * guid but with different sec_elmnt_seq.
				 * One of these structures is in 'conf' below.
				 * The others are in other_bvds, not in any
				 * particular order.
				 */
				struct vd_config **other_bvds;
				__u64 *block_sizes; /* NULL if all the same */
			};
		};
		struct vd_config conf;
	} *conflist, *currentconf;
	/* One dl per physical disk, linked on 'dlist' (or 'add_list'
	 * for disks waiting to be added). */
	struct dl {
		union {
			/* same sector-alignment trick as struct vcl */
			char space[512];
			struct {
				struct dl *next;
				int major, minor;
				char *devname;
				int fd;
				unsigned long long size; /* sectors */
				be64 primary_lba; /* sectors */
				be64 secondary_lba; /* sectors */
				be64 workspace_lba; /* sectors */
				int pdnum;	/* index in ->phys */
				struct spare_assign *spare;
				void *mdupdate; /* hold metadata update */

				/* These fields used by auto-layout */
				int raiddisk; /* slot to fill in autolayout */
				__u64 esize;
			};
		};
		struct disk_data disk;
		struct vcl *vlist[0]; /* max_part in size */
	} *dlist, *add_list;
};
496
497 #ifndef MDASSEMBLE
498 static int load_super_ddf_all(struct supertype *st, int fd,
499 void **sbp, char *devname);
500 static int get_svd_state(const struct ddf_super *, const struct vcl *);
501 static int
502 validate_geometry_ddf_container(struct supertype *st,
503 int level, int layout, int raiddisks,
504 int chunk, unsigned long long size,
505 unsigned long long data_offset,
506 char *dev, unsigned long long *freesize,
507 int verbose);
508
509 static int validate_geometry_ddf_bvd(struct supertype *st,
510 int level, int layout, int raiddisks,
511 int *chunk, unsigned long long size,
512 unsigned long long data_offset,
513 char *dev, unsigned long long *freesize,
514 int verbose);
515 #endif
516
517 static void free_super_ddf(struct supertype *st);
518 static int all_ff(const char *guid);
519 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
520 be32 refnum, unsigned int nmax,
521 const struct vd_config **bvd,
522 unsigned int *idx);
523 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
524 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
525 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
526 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i);
527 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
528 static int init_super_ddf_bvd(struct supertype *st,
529 mdu_array_info_t *info,
530 unsigned long long size,
531 char *name, char *homehost,
532 int *uuid, unsigned long long data_offset);
533
534 #ifndef offsetof
535 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
536 #endif
537
#if DEBUG
/* Debugging aid: dump state/init_state of every defined virtual disk
 * entry.  Called via the pr_state() macro whenever updates become
 * pending.  Takes 'const' to match the non-DEBUG stub below (the body
 * only reads); previously the two variants disagreed on constness.
 */
static void pr_state(const struct ddf_super *ddf, const char *msg)
{
	unsigned int i;
	dprintf("%s/%s: ", __func__, msg);
	for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
		if (all_ff(ddf->virt->entries[i].guid))
			continue;
		dprintf("%u(s=%02x i=%02x) ", i,
			ddf->virt->entries[i].state,
			ddf->virt->entries[i].init_state);
	}
	dprintf("\n");
}
#else
static void pr_state(const struct ddf_super *ddf, const char *msg) {}
#endif
555
556 static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
557 {
558 if (ddf->updates_pending)
559 return;
560 ddf->updates_pending = 1;
561 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
562 pr_state(ddf, func);
563 }
564
565 #define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)
566
567 static be32 calc_crc(void *buf, int len)
568 {
569 /* crcs are always at the same place as in the ddf_header */
570 struct ddf_header *ddf = buf;
571 be32 oldcrc = ddf->crc;
572 __u32 newcrc;
573 ddf->crc = cpu_to_be32(0xffffffff);
574
575 newcrc = crc32(0, buf, len);
576 ddf->crc = oldcrc;
577 /* The crc is stored (like everything) bigendian, so convert
578 * here for simplicity
579 */
580 return cpu_to_be32(newcrc);
581 }
582
#define DDF_INVALID_LEVEL 0xff	/* prl sentinel: no DDF equivalent */
#define DDF_NO_SECONDARY 0xff	/* srl sentinel: single-level array */
/* Report an md geometry that cannot be expressed in DDF.
 * Always returns -1 so callers can 'return err_bad_md_layout(array);'.
 */
static int err_bad_md_layout(const mdu_array_info_t *array)
{
	pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
	       array->level, array->layout, array->raid_disks);
	return -1;
}
591
/* Translate an md geometry (level/layout/raid_disks) into the DDF
 * PRL/RLQ/SRL triple and element counts, storing the result in
 * *conf (prl, rlq, srl, prim_elmnt_count, sec_elmnt_count only;
 * other fields are untouched).
 * Returns 0 on success, -1 (after printing an error) for md layouts
 * with no DDF equivalent.  Inverse of layout_ddf2md().
 */
static int layout_md2ddf(const mdu_array_info_t *array,
			 struct vd_config *conf)
{
	be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
	__u8 prl = DDF_INVALID_LEVEL, rlq = 0;
	__u8 sec_elmnt_count = 1;
	__u8 srl = DDF_NO_SECONDARY;

	switch (array->level) {
	case LEVEL_LINEAR:
		prl = DDF_CONCAT;
		break;
	case 0:
		rlq = DDF_RAID0_SIMPLE;
		prl = DDF_RAID0;
		break;
	case 1:
		/* DDF RAID1 only supports exactly 2 or 3 mirrors */
		switch (array->raid_disks) {
		case 2:
			rlq = DDF_RAID1_SIMPLE;
			break;
		case 3:
			rlq = DDF_RAID1_MULTI;
			break;
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID1;
		break;
	case 4:
		if (array->layout != 0)
			return err_bad_md_layout(array);
		rlq = DDF_RAID4_N;
		prl = DDF_RAID4;
		break;
	case 5:
		/* map md parity algorithms to DDF qualifiers - see the
		 * comments at the DDF_RAID5_* definitions above */
		switch (array->layout) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			rlq = DDF_RAID5_N_RESTART;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			rlq = DDF_RAID5_0_RESTART;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			rlq = DDF_RAID5_N_CONTINUE;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			/* not mentioned in standard */
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID5;
		break;
	case 6:
		switch (array->layout) {
		case ALGORITHM_ROTATING_N_RESTART:
			rlq = DDF_RAID5_N_RESTART;
			break;
		case ALGORITHM_ROTATING_ZERO_RESTART:
			rlq = DDF_RAID6_0_RESTART;
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			rlq = DDF_RAID5_N_CONTINUE;
			break;
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID6;
		break;
	case 10:
		/* md RAID10 becomes DDF RAID1 plexes joined by a
		 * secondary level; only near-copy layouts (0x102/0x103
		 * == near=2/near=3) can be represented. */
		if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
			rlq = DDF_RAID1_SIMPLE;
			prim_elmnt_count =  cpu_to_be16(2);
			sec_elmnt_count = array->raid_disks / 2;
		} else if (array->raid_disks % 3 == 0
			   && array->layout == 0x103) {
			rlq = DDF_RAID1_MULTI;
			prim_elmnt_count =  cpu_to_be16(3);
			sec_elmnt_count = array->raid_disks / 3;
		} else
			return err_bad_md_layout(array);
		srl = DDF_2SPANNED;
		prl = DDF_RAID1;
		break;
	default:
		return err_bad_md_layout(array);
	}
	conf->prl = prl;
	conf->prim_elmnt_count = prim_elmnt_count;
	conf->rlq = rlq;
	conf->srl = srl;
	conf->sec_elmnt_count = sec_elmnt_count;
	return 0;
}
686
/* Report a DDF prl/rlq/disk-count combination that md cannot express.
 * Always returns -1 so callers can 'return err_bad_ddf_layout(conf);'.
 */
static int err_bad_ddf_layout(const struct vd_config *conf)
{
	pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
	       conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
	return -1;
}
693
694 static int layout_ddf2md(const struct vd_config *conf,
695 mdu_array_info_t *array)
696 {
697 int level = LEVEL_UNSUPPORTED;
698 int layout = 0;
699 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
700
701 if (conf->sec_elmnt_count > 1) {
702 /* see also check_secondary() */
703 if (conf->prl != DDF_RAID1 ||
704 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
705 pr_err("Unsupported secondary RAID level %u/%u\n",
706 conf->prl, conf->srl);
707 return -1;
708 }
709 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
710 layout = 0x102;
711 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
712 layout = 0x103;
713 else
714 return err_bad_ddf_layout(conf);
715 raiddisks *= conf->sec_elmnt_count;
716 level = 10;
717 goto good;
718 }
719
720 switch (conf->prl) {
721 case DDF_CONCAT:
722 level = LEVEL_LINEAR;
723 break;
724 case DDF_RAID0:
725 if (conf->rlq != DDF_RAID0_SIMPLE)
726 return err_bad_ddf_layout(conf);
727 level = 0;
728 break;
729 case DDF_RAID1:
730 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
731 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
732 return err_bad_ddf_layout(conf);
733 level = 1;
734 break;
735 case DDF_RAID4:
736 if (conf->rlq != DDF_RAID4_N)
737 return err_bad_ddf_layout(conf);
738 level = 4;
739 break;
740 case DDF_RAID5:
741 switch (conf->rlq) {
742 case DDF_RAID5_N_RESTART:
743 layout = ALGORITHM_LEFT_ASYMMETRIC;
744 break;
745 case DDF_RAID5_0_RESTART:
746 layout = ALGORITHM_RIGHT_ASYMMETRIC;
747 break;
748 case DDF_RAID5_N_CONTINUE:
749 layout = ALGORITHM_LEFT_SYMMETRIC;
750 break;
751 default:
752 return err_bad_ddf_layout(conf);
753 }
754 level = 5;
755 break;
756 case DDF_RAID6:
757 switch (conf->rlq) {
758 case DDF_RAID5_N_RESTART:
759 layout = ALGORITHM_ROTATING_N_RESTART;
760 break;
761 case DDF_RAID6_0_RESTART:
762 layout = ALGORITHM_ROTATING_ZERO_RESTART;
763 break;
764 case DDF_RAID5_N_CONTINUE:
765 layout = ALGORITHM_ROTATING_N_CONTINUE;
766 break;
767 default:
768 return err_bad_ddf_layout(conf);
769 }
770 level = 6;
771 break;
772 default:
773 return err_bad_ddf_layout(conf);
774 };
775
776 good:
777 array->level = level;
778 array->layout = layout;
779 array->raid_disks = raiddisks;
780 return 0;
781 }
782
/* Read one DDF header (primary or secondary) from fd at sector 'lba'
 * into *hdr and verify it against the already-loaded anchor.
 * 'size' is the device size in sectors; 'type' is the expected
 * header type (DDF_HEADER_PRIMARY or DDF_HEADER_SECONDARY).
 * Returns 1 if the header is valid and consistent, 0 otherwise.
 */
static int load_ddf_header(int fd, unsigned long long lba,
			   unsigned long long size,
			   int type,
			   struct ddf_header *hdr, struct ddf_header *anchor)
{
	/* read a ddf header (primary or secondary) from fd/lba
	 * and check that it is consistent with anchor
	 * Need to check:
	 *   magic, crc, guid, rev, and LBA's
	 *   header_type, and
	 *   everything after header_type must be the same
	 */
	/* a full 512-byte sector must fit below the device end */
	if (lba >= size-1)
		return 0;

	if (lseek64(fd, lba<<9, 0) < 0)
		return 0;

	if (read(fd, hdr, 512) != 512)
		return 0;

	if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
		pr_err("%s: bad header magic\n", __func__);
		return 0;
	}
	if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
		pr_err("%s: bad CRC\n", __func__);
		return 0;
	}
	/* everything from pad2 to the end of the sector must match the
	 * anchor byte-for-byte */
	if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
	    memcmp(anchor->revision, hdr->revision, 8) != 0 ||
	    !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
	    !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
	    hdr->type != type ||
	    memcmp(anchor->pad2, hdr->pad2, 512 -
		   offsetof(struct ddf_header, pad2)) != 0) {
		pr_err("%s: header mismatch\n", __func__);
		return 0;
	}

	/* Looks good enough to me... */
	return 1;
}
825
826 static void *load_section(int fd, struct ddf_super *super, void *buf,
827 be32 offset_be, be32 len_be, int check)
828 {
829 unsigned long long offset = be32_to_cpu(offset_be);
830 unsigned long long len = be32_to_cpu(len_be);
831 int dofree = (buf == NULL);
832
833 if (check)
834 if (len != 2 && len != 8 && len != 32
835 && len != 128 && len != 512)
836 return NULL;
837
838 if (len > 1024)
839 return NULL;
840 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
841 buf = NULL;
842
843 if (!buf)
844 return NULL;
845
846 if (super->active->type == 1)
847 offset += be64_to_cpu(super->active->primary_lba);
848 else
849 offset += be64_to_cpu(super->active->secondary_lba);
850
851 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
852 if (dofree)
853 free(buf);
854 return NULL;
855 }
856 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
857 if (dofree)
858 free(buf);
859 return NULL;
860 }
861 return buf;
862 }
863
864 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
865 {
866 unsigned long long dsize;
867
868 get_dev_size(fd, NULL, &dsize);
869
870 if (lseek64(fd, dsize-512, 0) < 0) {
871 if (devname)
872 pr_err("Cannot seek to anchor block on %s: %s\n",
873 devname, strerror(errno));
874 return 1;
875 }
876 if (read(fd, &super->anchor, 512) != 512) {
877 if (devname)
878 pr_err("Cannot read anchor block on %s: %s\n",
879 devname, strerror(errno));
880 return 1;
881 }
882 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
883 if (devname)
884 pr_err("no DDF anchor found on %s\n",
885 devname);
886 return 2;
887 }
888 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
889 if (devname)
890 pr_err("bad CRC on anchor on %s\n",
891 devname);
892 return 2;
893 }
894 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
895 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
896 if (devname)
897 pr_err("can only support super revision"
898 " %.8s and earlier, not %.8s on %s\n",
899 DDF_REVISION_2, super->anchor.revision,devname);
900 return 2;
901 }
902 super->active = NULL;
903 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
904 dsize >> 9, 1,
905 &super->primary, &super->anchor) == 0) {
906 if (devname)
907 pr_err("Failed to load primary DDF header "
908 "on %s\n", devname);
909 } else
910 super->active = &super->primary;
911
912 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
913 dsize >> 9, 2,
914 &super->secondary, &super->anchor)) {
915 if (super->active == NULL
916 || (be32_to_cpu(super->primary.seq)
917 < be32_to_cpu(super->secondary.seq) &&
918 !super->secondary.openflag)
919 || (be32_to_cpu(super->primary.seq)
920 == be32_to_cpu(super->secondary.seq) &&
921 super->primary.openflag && !super->secondary.openflag)
922 )
923 super->active = &super->secondary;
924 } else if (devname &&
925 be64_to_cpu(super->anchor.secondary_lba) != ~(__u64)0)
926 pr_err("Failed to load secondary DDF header on %s\n",
927 devname);
928 if (super->active == NULL)
929 return 2;
930 return 0;
931 }
932
/* Load the globally-scoped sections referenced by the active header:
 * controller data (into the embedded super->controller), physical
 * disk records and virtual disk records (freshly allocated).
 * Also caches the header limits (max_part, mppe, conf_rec_len).
 * Returns 0 on success, 2 if any section could not be loaded.
 * 'devname' is currently unused here.
 */
static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
{
	void *ok;
	ok = load_section(fd, super, &super->controller,
			  super->active->controller_section_offset,
			  super->active->controller_section_length,
			  0);
	super->phys = load_section(fd, super, NULL,
				   super->active->phys_section_offset,
				   super->active->phys_section_length,
				   1);
	/* section lengths are in 512-byte sectors */
	super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;

	super->virt = load_section(fd, super, NULL,
				   super->active->virt_section_offset,
				   super->active->virt_section_length,
				   1);
	super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
	if (!ok ||
	    !super->phys ||
	    !super->virt) {
		/* free(NULL) is a no-op, so no need to test first */
		free(super->phys);
		free(super->virt);
		super->phys = NULL;
		super->virt = NULL;
		return 2;
	}
	super->conflist = NULL;
	super->dlist = NULL;

	super->max_part = be16_to_cpu(super->active->max_partitions);
	super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
	super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
	return 0;
}
968
969 #define DDF_UNUSED_BVD 0xff
970 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
971 {
972 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
973 unsigned int i, vdsize;
974 void *p;
975 if (n_vds == 0) {
976 vcl->other_bvds = NULL;
977 return 0;
978 }
979 vdsize = ddf->conf_rec_len * 512;
980 if (posix_memalign(&p, 512, n_vds *
981 (vdsize + sizeof(struct vd_config *))) != 0)
982 return -1;
983 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
984 for (i = 0; i < n_vds; i++) {
985 vcl->other_bvds[i] = p + i * vdsize;
986 memset(vcl->other_bvds[i], 0, vdsize);
987 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
988 }
989 return 0;
990 }
991
/* Store the vd_config 'vd' (len bytes) in vcl->other_bvds.
 * If a record with the same sec_elmnt_seq already exists it is only
 * replaced by a newer one (higher seqnum); otherwise the first unused
 * slot (marked DDF_UNUSED_BVD) is taken.  If no slot is free the
 * record is dropped with an error message.
 */
static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
			  unsigned int len)
{
	int i;
	/* look for an existing record for this BVD first */
	for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
		if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
			break;

	if (i < vcl->conf.sec_elmnt_count-1) {
		/* found one - keep whichever has the higher seqnum */
		if (be32_to_cpu(vd->seqnum) <=
		    be32_to_cpu(vcl->other_bvds[i]->seqnum))
			return;
	} else {
		/* no match - claim the first unused slot */
		for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
			if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
				break;
		if (i == vcl->conf.sec_elmnt_count-1) {
			pr_err("no space for sec level config %u, count is %u\n",
			       vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
			return;
		}
	}
	memcpy(vcl->other_bvds[i], vd, len);
}
1016
/* Read the per-disk ("local") DDF metadata from @fd: the physical-disk
 * data section and the configuration-record section.  A new 'struct dl'
 * describing this disk is prepended to super->dlist, and every valid
 * VD configuration record found is merged into super->conflist.
 * @keep: when non-zero, @fd is stored in dl->fd for later use;
 *        otherwise dl->fd is set to -1.
 * Returns 0 on success, 1 on allocation failure.
 */
static int load_ddf_local(int fd, struct ddf_super *super,
			  char *devname, int keep)
{
	struct dl *dl;
	struct stat stb;
	char *conf;
	unsigned int i;
	unsigned int confsec;
	int vnum;
	unsigned int max_virt_disks =
		be16_to_cpu(super->active->max_vd_entries);
	unsigned long long dsize;

	/* First the local disk info */
	/* dl is immediately followed by an array of max_part vcl
	 * pointers (dl->vlist). */
	if (posix_memalign((void**)&dl, 512,
			   sizeof(*dl) +
			   (super->max_part) * sizeof(dl->vlist[0])) != 0) {
		pr_err("%s could not allocate disk info buffer\n",
		       __func__);
		return 1;
	}

	load_section(fd, super, &dl->disk,
		     super->active->data_section_offset,
		     super->active->data_section_length,
		     0);
	dl->devname = devname ? xstrdup(devname) : NULL;

	fstat(fd, &stb);
	dl->major = major(stb.st_rdev);
	dl->minor = minor(stb.st_rdev);
	dl->next = super->dlist;
	dl->fd = keep ? fd : -1;

	dl->size = 0;	/* device size in 512-byte sectors */
	if (get_dev_size(fd, devname, &dsize))
		dl->size = dsize >> 9;
	/* If the disks have different sizes, the LBAs will differ
	 * between phys disks.
	 * At this point here, the values in super->active must be valid
	 * for this phys disk. */
	dl->primary_lba = super->active->primary_lba;
	dl->secondary_lba = super->active->secondary_lba;
	dl->workspace_lba = super->active->workspace_lba;
	dl->spare = NULL;
	for (i = 0 ; i < super->max_part ; i++)
		dl->vlist[i] = NULL;
	super->dlist = dl;
	/* Locate our own entry (by GUID) in the phys disk table. */
	dl->pdnum = -1;
	for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
		if (memcmp(super->phys->entries[i].guid,
			   dl->disk.guid, DDF_GUID_LEN) == 0)
			dl->pdnum = i;

	/* Now the config list. */
	/* 'conf' is an array of config entries, some of which are
	 * probably invalid.  Those which are good need to be copied into
	 * the conflist
	 */
	conf = load_section(fd, super, super->conf,
			    super->active->config_section_offset,
			    super->active->config_section_length,
			    0);
	super->conf = conf;
	vnum = 0;
	/* Step through the section one config record at a time; both
	 * the section length and conf_rec_len are in 512-byte sectors. */
	for (confsec = 0;
	     confsec < be32_to_cpu(super->active->config_section_length);
	     confsec += super->conf_rec_len) {
		struct vd_config *vd =
			(struct vd_config *)((char*)conf + confsec*512);
		struct vcl *vcl;

		if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
			/* Only the first spare-assignment record is kept. */
			if (dl->spare)
				continue;
			if (posix_memalign((void**)&dl->spare, 512,
					   super->conf_rec_len*512) != 0) {
				pr_err("%s could not allocate spare info buf\n",
				       __func__);
				return 1;
			}

			memcpy(dl->spare, vd, super->conf_rec_len*512);
			continue;
		}
		if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
			/* Must be vendor-unique - I cannot handle those */
			continue;

		/* Do we already have a vcl for this virtual disk? */
		for (vcl = super->conflist; vcl; vcl = vcl->next) {
			if (memcmp(vcl->conf.guid,
				   vd->guid, DDF_GUID_LEN) == 0)
				break;
		}

		if (vcl) {
			dl->vlist[vnum++] = vcl;
			/* A record for a different secondary element of
			 * the same SVD is filed under other_bvds. */
			if (vcl->other_bvds != NULL &&
			    vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
				add_other_bvd(vcl, vd, super->conf_rec_len*512);
				continue;
			}
			/* Keep the existing record unless this one is newer. */
			if (be32_to_cpu(vd->seqnum) <=
			    be32_to_cpu(vcl->conf.seqnum))
				continue;
		} else {
			/* First sighting of this VD: allocate a vcl with
			 * room for the whole config record in-line. */
			if (posix_memalign((void**)&vcl, 512,
					   (super->conf_rec_len*512 +
					    offsetof(struct vcl, conf))) != 0) {
				pr_err("%s could not allocate vcl buf\n",
				       __func__);
				return 1;
			}
			vcl->next = super->conflist;
			vcl->block_sizes = NULL; /* FIXME not for CONCAT */
			vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
			if (alloc_other_bvds(super, vcl) != 0) {
				pr_err("%s could not allocate other bvds\n",
				       __func__);
				free(vcl);
				return 1;
			};
			super->conflist = vcl;
			dl->vlist[vnum++] = vcl;
		}
		memcpy(&vcl->conf, vd, super->conf_rec_len*512);
		/* Translate the GUID into an index into the virtual
		 * disk table; vcl->vcnum is left untouched if not found. */
		for (i=0; i < max_virt_disks ; i++)
			if (memcmp(super->virt->entries[i].guid,
				   vcl->conf.guid, DDF_GUID_LEN)==0)
				break;
		if (i < max_virt_disks)
			vcl->vcnum = i;
	}

	return 0;
}
1154
1155 static int load_super_ddf(struct supertype *st, int fd,
1156 char *devname)
1157 {
1158 unsigned long long dsize;
1159 struct ddf_super *super;
1160 int rv;
1161
1162 if (get_dev_size(fd, devname, &dsize) == 0)
1163 return 1;
1164
1165 if (test_partition(fd))
1166 /* DDF is not allowed on partitions */
1167 return 1;
1168
1169 /* 32M is a lower bound */
1170 if (dsize <= 32*1024*1024) {
1171 if (devname)
1172 pr_err("%s is too small for ddf: "
1173 "size is %llu sectors.\n",
1174 devname, dsize>>9);
1175 return 1;
1176 }
1177 if (dsize & 511) {
1178 if (devname)
1179 pr_err("%s is an odd size for ddf: "
1180 "size is %llu bytes.\n",
1181 devname, dsize);
1182 return 1;
1183 }
1184
1185 free_super_ddf(st);
1186
1187 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1188 pr_err("malloc of %zu failed.\n",
1189 sizeof(*super));
1190 return 1;
1191 }
1192 memset(super, 0, sizeof(*super));
1193
1194 rv = load_ddf_headers(fd, super, devname);
1195 if (rv) {
1196 free(super);
1197 return rv;
1198 }
1199
1200 /* Have valid headers and have chosen the best. Let's read in the rest*/
1201
1202 rv = load_ddf_global(fd, super, devname);
1203
1204 if (rv) {
1205 if (devname)
1206 pr_err("Failed to load all information "
1207 "sections on %s\n", devname);
1208 free(super);
1209 return rv;
1210 }
1211
1212 rv = load_ddf_local(fd, super, devname, 0);
1213
1214 if (rv) {
1215 if (devname)
1216 pr_err("Failed to load all information "
1217 "sections on %s\n", devname);
1218 free(super);
1219 return rv;
1220 }
1221
1222 /* Should possibly check the sections .... */
1223
1224 st->sb = super;
1225 if (st->ss == NULL) {
1226 st->ss = &super_ddf;
1227 st->minor_version = 0;
1228 st->max_devs = 512;
1229 }
1230 return 0;
1231
1232 }
1233
1234 static void free_super_ddf(struct supertype *st)
1235 {
1236 struct ddf_super *ddf = st->sb;
1237 if (ddf == NULL)
1238 return;
1239 free(ddf->phys);
1240 free(ddf->virt);
1241 free(ddf->conf);
1242 while (ddf->conflist) {
1243 struct vcl *v = ddf->conflist;
1244 ddf->conflist = v->next;
1245 if (v->block_sizes)
1246 free(v->block_sizes);
1247 if (v->other_bvds)
1248 /*
1249 v->other_bvds[0] points to beginning of buffer,
1250 see alloc_other_bvds()
1251 */
1252 free(v->other_bvds[0]);
1253 free(v);
1254 }
1255 while (ddf->dlist) {
1256 struct dl *d = ddf->dlist;
1257 ddf->dlist = d->next;
1258 if (d->fd >= 0)
1259 close(d->fd);
1260 if (d->spare)
1261 free(d->spare);
1262 free(d);
1263 }
1264 while (ddf->add_list) {
1265 struct dl *d = ddf->add_list;
1266 ddf->add_list = d->next;
1267 if (d->fd >= 0)
1268 close(d->fd);
1269 if (d->spare)
1270 free(d->spare);
1271 free(d);
1272 }
1273 free(ddf);
1274 st->sb = NULL;
1275 }
1276
1277 static struct supertype *match_metadata_desc_ddf(char *arg)
1278 {
1279 /* 'ddf' only supports containers */
1280 struct supertype *st;
1281 if (strcmp(arg, "ddf") != 0 &&
1282 strcmp(arg, "default") != 0
1283 )
1284 return NULL;
1285
1286 st = xcalloc(1, sizeof(*st));
1287 st->ss = &super_ddf;
1288 st->max_devs = 512;
1289 st->minor_version = 0;
1290 st->sb = NULL;
1291 return st;
1292 }
1293
#ifndef MDASSEMBLE

/* Tables translating DDF on-disk numeric codes into printable names
 * for --examine output (looked up via map_num()). */

/* virtual_entry->state, low 3 bits -- see examine_vds() */
static mapping_t ddf_state[] = {
	{ "Optimal", 0},
	{ "Degraded", 1},
	{ "Deleted", 2},
	{ "Missing", 3},
	{ "Failed", 4},
	{ "Partially Optimal", 5},
	{ "-reserved-", 6},
	{ "-reserved-", 7},
	{ NULL, 0}
};

/* virtual_entry->init_state & DDF_initstate_mask */
static mapping_t ddf_init_state[] = {
	{ "Not Initialised", 0},
	{ "QuickInit in Progress", 1},
	{ "Fully Initialised", 2},
	{ "*UNKNOWN*", 3},
	{ NULL, 0}
};

/* (virtual_entry->init_state & DDF_access_mask) >> 6 */
static mapping_t ddf_access[] = {
	{ "Read/Write", 0},
	{ "Reserved", 1},
	{ "Read Only", 2},
	{ "Blocked (no access)", 3},
	{ NULL ,0}
};

/* Primary RAID level, vd_config->prl */
static mapping_t ddf_level[] = {
	{ "RAID0", DDF_RAID0},
	{ "RAID1", DDF_RAID1},
	{ "RAID3", DDF_RAID3},
	{ "RAID4", DDF_RAID4},
	{ "RAID5", DDF_RAID5},
	{ "RAID1E",DDF_RAID1E},
	{ "JBOD", DDF_JBOD},
	{ "CONCAT",DDF_CONCAT},
	{ "RAID5E",DDF_RAID5E},
	{ "RAID5EE",DDF_RAID5EE},
	{ "RAID6", DDF_RAID6},
	{ NULL, 0}
};

/* Secondary RAID level, vd_config->srl */
static mapping_t ddf_sec_level[] = {
	{ "Striped", DDF_2STRIPED},
	{ "Mirrored", DDF_2MIRRORED},
	{ "Concat", DDF_2CONCAT},
	{ "Spanned", DDF_2SPANNED},
	{ NULL, 0}
};
#endif
1345
1346 static int all_ff(const char *guid)
1347 {
1348 int i;
1349 for (i = 0; i < DDF_GUID_LEN; i++)
1350 if (guid[i] != (char)0xff)
1351 return 0;
1352 return 1;
1353 }
1354
1355 static const char *guid_str(const char *guid)
1356 {
1357 static char buf[DDF_GUID_LEN*2+1];
1358 int i;
1359 char *p = buf;
1360 for (i = 0; i < DDF_GUID_LEN; i++) {
1361 unsigned char c = guid[i];
1362 if (c >= 32 && c < 127)
1363 p += sprintf(p, "%c", c);
1364 else
1365 p += sprintf(p, "%02x", c);
1366 }
1367 *p = '\0';
1368 return (const char *) buf;
1369 }
1370
1371 #ifndef MDASSEMBLE
/* Pretty-print a DDF GUID to stdout.
 * A GUIDs are part (or all) ASCII and part binary.
 * They tend to be space padded.
 * We print the GUID in HEX, then in parentheses add
 * any initial ASCII sequence, and a possible
 * time stamp from bytes 16-19 (when @tstamp is non-zero).
 */
static void print_guid(char *guid, int tstamp)
{
	int l = DDF_GUID_LEN;
	int i;

	/* Hex dump, a ':' between every group of four bytes. */
	for (i=0 ; i<DDF_GUID_LEN ; i++) {
		if ((i&3)==0 && i != 0) printf(":");
		printf("%02X", guid[i]&255);
	}

	printf("\n            (");
	/* Trim trailing space padding, then print the leading run of
	 * printable characters only. */
	while (l && guid[l-1] == ' ')
		l--;
	for (i=0 ; i<l ; i++) {
		if (guid[i] >= 0x20 && guid[i] < 0x7f)
			fputc(guid[i], stdout);
		else
			break;
	}
	if (tstamp) {
		/* Bytes 16-19 hold a big-endian DDF timestamp (1980
		 * epoch); DECADE converts it to a unix time_t. */
		time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
		char tbuf[100];
		struct tm *tm;
		tm = localtime(&then);
		strftime(tbuf, 100, " %D %T",tm);
		fputs(tbuf, stdout);
	}
	printf(")");
}
1407
/* Print detail lines for virtual disk number @n identified by @guid,
 * using the cached configuration records.  Records with a bad CRC or
 * a different GUID are skipped silently.
 */
static void examine_vd(int n, struct ddf_super *sb, char *guid)
{
	int crl = sb->conf_rec_len;	/* record length in sectors */
	struct vcl *vcl;

	for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
		unsigned int i;
		struct vd_config *vc = &vcl->conf;

		if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
			continue;
		if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
			continue;

		/* Ok, we know about this VD, let's give more details */
		printf(" Raid Devices[%d] : %d (", n,
		       be16_to_cpu(vc->prim_elmnt_count));
		for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
			int j;
			int cnt = be16_to_cpu(sb->phys->used_pdes);
			/* Map the refnum to a phys-table index;
			 * "--" is printed when nothing matches. */
			for (j=0; j<cnt; j++)
				if (be32_eq(vc->phys_refnum[i],
					    sb->phys->entries[j].refnum))
					break;
			if (i) printf(" ");
			if (j < cnt)
				printf("%d", j);
			else
				printf("--");
		}
		printf(")\n");
		/* chunk_shift 255 appears to mean "no chunk size"
		 * (e.g. for non-striped levels) -- NOTE(review): confirm. */
		if (vc->chunk_shift != 255)
			printf(" Chunk Size[%d] : %d sectors\n", n,
			       1 << vc->chunk_shift);
		printf(" Raid Level[%d] : %s\n", n,
		       map_num(ddf_level, vc->prl)?:"-unknown-");
		if (vc->sec_elmnt_count != 1) {
			printf(" Secondary Position[%d] : %d of %d\n", n,
			       vc->sec_elmnt_seq, vc->sec_elmnt_count);
			printf(" Secondary Level[%d] : %s\n", n,
			       map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
		}
		/* Sizes are stored in sectors; /2 converts to KiB. */
		printf(" Device Size[%d] : %llu\n", n,
		       be64_to_cpu(vc->blocks)/2);
		printf(" Array Size[%d] : %llu\n", n,
		       be64_to_cpu(vc->array_blocks)/2);
	}
}
1456
/* Print a summary line for every populated virtual disk in the
 * container, followed by the per-VD detail from examine_vd().
 */
static void examine_vds(struct ddf_super *sb)
{
	int cnt = be16_to_cpu(sb->virt->populated_vdes);
	unsigned int i;
	printf(" Virtual Disks : %d\n", cnt);

	for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
		struct virtual_entry *ve = &sb->virt->entries[i];
		/* All-0xff GUID marks an unused table slot. */
		if (all_ff(ve->guid))
			continue;
		printf("\n");
		printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
		printf("\n");
		printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
		printf(" state[%d] : %s, %s%s\n", i,
		       map_num(ddf_state, ve->state & 7),
		       (ve->state & DDF_state_morphing) ? "Morphing, ": "",
		       (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
		printf(" init state[%d] : %s\n", i,
		       map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
		printf(" access[%d] : %s\n", i,
		       map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
		printf(" Name[%d] : %.16s\n", i, ve->name);
		examine_vd(i, sb, ve->guid);
	}
	if (cnt) printf("\n");
}
1484
/* Print one table row per physical disk: index, refnum, size, the
 * matching local device node (if any) and the type/state flags.
 */
static void examine_pds(struct ddf_super *sb)
{
	int cnt = be16_to_cpu(sb->phys->used_pdes);
	int i;
	struct dl *dl;
	printf(" Physical Disks : %d\n", cnt);
	printf(" Number RefNo Size Device Type/State\n");

	for (i=0 ; i<cnt ; i++) {
		struct phys_disk_entry *pd = &sb->phys->entries[i];
		int type = be16_to_cpu(pd->type);
		int state = be16_to_cpu(pd->state);

		//printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
		//printf("\n");
		printf(" %3d %08x ", i,
		       be32_to_cpu(pd->refnum));
		/* config_size is in sectors; >>1 converts to KiB. */
		printf("%8lluK ",
		       be64_to_cpu(pd->config_size)>>1);
		/* Try to resolve the refnum to a local device node. */
		for (dl = sb->dlist; dl ; dl = dl->next) {
			if (be32_eq(dl->disk.refnum, pd->refnum)) {
				char *dv = map_dev(dl->major, dl->minor, 0);
				if (dv) {
					printf("%-15s", dv);
					break;
				}
			}
		}
		if (!dl)
			printf("%15s","");
		printf(" %s%s%s%s%s",
		       (type&2) ? "active":"",
		       (type&4) ? "Global-Spare":"",
		       (type&8) ? "spare" : "",
		       (type&16)? ", foreign" : "",
		       (type&32)? "pass-through" : "");
		if (state & DDF_Failed)
			/* This over-rides these three */
			state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
		printf("/%s%s%s%s%s%s%s",
		       (state&1)? "Online": "Offline",
		       (state&2)? ", Failed": "",
		       (state&4)? ", Rebuilding": "",
		       (state&8)? ", in-transition": "",
		       (state&16)? ", SMART-errors": "",
		       (state&32)? ", Unrecovered-Read-Errors": "",
		       (state&64)? ", Missing" : "");
		printf("\n");
	}
}
1535
/* Implement --examine for a DDF container: header fields, then the
 * virtual and physical disk tables.  @homehost is unused here.
 */
static void examine_super_ddf(struct supertype *st, char *homehost)
{
	struct ddf_super *sb = st->sb;

	printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
	printf(" Version : %.8s\n", sb->anchor.revision);
	printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
	printf("\n");
	printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
	printf("\n");
	printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
	/* The secondary header's magic tells us whether a redundant
	 * copy of the metadata exists. */
	printf(" Redundant hdr : %s\n", (be32_eq(sb->secondary.magic,
						 DDF_HEADER_MAGIC)
					 ?"yes" : "no"));
	examine_vds(sb);
	examine_pds(sb);
}
1553
1554 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1555 {
1556 /*
1557 * Figure out the VD number for this supertype.
1558 * Returns DDF_CONTAINER for the container itself,
1559 * and DDF_NOTFOUND on error.
1560 */
1561 struct ddf_super *ddf = st->sb;
1562 struct mdinfo *sra;
1563 char *sub, *end;
1564 unsigned int vcnum;
1565
1566 if (*st->container_devnm == '\0')
1567 return DDF_CONTAINER;
1568
1569 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1570 if (!sra || sra->array.major_version != -1 ||
1571 sra->array.minor_version != -2 ||
1572 !is_subarray(sra->text_version))
1573 return DDF_NOTFOUND;
1574
1575 sub = strchr(sra->text_version + 1, '/');
1576 if (sub != NULL)
1577 vcnum = strtoul(sub + 1, &end, 10);
1578 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1579 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1580 return DDF_NOTFOUND;
1581
1582 return vcnum;
1583 }
1584
1585 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1586 {
1587 /* We just write a generic DDF ARRAY entry
1588 */
1589 struct mdinfo info;
1590 char nbuf[64];
1591 getinfo_super_ddf(st, &info, NULL);
1592 fname_from_uuid(st, &info, nbuf, ':');
1593
1594 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1595 }
1596
/* We write a DDF ARRAY member entry for each vd, identifying container
 * by uuid and member by unit number and uuid.
 */
static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
{
	struct ddf_super *ddf = st->sb;
	struct mdinfo info;
	unsigned int i;
	char nbuf[64];
	getinfo_super_ddf(st, &info, NULL);
	fname_from_uuid(st, &info, nbuf, ':');

	for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
		struct virtual_entry *ve = &ddf->virt->entries[i];
		/* A temporary stack vcl is installed as currentconf so
		 * that uuid_from_super_ddf() yields the subarray UUID
		 * instead of the container's. */
		struct vcl vcl;
		char nbuf1[64];
		char namebuf[17];
		if (all_ff(ve->guid))
			continue;	/* unused slot */
		memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
		ddf->currentconf =&vcl;
		vcl.vcnum = i;
		uuid_from_super_ddf(st, info.uuid);
		fname_from_uuid(st, &info, nbuf1, ':');
		_ddf_array_name(namebuf, ddf, i);
		printf("ARRAY%s%s container=%s member=%d UUID=%s\n",
		       namebuf[0] == '\0' ? "" : " /dev/md/", namebuf,
		       nbuf+5, i, nbuf1+5);
	}
	/* NOTE(review): currentconf still points at the dead stack vcl
	 * after the loop -- confirm callers do not use it afterwards. */
}
1627
1628 static void export_examine_super_ddf(struct supertype *st)
1629 {
1630 struct mdinfo info;
1631 char nbuf[64];
1632 getinfo_super_ddf(st, &info, NULL);
1633 fname_from_uuid(st, &info, nbuf, ':');
1634 printf("MD_METADATA=ddf\n");
1635 printf("MD_LEVEL=container\n");
1636 printf("MD_UUID=%s\n", nbuf+5);
1637 printf("MD_DEVICES=%u\n",
1638 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1639 }
1640
/* Copy the DDF metadata region from fd @from to fd @to.
 * Returns 0 on success, 1 on any I/O, allocation or validation error.
 */
static int copy_metadata_ddf(struct supertype *st, int from, int to)
{
	void *buf;
	unsigned long long dsize, offset;
	int bytes;	/* size of the region to copy, in bytes */
	struct ddf_header *ddf;
	int written = 0;

	/* The meta consists of an anchor, a primary, and a secondary.
	 * This all lives at the end of the device.
	 * So it is easiest to find the earliest of primary and
	 * secondary, and copy everything from there.
	 *
	 * Anchor is 512 from end. It contains primary_lba and secondary_lba
	 * we choose one of those
	 */

	if (posix_memalign(&buf, 4096, 4096) != 0)
		return 1;

	if (!get_dev_size(from, NULL, &dsize))
		goto err;

	/* Read and sanity-check the anchor header in the last sector. */
	if (lseek64(from, dsize-512, 0) < 0)
		goto err;
	if (read(from, buf, 512) != 512)
		goto err;
	ddf = buf;
	if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
	    !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
	    (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
	     memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
		goto err;

	/* Start at whichever of anchor/primary/secondary lies lowest;
	 * the *_lba fields are in sectors, offset is in bytes. */
	offset = dsize - 512;
	if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
		offset = be64_to_cpu(ddf->primary_lba) << 9;
	if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
		offset = be64_to_cpu(ddf->secondary_lba) << 9;

	/* NOTE(review): assumes the metadata region fits in an int. */
	bytes = dsize - offset;

	if (lseek64(from, offset, 0) < 0 ||
	    lseek64(to, offset, 0) < 0)
		goto err;
	/* Copy the region in 4K chunks to the same offset on @to. */
	while (written < bytes) {
		int n = bytes - written;
		if (n > 4096)
			n = 4096;
		if (read(from, buf, n) != n)
			goto err;
		if (write(to, buf, n) != n)
			goto err;
		written += n;
	}
	free(buf);
	return 0;
err:
	free(buf);
	return 1;
}
1702
/* Intentionally empty: no extra --detail output for DDF yet. */
static void detail_super_ddf(struct supertype *st, char *homehost)
{
	/* FIXME later
	 * Could print DDF GUID
	 * Need to find which array
	 * If whole, briefly list all arrays
	 * If one, give name
	 */
}
1712
/* T10 vendor IDs (first 8 bytes of the controller GUID) whose firmware
 * is known to rewrite VD GUIDs, making them unreliable identifiers;
 * see volume_id_is_reliable(). */
static const char *vendors_with_variable_volume_UUID[] = {
	"LSI ",
};
1716
1717 static int volume_id_is_reliable(const struct ddf_super *ddf)
1718 {
1719 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1720 int i;
1721 for (i = 0; i < n; i++)
1722 if (!memcmp(ddf->controller.guid,
1723 vendors_with_variable_volume_UUID[i], 8))
1724 return 0;
1725 return 1;
1726 }
1727
/* Compute the md-style UUID (four ints) for subarray @vcnum. */
static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
				 unsigned int vcnum, int uuid[4])
{
	char buf[DDF_GUID_LEN+18], sha[20], *p;
	struct sha1_ctx ctx;
	if (volume_id_is_reliable(ddf)) {
		/* The VD GUID is stable; hash it directly. */
		uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
		return;
	}
	/*
	 * Some fake RAID BIOSes (in particular, LSI ones) change the
	 * VD GUID at every boot. These GUIDs are not suitable for
	 * identifying an array. Luckily the header GUID appears to
	 * remain constant.
	 * We construct a pseudo-UUID from the header GUID and those
	 * properties of the subarray that we expect to remain constant.
	 */
	memset(buf, 0, sizeof(buf));
	p = buf;
	memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
	p += DDF_GUID_LEN;
	memcpy(p, ddf->virt->entries[vcnum].name, 16);
	p += 16;
	/* NOTE(review): vcnum is stored in host byte order, so the
	 * derived UUID differs between endiannesses -- confirm intended. */
	*((__u16 *) p) = vcnum;
	sha1_init_ctx(&ctx);
	sha1_process_bytes(buf, sizeof(buf), &ctx);
	sha1_finish_ctx(&ctx, sha);
	/* First 16 bytes of the SHA1 become the UUID. */
	memcpy(uuid, sha, 4*4);
}
1757
1758 static void brief_detail_super_ddf(struct supertype *st)
1759 {
1760 struct mdinfo info;
1761 char nbuf[64];
1762 struct ddf_super *ddf = st->sb;
1763 unsigned int vcnum = get_vd_num_of_subarray(st);
1764 if (vcnum == DDF_CONTAINER)
1765 uuid_from_super_ddf(st, info.uuid);
1766 else if (vcnum == DDF_NOTFOUND)
1767 return;
1768 else
1769 uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
1770 fname_from_uuid(st, &info, nbuf,':');
1771 printf(" UUID=%s", nbuf + 5);
1772 }
1773 #endif
1774
1775 static int match_home_ddf(struct supertype *st, char *homehost)
1776 {
1777 /* It matches 'this' host if the controller is a
1778 * Linux-MD controller with vendor_data matching
1779 * the hostname. It would be nice if we could
1780 * test against controller found in /sys or somewhere...
1781 */
1782 struct ddf_super *ddf = st->sb;
1783 unsigned int len;
1784
1785 if (!homehost)
1786 return 0;
1787 len = strlen(homehost);
1788
1789 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1790 len < sizeof(ddf->controller.vendor_data) &&
1791 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1792 ddf->controller.vendor_data[len] == 0);
1793 }
1794
1795 #ifndef MDASSEMBLE
static int find_index_in_bvd(const struct ddf_super *ddf,
			     const struct vd_config *conf, unsigned int n,
			     unsigned int *n_bvd)
{
	/*
	 * Find the index of the n-th valid physical disk in this BVD.
	 * Unused entries can be sprinkled in with the used entries,
	 * but don't count.
	 * On success *n_bvd receives the index and 1 is returned;
	 * 0 is returned when n is out of range.
	 */
	unsigned int i, j;
	for (i = 0, j = 0;
	     i < ddf->mppe && j < be16_to_cpu(conf->prim_elmnt_count);
	     i++) {
		/* refnum 0xffffffff marks an unused phys_refnum slot. */
		if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
			if (n == j) {
				*n_bvd = i;
				return 1;
			}
			j++;
		}
	}
	dprintf("%s: couldn't find BVD member %u (total %u)\n",
		__func__, n, be16_to_cpu(conf->prim_elmnt_count));
	return 0;
}
1821
1822 /* Given a member array instance number, and a raid disk within that instance,
1823 * find the vd_config structure. The offset of the given disk in the phys_refnum
1824 * table is returned in n_bvd.
1825 * For two-level members with a secondary raid level the vd_config for
1826 * the appropriate BVD is returned.
1827 * The return value is always &vlc->conf, where vlc is returned in last pointer.
1828 */
1829 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1830 unsigned int n,
1831 unsigned int *n_bvd, struct vcl **vcl)
1832 {
1833 struct vcl *v;
1834
1835 for (v = ddf->conflist; v; v = v->next) {
1836 unsigned int nsec, ibvd = 0;
1837 struct vd_config *conf;
1838 if (inst != v->vcnum)
1839 continue;
1840 conf = &v->conf;
1841 if (conf->sec_elmnt_count == 1) {
1842 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1843 *vcl = v;
1844 return conf;
1845 } else
1846 goto bad;
1847 }
1848 if (v->other_bvds == NULL) {
1849 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1850 __func__, conf->sec_elmnt_count);
1851 goto bad;
1852 }
1853 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1854 if (conf->sec_elmnt_seq != nsec) {
1855 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1856 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1857 == nsec)
1858 break;
1859 }
1860 if (ibvd == conf->sec_elmnt_count)
1861 goto bad;
1862 conf = v->other_bvds[ibvd-1];
1863 }
1864 if (!find_index_in_bvd(ddf, conf,
1865 n - nsec*conf->sec_elmnt_count, n_bvd))
1866 goto bad;
1867 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1868 , __func__, n, *n_bvd, ibvd, inst);
1869 *vcl = v;
1870 return conf;
1871 }
1872 bad:
1873 pr_err("%s: Could't find disk %d in array %u\n", __func__, n, inst);
1874 return NULL;
1875 }
1876 #endif
1877
1878 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1879 {
1880 /* Find the entry in phys_disk which has the given refnum
1881 * and return it's index
1882 */
1883 unsigned int i;
1884 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1885 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1886 return i;
1887 return -1;
1888 }
1889
1890 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1891 {
1892 char buf[20];
1893 struct sha1_ctx ctx;
1894 sha1_init_ctx(&ctx);
1895 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1896 sha1_finish_ctx(&ctx, buf);
1897 memcpy(uuid, buf, 4*4);
1898 }
1899
static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
{
	/* The uuid returned here is used for:
	 *  uuid to put into bitmap file (Create, Grow)
	 *  uuid for backup header when saving critical section (Grow)
	 *  comparing uuids when re-adding a device into an array
	 *    In these cases the uuid required is that of the data-array,
	 *    not the device-set.
	 *  uuid to recognise same set when adding a missing device back
	 *    to an array.   This is a uuid for the device-set.
	 *
	 * For each of these we can make do with a truncated
	 * or hashed uuid rather than the original, as long as
	 * everyone agrees.
	 * In the case of SVD we assume the BVD is of interest,
	 * though that might be the case if a bitmap were made for
	 * a mirrored SVD - worry about that later.
	 * So we need to find the VD configuration record for the
	 * relevant BVD and extract the GUID and Secondary_Element_Seq.
	 * The first 16 bytes of the sha1 of these is used.
	 */
	struct ddf_super *ddf = st->sb;
	struct vcl *vcl = ddf->currentconf;

	/* currentconf selects a member array; without it the UUID of
	 * the whole container (anchor GUID) is produced. */
	if (vcl)
		uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
	else
		uuid_from_ddf_guid(ddf->anchor.guid, uuid);
}
1929
1930 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1931 {
1932 struct ddf_super *ddf = st->sb;
1933 int map_disks = info->array.raid_disks;
1934 __u32 *cptr;
1935
1936 if (ddf->currentconf) {
1937 getinfo_super_ddf_bvd(st, info, map);
1938 return;
1939 }
1940 memset(info, 0, sizeof(*info));
1941
1942 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1943 info->array.level = LEVEL_CONTAINER;
1944 info->array.layout = 0;
1945 info->array.md_minor = -1;
1946 cptr = (__u32 *)(ddf->anchor.guid + 16);
1947 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1948
1949 info->array.utime = 0;
1950 info->array.chunk_size = 0;
1951 info->container_enough = 1;
1952
1953 info->disk.major = 0;
1954 info->disk.minor = 0;
1955 if (ddf->dlist) {
1956 struct phys_disk_entry *pde = NULL;
1957 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1958 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1959
1960 info->data_offset = be64_to_cpu(ddf->phys->
1961 entries[info->disk.raid_disk].
1962 config_size);
1963 info->component_size = ddf->dlist->size - info->data_offset;
1964 if (info->disk.raid_disk >= 0)
1965 pde = ddf->phys->entries + info->disk.raid_disk;
1966 if (pde &&
1967 !(be16_to_cpu(pde->state) & DDF_Failed))
1968 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1969 else
1970 info->disk.state = 1 << MD_DISK_FAULTY;
1971
1972 info->events = be32_to_cpu(ddf->active->seq);
1973 } else {
1974 info->disk.number = -1;
1975 info->disk.raid_disk = -1;
1976 // info->disk.raid_disk = find refnum in the table and use index;
1977 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1978 }
1979
1980 info->recovery_start = MaxSector;
1981 info->reshape_active = 0;
1982 info->recovery_blocked = 0;
1983 info->name[0] = 0;
1984
1985 info->array.major_version = -1;
1986 info->array.minor_version = -2;
1987 strcpy(info->text_version, "ddf");
1988 info->safe_mode_delay = 0;
1989
1990 uuid_from_super_ddf(st, info->uuid);
1991
1992 if (map) {
1993 int i;
1994 for (i = 0 ; i < map_disks; i++) {
1995 if (i < info->array.raid_disks &&
1996 !(be16_to_cpu(ddf->phys->entries[i].state)
1997 & DDF_Failed))
1998 map[i] = 1;
1999 else
2000 map[i] = 0;
2001 }
2002 }
2003 }
2004
2005 /* size of name must be at least 17 bytes! */
2006 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i)
2007 {
2008 int j;
2009 memcpy(name, ddf->virt->entries[i].name, 16);
2010 name[16] = 0;
2011 for(j = 0; j < 16; j++)
2012 if (name[j] == ' ')
2013 name[j] = 0;
2014 }
2015
2016 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
2017 {
2018 struct ddf_super *ddf = st->sb;
2019 struct vcl *vc = ddf->currentconf;
2020 int cd = ddf->currentdev;
2021 int n_prim;
2022 int j;
2023 struct dl *dl;
2024 int map_disks = info->array.raid_disks;
2025 __u32 *cptr;
2026 struct vd_config *conf;
2027
2028 memset(info, 0, sizeof(*info));
2029 if (layout_ddf2md(&vc->conf, &info->array) == -1)
2030 return;
2031 info->array.md_minor = -1;
2032 cptr = (__u32 *)(vc->conf.guid + 16);
2033 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
2034 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
2035 info->array.chunk_size = 512 << vc->conf.chunk_shift;
2036 info->custom_array_size = 0;
2037
2038 conf = &vc->conf;
2039 n_prim = be16_to_cpu(conf->prim_elmnt_count);
2040 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
2041 int ibvd = cd / n_prim - 1;
2042 cd %= n_prim;
2043 conf = vc->other_bvds[ibvd];
2044 }
2045
2046 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
2047 info->data_offset =
2048 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
2049 if (vc->block_sizes)
2050 info->component_size = vc->block_sizes[cd];
2051 else
2052 info->component_size = be64_to_cpu(conf->blocks);
2053 }
2054
2055 for (dl = ddf->dlist; dl ; dl = dl->next)
2056 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
2057 break;
2058
2059 info->disk.major = 0;
2060 info->disk.minor = 0;
2061 info->disk.state = 0;
2062 if (dl) {
2063 info->disk.major = dl->major;
2064 info->disk.minor = dl->minor;
2065 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2066 * be16_to_cpu(conf->prim_elmnt_count);
2067 info->disk.number = dl->pdnum;
2068 info->disk.state = 0;
2069 if (info->disk.number >= 0 &&
2070 (be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Online) &&
2071 !(be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Failed))
2072 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2073 info->events = be32_to_cpu(ddf->active->seq);
2074 }
2075
2076 info->container_member = ddf->currentconf->vcnum;
2077
2078 info->recovery_start = MaxSector;
2079 info->resync_start = 0;
2080 info->reshape_active = 0;
2081 info->recovery_blocked = 0;
2082 if (!(ddf->virt->entries[info->container_member].state
2083 & DDF_state_inconsistent) &&
2084 (ddf->virt->entries[info->container_member].init_state
2085 & DDF_initstate_mask)
2086 == DDF_init_full)
2087 info->resync_start = MaxSector;
2088
2089 uuid_from_super_ddf(st, info->uuid);
2090
2091 info->array.major_version = -1;
2092 info->array.minor_version = -2;
2093 sprintf(info->text_version, "/%s/%d",
2094 st->container_devnm,
2095 info->container_member);
2096 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2097
2098 _ddf_array_name(info->name, ddf, info->container_member);
2099
2100 if (map)
2101 for (j = 0; j < map_disks; j++) {
2102 map[j] = 0;
2103 if (j < info->array.raid_disks) {
2104 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2105 if (i >= 0 &&
2106 (be16_to_cpu(ddf->phys->entries[i].state)
2107 & DDF_Online) &&
2108 !(be16_to_cpu(ddf->phys->entries[i].state)
2109 & DDF_Failed))
2110 map[i] = 1;
2111 }
2112 }
2113 }
2114
static int update_super_ddf(struct supertype *st, struct mdinfo *info,
			    char *update,
			    char *devname, int verbose,
			    int uuid_set, char *homehost)
{
	/* For 'assemble' and 'force' we need to return non-zero if any
	 * change was made.  For others, the return value is ignored.
	 * Update options are:
	 *  force-one : This device looks a bit old but needs to be included,
	 *        update age info appropriately.
	 *  assemble: clear any 'faulty' flag to allow this device to
	 *		be assembled.
	 *  force-array: Array is degraded but being forced, mark it clean
	 *	   if that will be needed to assemble it.
	 *
	 *  newdev:  not used ????
	 *  grow:  Array has gained a new device - this is currently for
	 *		linear only
	 *  resync: mark as dirty so a resync will happen.
	 *  uuid:  Change the uuid of the array to match what is given
	 *  homehost:  update the recorded homehost
	 *  name:  update the name - preserving the homehost
	 *  _reshape_progress: record new reshape_progress position.
	 *
	 * Following are not relevant for this version:
	 *  sparc2.2 : update from old dodgey metadata
	 *  super-minor: change the preferred_minor number
	 *  summaries:  update redundant counters.
	 */
	/* Most branches are placeholders; -1 signals "not supported". */
	int rv = 0;
//	struct ddf_super *ddf = st->sb;
//	struct vd_config *vd = find_vdcr(ddf, info->container_member);
//	struct virtual_entry *ve = find_ve(ddf);

	/* we don't need to handle "force-*" or "assemble" as
	 * there is no need to 'trick' the kernel.  When the metadata is
	 * first updated to activate the array, all the implied modifications
	 * will just happen.
	 */

	if (strcmp(update, "grow") == 0) {
		/* FIXME */
	} else if (strcmp(update, "resync") == 0) {
//		info->resync_checkpoint = 0;
	} else if (strcmp(update, "homehost") == 0) {
		/* homehost is stored in controller->vendor_data,
		 * or it is when we are the vendor
		 */
//		if (info->vendor_is_local)
//			strcpy(ddf->controller.vendor_data, homehost);
		rv = -1;
	} else if (strcmp(update, "name") == 0) {
		/* name is stored in virtual_entry->name */
//		memset(ve->name, ' ', 16);
//		strncpy(ve->name, info->name, 16);
		rv = -1;
	} else if (strcmp(update, "_reshape_progress") == 0) {
		/* We don't support reshape yet */
	} else if (strcmp(update, "assemble") == 0 ) {
		/* Do nothing, just succeed */
		rv = 0;
	} else
		rv = -1;

//	update_all_csum(ddf);

	return rv;
}
2183
2184 static void make_header_guid(char *guid)
2185 {
2186 be32 stamp;
2187 /* Create a DDF Header of Virtual Disk GUID */
2188
2189 /* 24 bytes of fiction required.
2190 * first 8 are a 'vendor-id' - "Linux-MD"
2191 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2192 * Remaining 8 random number plus timestamp
2193 */
2194 memcpy(guid, T10, sizeof(T10));
2195 stamp = cpu_to_be32(0xdeadbeef);
2196 memcpy(guid+8, &stamp, 4);
2197 stamp = cpu_to_be32(0);
2198 memcpy(guid+12, &stamp, 4);
2199 stamp = cpu_to_be32(time(0) - DECADE);
2200 memcpy(guid+16, &stamp, 4);
2201 stamp._v32 = random32();
2202 memcpy(guid+20, &stamp, 4);
2203 }
2204
2205 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2206 {
2207 unsigned int i;
2208 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2209 if (all_ff(ddf->virt->entries[i].guid))
2210 return i;
2211 }
2212 return DDF_NOTFOUND;
2213 }
2214
2215 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2216 const char *name)
2217 {
2218 unsigned int i;
2219 if (name == NULL)
2220 return DDF_NOTFOUND;
2221 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2222 if (all_ff(ddf->virt->entries[i].guid))
2223 continue;
2224 if (!strncmp(name, ddf->virt->entries[i].name,
2225 sizeof(ddf->virt->entries[i].name)))
2226 return i;
2227 }
2228 return DDF_NOTFOUND;
2229 }
2230
2231 #ifndef MDASSEMBLE
2232 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2233 const char *guid)
2234 {
2235 unsigned int i;
2236 if (guid == NULL || all_ff(guid))
2237 return DDF_NOTFOUND;
2238 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2239 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2240 return i;
2241 return DDF_NOTFOUND;
2242 }
2243 #endif
2244
static int init_super_ddf(struct supertype *st,
			  mdu_array_info_t *info,
			  unsigned long long size, char *name, char *homehost,
			  int *uuid, unsigned long long data_offset)
{
	/* This is primarily called by Create when creating a new array.
	 * We will then get add_to_super called for each component, and then
	 * write_init_super called to write it out to each device.
	 * For DDF, Create can create on fresh devices or on a pre-existing
	 * array.
	 * To create on a pre-existing array a different method will be called.
	 * This one is just for fresh drives.
	 *
	 * We need to create the entire 'ddf' structure which includes:
	 *  DDF headers - these are easy.
	 *  Controller data - a Sector describing this controller .. not that
	 *		      this is a controller exactly.
	 *  Physical Disk Record - one entry per device, so
	 *			   leave plenty of space.
	 *  Virtual Disk Records - again, just leave plenty of space.
	 *			   This just lists VDs, doesn't give details.
	 *  Config records - describe the VDs that use this disk
	 *  DiskData  - describes 'this' device.
	 *  BadBlockManagement - empty
	 *  Diag Space - empty
	 *  Vendor Logs - Could we put bitmaps here?
	 *
	 * Returns 1 on success, 0 on failure (after printing an error).
	 */
	struct ddf_super *ddf;
	char hostname[17];
	int hostlen;
	int max_phys_disks, max_virt_disks;
	unsigned long long sector;
	int clen;
	int i;
	int pdsize, vdsize;
	struct phys_disk *pd;
	struct virtual_disk *vd;

	/* DDF has fixed metadata placement at the end of the device;
	 * an explicit data offset cannot be honoured. */
	if (data_offset != INVALID_SECTORS) {
		pr_err("data-offset not supported by DDF\n");
		return 0;
	}

	/* A superblock already loaded means we are creating a BVD inside
	 * an existing container - delegate to the BVD path. */
	if (st->sb)
		return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
					  data_offset);

	if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
		pr_err("%s could not allocate superblock\n", __func__);
		return 0;
	}
	memset(ddf, 0, sizeof(*ddf));
	st->sb = ddf;

	if (info == NULL) {
		/* zeroing superblock */
		return 0;
	}

	/* At least 32MB *must* be reserved for the ddf.  So let's just
	 * start 32MB from the end, and put the primary header there.
	 * Don't do secondary for now.
	 * We don't know exactly where that will be yet as it could be
	 * different on each device.  So just set up the lengths.
	 */

	ddf->anchor.magic = DDF_HEADER_MAGIC;
	make_header_guid(ddf->anchor.guid);

	memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
	ddf->anchor.seq = cpu_to_be32(1);
	ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
	ddf->anchor.openflag = 0xFF;
	ddf->anchor.foreignflag = 0;
	ddf->anchor.enforcegroups = 0; /* Is this best?? */
	ddf->anchor.pad0 = 0xff;
	memset(ddf->anchor.pad1, 0xff, 12);
	memset(ddf->anchor.header_ext, 0xff, 32);
	/* Real LBAs are filled in per-device at write time
	 * (_write_super_to_disk); ~0 marks them as unset. */
	ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
	ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
	ddf->anchor.type = DDF_HEADER_ANCHOR;
	memset(ddf->anchor.pad2, 0xff, 3);
	ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
	/* Put this at bottom of 32M reserved.. */
	ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
	max_phys_disks = 1023;   /* Should be enough, 4095 is also allowed */
	ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
	max_virt_disks = 255;  /* 15, 63, 255, 1024, 4095 are all allowed */
	ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks);
	ddf->max_part = 64;
	ddf->anchor.max_partitions = cpu_to_be16(ddf->max_part);
	ddf->mppe = 256;        /* 16, 64, 256, 1024, 4096 are all allowed */
	/* One sector of header plus room for mppe refnums (4 bytes) and
	 * LBA offsets (8 bytes) per element, rounded up to sectors. */
	ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
	ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
	ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
	memset(ddf->anchor.pad3, 0xff, 54);
	/* Controller section is one sector long immediately
	 * after the ddf header */
	sector = 1;
	ddf->anchor.controller_section_offset = cpu_to_be32(sector);
	ddf->anchor.controller_section_length = cpu_to_be32(1);
	sector += 1;

	/* phys is 8 sectors after that */
	pdsize = ROUND_UP(sizeof(struct phys_disk) +
			  sizeof(struct phys_disk_entry)*max_phys_disks,
			  512);
	/* The spec only permits these section sizes; anything else
	 * indicates a programming error above - abort loudly. */
	switch(pdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();
	}
	ddf->anchor.phys_section_offset = cpu_to_be32(sector);
	ddf->anchor.phys_section_length =
		cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
	sector += pdsize/512;

	/* virt is another 32 sectors */
	vdsize = ROUND_UP(sizeof(struct virtual_disk) +
			  sizeof(struct virtual_entry) * max_virt_disks,
			  512);
	switch(vdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();
	}
	ddf->anchor.virt_section_offset = cpu_to_be32(sector);
	ddf->anchor.virt_section_length =
		cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
	sector += vdsize/512;

	/* One config record per partition plus one for a spare record. */
	clen = ddf->conf_rec_len * (ddf->max_part+1);
	ddf->anchor.config_section_offset = cpu_to_be32(sector);
	ddf->anchor.config_section_length = cpu_to_be32(clen);
	sector += clen;

	ddf->anchor.data_section_offset = cpu_to_be32(sector);
	ddf->anchor.data_section_length = cpu_to_be32(1);
	sector += 1;

	/* BBM, diag and vendor sections are unused: zero length,
	 * all-ones offset. */
	ddf->anchor.bbm_section_length = cpu_to_be32(0);
	ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.diag_space_length = cpu_to_be32(0);
	ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.vendor_length = cpu_to_be32(0);
	ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);

	memset(ddf->anchor.pad4, 0xff, 256);

	/* Primary and secondary headers start as copies of the anchor;
	 * only type/openflag differ until write time. */
	memcpy(&ddf->primary, &ddf->anchor, 512);
	memcpy(&ddf->secondary, &ddf->anchor, 512);

	ddf->primary.openflag = 1; /* I guess.. */
	ddf->primary.type = DDF_HEADER_PRIMARY;

	ddf->secondary.openflag = 1; /* I guess.. */
	ddf->secondary.type = DDF_HEADER_SECONDARY;

	ddf->active = &ddf->primary;

	ddf->controller.magic = DDF_CONTROLLER_MAGIC;

	/* 24 more bytes of fiction required.
	 * first 8 are a 'vendor-id'  - "Linux-MD"
	 * Remaining 16 are serial number.... maybe a hostname would do?
	 */
	memcpy(ddf->controller.guid, T10, sizeof(T10));
	gethostname(hostname, sizeof(hostname));
	hostname[sizeof(hostname) - 1] = 0;
	hostlen = strlen(hostname);
	/* Right-align the hostname in the 24-byte GUID, then pad the gap
	 * between the vendor id and the hostname with spaces.
	 * NOTE(review): a 16-char hostname starts at offset 8, directly
	 * after the 8 vendor-id characters - presumably intentional. */
	memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
	for (i = strlen(T10) ; i+hostlen < 24; i++)
		ddf->controller.guid[i] = ' ';

	ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
	ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
	ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
	ddf->controller.type.sub_device_id = cpu_to_be16(0);
	memcpy(ddf->controller.product_id, "What Is My PID??", 16);
	memset(ddf->controller.pad, 0xff, 8);
	memset(ddf->controller.vendor_data, 0xff, 448);
	if (homehost && strlen(homehost) < 440)
		strcpy((char*)ddf->controller.vendor_data, homehost);

	/* NOTE(review): on allocation failure below, ddf stays attached
	 * to st->sb and is released by the caller's normal teardown. */
	if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
		pr_err("%s could not allocate pd\n", __func__);
		return 0;
	}
	ddf->phys = pd;
	ddf->pdsize = pdsize;

	memset(pd, 0xff, pdsize);
	memset(pd, 0, sizeof(*pd));
	pd->magic = DDF_PHYS_RECORDS_MAGIC;
	pd->used_pdes = cpu_to_be16(0);
	pd->max_pdes = cpu_to_be16(max_phys_disks);
	memset(pd->pad, 0xff, 52);
	/* All-0xff GUID marks a physical-disk slot as unused. */
	for (i = 0; i < max_phys_disks; i++)
		memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);

	if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
		pr_err("%s could not allocate vd\n", __func__);
		return 0;
	}
	ddf->virt = vd;
	ddf->vdsize = vdsize;
	memset(vd, 0, vdsize);
	vd->magic = DDF_VIRT_RECORDS_MAGIC;
	vd->populated_vdes = cpu_to_be16(0);
	vd->max_vdes = cpu_to_be16(max_virt_disks);
	memset(vd->pad, 0xff, 52);

	/* All-0xff entries mark virtual-disk slots as unused. */
	for (i=0; i<max_virt_disks; i++)
		memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));

	st->sb = ddf;
	ddf_set_updates_pending(ddf);
	return 1;
}
2463
static int chunk_to_shift(int chunksize)
{
	/* Convert a chunk size in bytes to the DDF "chunk shift":
	 * log2 of the chunk size expressed in 512-byte sectors. */
	int sectors = chunksize / 512;

	return ffs(sectors) - 1;
}
2468
2469 #ifndef MDASSEMBLE
struct extent {
	unsigned long long start, size;
};
/* qsort comparator: order extents by ascending start sector. */
static int cmp_extent(const void *av, const void *bv)
{
	const struct extent *lhs = av;
	const struct extent *rhs = bv;

	/* Yields -1, 0 or 1 without risking unsigned-subtraction wrap. */
	return (lhs->start > rhs->start) - (lhs->start < rhs->start);
}
2483
static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
{
	/* Find a list of used extents on the give physical device
	 * (dnum) of the given ddf.
	 * Return a malloced array of 'struct extent'
	 *
	 * The array is sorted by start sector and terminated by a
	 * sentinel entry with size==0 whose start is the device's
	 * config_size (i.e. the end of usable space).  Returns NULL if
	 * the disk is not cleanly online.  Caller frees the array.
	 */
	struct extent *rv;
	int n = 0;
	unsigned int i;
	__u16 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);

	/* Only a disk that is online and neither failed nor missing has
	 * meaningful extents. */
	if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
		return NULL;

	/* max_part possible extents plus the sentinel (+1 slack). */
	rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));

	for (i = 0; i < ddf->max_part; i++) {
		const struct vd_config *bvd;
		unsigned int ibvd;
		struct vcl *v = dl->vlist[i];
		/* Skip unused slots and configs that don't reference
		 * this disk. */
		if (v == NULL ||
		    get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
					     &bvd, &ibvd) == DDF_NOTFOUND)
			continue;
		rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
		rv[n].size = be64_to_cpu(bvd->blocks);
		n++;
	}
	qsort(rv, n, sizeof(*rv), cmp_extent);

	/* Sentinel: marks the end of allocatable space. */
	rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
	rv[n].size = 0;
	return rv;
}
2518 #endif
2519
static int init_super_ddf_bvd(struct supertype *st,
			      mdu_array_info_t *info,
			      unsigned long long size,
			      char *name, char *homehost,
			      int *uuid, unsigned long long data_offset)
{
	/* We are creating a BVD inside a pre-existing container.
	 * so st->sb is already set.
	 * We need to create a new vd_config and a new virtual_entry
	 *
	 * Returns 1 on success, 0 on failure (duplicate name, no free
	 * slot, allocation failure, or unsupported level/layout).
	 */
	struct ddf_super *ddf = st->sb;
	unsigned int venum, i;
	struct virtual_entry *ve;
	struct vcl *vcl;
	struct vd_config *vc;

	if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
		pr_err("This ddf already has an array called %s\n", name);
		return 0;
	}
	venum = find_unused_vde(ddf);
	if (venum == DDF_NOTFOUND) {
		pr_err("Cannot find spare slot for virtual disk\n");
		return 0;
	}
	ve = &ddf->virt->entries[venum];

	/* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
	 * timestamp, random number
	 */
	make_header_guid(ve->guid);
	ve->unit = cpu_to_be16(info->md_minor);
	ve->pad0 = 0xFFFF;
	/* guid_crc is computed over the container (anchor) GUID. */
	ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
				  DDF_GUID_LEN);
	ve->type = cpu_to_be16(0);
	ve->state = DDF_state_degraded; /* Will be modified as devices are added */
	if (info->state & 1) /* clean */
		ve->init_state = DDF_init_full;
	else
		ve->init_state = DDF_init_not;

	memset(ve->pad1, 0xff, 14);
	/* DDF names are space-padded, not NUL-terminated. */
	memset(ve->name, ' ', 16);
	if (name)
		strncpy(ve->name, name, 16);
	ddf->virt->populated_vdes =
		cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);

	/* Now create a new vd_config */
	if (posix_memalign((void**)&vcl, 512,
		           (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
		pr_err("%s could not allocate vd_config\n", __func__);
		return 0;
	}
	vcl->vcnum = venum;
	vcl->block_sizes = NULL; /* FIXME not for CONCAT */
	vc = &vcl->conf;

	vc->magic = DDF_VD_CONF_MAGIC;
	memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
	vc->timestamp = cpu_to_be32(time(0)-DECADE);
	vc->seqnum = cpu_to_be32(1);
	memset(vc->pad0, 0xff, 24);
	vc->chunk_shift = chunk_to_shift(info->chunk_size);
	/* layout_md2ddf fills prim/sec element counts from the md
	 * level/layout; reject combinations DDF cannot express. */
	if (layout_md2ddf(info, vc) == -1 ||
	    be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
		pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
		       __func__, info->level, info->layout, info->raid_disks);
		free(vcl);
		return 0;
	}
	vc->sec_elmnt_seq = 0;
	if (alloc_other_bvds(ddf, vcl) != 0) {
		pr_err("%s could not allocate other bvds\n",
		       __func__);
		free(vcl);
		return 0;
	}
	/* Sizes are kept in 512-byte sectors; info->size is in KiB. */
	vc->blocks = cpu_to_be64(info->size * 2);
	vc->array_blocks = cpu_to_be64(
		calc_array_size(info->level, info->raid_disks, info->layout,
				info->chunk_size, info->size*2));
	memset(vc->pad1, 0xff, 8);
	/* No dedicated spares assigned yet. */
	vc->spare_refs[0] = cpu_to_be32(0xffffffff);
	vc->spare_refs[1] = cpu_to_be32(0xffffffff);
	vc->spare_refs[2] = cpu_to_be32(0xffffffff);
	vc->spare_refs[3] = cpu_to_be32(0xffffffff);
	vc->spare_refs[4] = cpu_to_be32(0xffffffff);
	vc->spare_refs[5] = cpu_to_be32(0xffffffff);
	vc->spare_refs[6] = cpu_to_be32(0xffffffff);
	vc->spare_refs[7] = cpu_to_be32(0xffffffff);
	memset(vc->cache_pol, 0, 8);
	vc->bg_rate = 0x80;
	memset(vc->pad2, 0xff, 3);
	memset(vc->pad3, 0xff, 52);
	memset(vc->pad4, 0xff, 192);
	memset(vc->v0, 0xff, 32);
	memset(vc->v1, 0xff, 32);
	memset(vc->v2, 0xff, 16);
	memset(vc->v3, 0xff, 16);
	memset(vc->vendor, 0xff, 32);

	/* refnum table (4 bytes/slot) starts unused (0xff); the LBA
	 * offset table (8 bytes/slot) that follows it starts at 0. */
	memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
	memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);

	/* Secondary BVDs are copies of the first, differing only in
	 * their sequence number. */
	for (i = 1; i < vc->sec_elmnt_count; i++) {
		memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
		vcl->other_bvds[i-1]->sec_elmnt_seq = i;
	}

	vcl->next = ddf->conflist;
	ddf->conflist = vcl;
	ddf->currentconf = vcl;
	ddf_set_updates_pending(ddf);
	return 1;
}
2637
2638 #ifndef MDASSEMBLE
static void add_to_super_ddf_bvd(struct supertype *st,
				 mdu_disk_info_t *dk, int fd, char *devname)
{
	/* fd and devname identify a device within the ddf container (st).
	 * dk identifies a location in the new BVD.
	 * We need to find suitable free space in that device and update
	 * the phys_refnum and lba_offset for the newly created vd_config.
	 * We might also want to update the type in the phys_disk
	 * section.
	 *
	 * Alternately: fd == -1 and we have already chosen which device to
	 * use and recorded in dlist->raid_disk;
	 *
	 * Failures (disk not found, not in sync, no extents, no space,
	 * no free vlist slot) are silent: the function simply returns
	 * without modifying the config.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	struct vd_config *vc;
	unsigned int i;
	unsigned long long blocks, pos, esize;
	struct extent *ex;
	unsigned int raid_disk = dk->raid_disk;

	if (fd == -1) {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->raiddisk == dk->raid_disk)
				break;
	} else {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->major == dk->major &&
			    dl->minor == dk->minor)
				break;
	}
	if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
		return;

	vc = &ddf->currentconf->conf;
	if (vc->sec_elmnt_count > 1) {
		/* Multi-BVD (e.g. RAID10-like) layout: select the BVD
		 * this raid_disk belongs to and reduce the index to be
		 * relative to that BVD. */
		unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
		if (raid_disk >= n)
			vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
		raid_disk %= n;
	}

	ex = get_extents(ddf, dl);
	if (!ex)
		return;

	i = 0; pos = 0;
	blocks = be64_to_cpu(vc->blocks);
	if (ddf->currentconf->block_sizes)
		blocks = ddf->currentconf->block_sizes[dk->raid_disk];

	/* First-fit: walk the sorted used-extent list looking for a gap
	 * of at least 'blocks' sectors.  The sentinel entry (size==0 at
	 * config_size) terminates the loop. */
	do {
		esize = ex[i].start - pos;
		if (esize >= blocks)
			break;
		pos = ex[i].start + ex[i].size;
		i++;
	} while (ex[i-1].size);

	free(ex);
	if (esize < blocks)
		return;

	ddf->currentdev = dk->raid_disk;
	vc->phys_refnum[raid_disk] = dl->disk.refnum;
	LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);

	/* Record this config in the disk's vlist so get_extents sees
	 * the new allocation. */
	for (i = 0; i < ddf->max_part ; i++)
		if (dl->vlist[i] == NULL)
			break;
	if (i == ddf->max_part)
		return;
	dl->vlist[i] = ddf->currentconf;

	if (fd >= 0)
		dl->fd = fd;
	if (devname)
		dl->devname = devname;

	/* Check if we can mark array as optimal yet */
	i = ddf->currentconf->vcnum;
	ddf->virt->entries[i].state =
		(ddf->virt->entries[i].state & ~DDF_state_mask)
		| get_svd_state(ddf, ddf->currentconf);
	/* The disk is now an active VD member, no longer a global spare. */
	be16_clear(ddf->phys->entries[dl->pdnum].type,
		   cpu_to_be16(DDF_Global_Spare));
	be16_set(ddf->phys->entries[dl->pdnum].type,
		 cpu_to_be16(DDF_Active_in_VD));
	dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
		__func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
		ddf->currentconf->vcnum, guid_str(vc->guid),
		dk->raid_disk);
	ddf_set_updates_pending(ddf);
}
2734
2735 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2736 {
2737 unsigned int i;
2738 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2739 if (all_ff(ddf->phys->entries[i].guid))
2740 return i;
2741 }
2742 return DDF_NOTFOUND;
2743 }
2744
2745 static void _set_config_size(struct phys_disk_entry *pde, const struct dl *dl)
2746 {
2747 __u64 cfs, t;
2748 cfs = min(dl->size - 32*1024*2ULL, be64_to_cpu(dl->primary_lba));
2749 t = be64_to_cpu(dl->secondary_lba);
2750 if (t != ~(__u64)0)
2751 cfs = min(cfs, t);
2752 /*
2753 * Some vendor DDF structures interpret workspace_lba
2754 * very differently than we do: Make a sanity check on the value.
2755 */
2756 t = be64_to_cpu(dl->workspace_lba);
2757 if (t < cfs) {
2758 __u64 wsp = cfs - t;
2759 if (wsp > 1024*1024*2ULL && wsp > dl->size / 16) {
2760 pr_err("%s: %x:%x: workspace size 0x%llx too big, ignoring\n",
2761 __func__, dl->major, dl->minor, wsp);
2762 } else
2763 cfs = t;
2764 }
2765 pde->config_size = cpu_to_be64(cfs);
2766 dprintf("%s: %x:%x config_size %llx, DDF structure is %llx blocks\n",
2767 __func__, dl->major, dl->minor, cfs, dl->size-cfs);
2768 }
2769
2770 /* Add a device to a container, either while creating it or while
2771 * expanding a pre-existing container
2772 */
2773 static int add_to_super_ddf(struct supertype *st,
2774 mdu_disk_info_t *dk, int fd, char *devname,
2775 unsigned long long data_offset)
2776 {
2777 struct ddf_super *ddf = st->sb;
2778 struct dl *dd;
2779 time_t now;
2780 struct tm *tm;
2781 unsigned long long size;
2782 struct phys_disk_entry *pde;
2783 unsigned int n, i;
2784 struct stat stb;
2785 __u32 *tptr;
2786
2787 if (ddf->currentconf) {
2788 add_to_super_ddf_bvd(st, dk, fd, devname);
2789 return 0;
2790 }
2791
2792 /* This is device numbered dk->number. We need to create
2793 * a phys_disk entry and a more detailed disk_data entry.
2794 */
2795 fstat(fd, &stb);
2796 n = find_unused_pde(ddf);
2797 if (n == DDF_NOTFOUND) {
2798 pr_err("%s: No free slot in array, cannot add disk\n",
2799 __func__);
2800 return 1;
2801 }
2802 pde = &ddf->phys->entries[n];
2803 get_dev_size(fd, NULL, &size);
2804 if (size <= 32*1024*1024) {
2805 pr_err("%s: device size must be at least 32MB\n",
2806 __func__);
2807 return 1;
2808 }
2809 size >>= 9;
2810
2811 if (posix_memalign((void**)&dd, 512,
2812 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2813 pr_err("%s could allocate buffer for new disk, aborting\n",
2814 __func__);
2815 return 1;
2816 }
2817 dd->major = major(stb.st_rdev);
2818 dd->minor = minor(stb.st_rdev);
2819 dd->devname = devname;
2820 dd->fd = fd;
2821 dd->spare = NULL;
2822
2823 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2824 now = time(0);
2825 tm = localtime(&now);
2826 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2827 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2828 tptr = (__u32 *)(dd->disk.guid + 16);
2829 *tptr++ = random32();
2830 *tptr = random32();
2831
2832 do {
2833 /* Cannot be bothered finding a CRC of some irrelevant details*/
2834 dd->disk.refnum._v32 = random32();
2835 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2836 i > 0; i--)
2837 if (be32_eq(ddf->phys->entries[i-1].refnum,
2838 dd->disk.refnum))
2839 break;
2840 } while (i > 0);
2841
2842 dd->disk.forced_ref = 1;
2843 dd->disk.forced_guid = 1;
2844 memset(dd->disk.vendor, ' ', 32);
2845 memcpy(dd->disk.vendor, "Linux", 5);
2846 memset(dd->disk.pad, 0xff, 442);
2847 for (i = 0; i < ddf->max_part ; i++)
2848 dd->vlist[i] = NULL;
2849
2850 dd->pdnum = n;
2851
2852 if (st->update_tail) {
2853 int len = (sizeof(struct phys_disk) +
2854 sizeof(struct phys_disk_entry));
2855 struct phys_disk *pd;
2856
2857 pd = xmalloc(len);
2858 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2859 pd->used_pdes = cpu_to_be16(n);
2860 pde = &pd->entries[0];
2861 dd->mdupdate = pd;
2862 } else
2863 ddf->phys->used_pdes = cpu_to_be16(
2864 1 + be16_to_cpu(ddf->phys->used_pdes));
2865
2866 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2867 pde->refnum = dd->disk.refnum;
2868 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2869 pde->state = cpu_to_be16(DDF_Online);
2870 dd->size = size;
2871 /*
2872 * If there is already a device in dlist, try to reserve the same
2873 * amount of workspace. Otherwise, use 32MB.
2874 * We checked disk size above already.
2875 */
2876 #define __calc_lba(new, old, lba, mb) do { \
2877 unsigned long long dif; \
2878 if ((old) != NULL) \
2879 dif = (old)->size - be64_to_cpu((old)->lba); \
2880 else \
2881 dif = (new)->size; \
2882 if ((new)->size > dif) \
2883 (new)->lba = cpu_to_be64((new)->size - dif); \
2884 else \
2885 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2886 } while (0)
2887 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2888 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2889 if (ddf->dlist == NULL ||
2890 be64_to_cpu(ddf->dlist->secondary_lba) != ~(__u64)0)
2891 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2892 _set_config_size(pde, dd);
2893
2894 sprintf(pde->path, "%17.17s","Information: nil") ;
2895 memset(pde->pad, 0xff, 6);
2896
2897 if (st->update_tail) {
2898 dd->next = ddf->add_list;
2899 ddf->add_list = dd;
2900 } else {
2901 dd->next = ddf->dlist;
2902 ddf->dlist = dd;
2903 ddf_set_updates_pending(ddf);
2904 }
2905
2906 return 0;
2907 }
2908
2909 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2910 {
2911 struct ddf_super *ddf = st->sb;
2912 struct dl *dl;
2913
2914 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2915 * disappeared from the container.
2916 * We need to arrange that it disappears from the metadata and
2917 * internal data structures too.
2918 * Most of the work is done by ddf_process_update which edits
2919 * the metadata and closes the file handle and attaches the memory
2920 * where free_updates will free it.
2921 */
2922 for (dl = ddf->dlist; dl ; dl = dl->next)
2923 if (dl->major == dk->major &&
2924 dl->minor == dk->minor)
2925 break;
2926 if (!dl)
2927 return -1;
2928
2929 if (st->update_tail) {
2930 int len = (sizeof(struct phys_disk) +
2931 sizeof(struct phys_disk_entry));
2932 struct phys_disk *pd;
2933
2934 pd = xmalloc(len);
2935 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2936 pd->used_pdes = cpu_to_be16(dl->pdnum);
2937 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2938 append_metadata_update(st, pd, len);
2939 }
2940 return 0;
2941 }
2942 #endif
2943
2944 /*
2945 * This is the write_init_super method for a ddf container. It is
2946 * called when creating a container or adding another device to a
2947 * container.
2948 */
2949
static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
{
	/* Write one complete copy of the DDF structure (header,
	 * controller, phys, virt, config records, disk data) to disk d,
	 * anchored at either the primary or secondary header LBA.
	 * The header is written twice: first with openflag=1, then -
	 * after all sections - rewritten with openflag=0 to mark the
	 * copy consistent.
	 * Returns 1 on success, 0 on failure or if 'type' has no LBA.
	 */
	unsigned long long sector;
	struct ddf_header *header;
	int fd, i, n_config, conf_size, buf_size;
	int ret = 0;
	char *conf;

	fd = d->fd;

	switch (type) {
	case DDF_HEADER_PRIMARY:
		header = &ddf->primary;
		sector = be64_to_cpu(header->primary_lba);
		break;
	case DDF_HEADER_SECONDARY:
		header = &ddf->secondary;
		sector = be64_to_cpu(header->secondary_lba);
		break;
	default:
		return 0;
	}
	/* ~0 means this copy is not configured - nothing to write. */
	if (sector == ~(__u64)0)
		return 0;

	header->type = type;
	header->openflag = 1;
	header->crc = calc_crc(header, 512);

	lseek64(fd, sector<<9, 0);
	if (write(fd, header, 512) < 0)
		goto out;

	/* Sections follow the header contiguously, in the order laid
	 * out by init_super_ddf. */
	ddf->controller.crc = calc_crc(&ddf->controller, 512);
	if (write(fd, &ddf->controller, 512) < 0)
		goto out;

	ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
	if (write(fd, ddf->phys, ddf->pdsize) < 0)
		goto out;
	ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
	if (write(fd, ddf->virt, ddf->vdsize) < 0)
		goto out;

	/* Now write lots of config records. */
	n_config = ddf->max_part;
	conf_size = ddf->conf_rec_len * 512;
	conf = ddf->conf;
	buf_size = conf_size * (n_config + 1);
	if (!conf) {
		/* Lazily allocate (and cache) the staging buffer. */
		if (posix_memalign((void**)&conf, 512, buf_size) != 0)
			goto out;
		ddf->conf = conf;
	}
	for (i = 0 ; i <= n_config ; i++) {
		struct vcl *c;
		struct vd_config *vdc = NULL;
		if (i == n_config) {
			/* The final slot holds the spare record, if any. */
			c = (struct vcl *)d->spare;
			if (c)
				vdc = &c->conf;
		} else {
			unsigned int dummy;
			c = d->vlist[i];
			if (c)
				get_pd_index_from_refnum(
					c, d->disk.refnum,
					ddf->mppe,
					(const struct vd_config **)&vdc,
					&dummy);
		}
		if (c) {
			dprintf("writing conf record %i on disk %08x for %s/%u\n",
				i, be32_to_cpu(d->disk.refnum),
				guid_str(vdc->guid),
				vdc->sec_elmnt_seq);
			vdc->seqnum = header->seq;
			vdc->crc = calc_crc(vdc, conf_size);
			memcpy(conf + i*conf_size, vdc, conf_size);
		} else
			/* Unused slots are written as all-0xff. */
			memset(conf + i*conf_size, 0xff, conf_size);
	}
	if (write(fd, conf, buf_size) != buf_size)
		goto out;

	d->disk.crc = calc_crc(&d->disk, 512);
	if (write(fd, &d->disk, 512) < 0)
		goto out;

	ret = 1;
out:
	/* Rewrite the header with openflag cleared; this commits the
	 * copy (or, on the error path, at least closes it). */
	header->openflag = 0;
	header->crc = calc_crc(header, 512);

	lseek64(fd, sector<<9, 0);
	if (write(fd, header, 512) < 0)
		ret = 0;

	return ret;
}
3050
static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
{
	/* Write the full DDF metadata (primary copy, secondary copy if
	 * configured, and the anchor in the device's last sector) to one
	 * member disk.  Returns 1 on success, 0 on failure or if the
	 * disk has no open fd. */
	unsigned long long size;
	int fd = d->fd;
	if (fd < 0)
		return 0;

	/* We need to fill in the primary, (secondary) and workspace
	 * lba's in the headers, set their checksums,
	 * Also checksum phys, virt....
	 *
	 * Then write everything out, finally the anchor is written.
	 */
	get_dev_size(fd, NULL, &size);
	size /= 512;
	/* Use the per-disk LBAs if recorded; otherwise fall back to the
	 * defaults: workspace 32MiB and primary 16MiB from the end. */
	if (be64_to_cpu(d->workspace_lba) != 0ULL)
		ddf->anchor.workspace_lba = d->workspace_lba;
	else
		ddf->anchor.workspace_lba =
			cpu_to_be64(size - 32*1024*2);
	if (be64_to_cpu(d->primary_lba) != 0ULL)
		ddf->anchor.primary_lba = d->primary_lba;
	else
		ddf->anchor.primary_lba =
			cpu_to_be64(size - 16*1024*2);
	if (be64_to_cpu(d->secondary_lba) != 0ULL)
		ddf->anchor.secondary_lba = d->secondary_lba;
	else
		ddf->anchor.secondary_lba =
			cpu_to_be64(size - 32*1024*2);
	ddf->anchor.seq = ddf->active->seq;
	/* Primary/secondary headers mirror the anchor; their type and
	 * openflag are fixed up in __write_ddf_structure. */
	memcpy(&ddf->primary, &ddf->anchor, 512);
	memcpy(&ddf->secondary, &ddf->anchor, 512);

	ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
	ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
	ddf->anchor.crc = calc_crc(&ddf->anchor, 512);

	if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
		return 0;

	if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
		return 0;

	/* The anchor always lives in the very last sector. */
	lseek64(fd, (size-1)*512, SEEK_SET);
	if (write(fd, &ddf->anchor, 512) < 0)
		return 0;

	return 1;
}
3101
3102 #ifndef MDASSEMBLE
3103 static int __write_init_super_ddf(struct supertype *st)
3104 {
3105 struct ddf_super *ddf = st->sb;
3106 struct dl *d;
3107 int attempts = 0;
3108 int successes = 0;
3109
3110 pr_state(ddf, __func__);
3111
3112 /* try to write updated metadata,
3113 * if we catch a failure move on to the next disk
3114 */
3115 for (d = ddf->dlist; d; d=d->next) {
3116 attempts++;
3117 successes += _write_super_to_disk(ddf, d);
3118 }
3119
3120 return attempts != successes;
3121 }
3122
static int write_init_super_ddf(struct supertype *st)
{
	/* Commit a freshly initialized container or VD.  Under mdmon
	 * (st->update_tail set) this queues metadata updates instead of
	 * writing directly; otherwise it writes to all disks itself.
	 * Returns 0 on the queued paths, or the result of
	 * __write_init_super_ddf on the direct path. */
	struct ddf_super *ddf = st->sb;
	struct vcl *currentconf = ddf->currentconf;

	/* We are done with currentconf - reset it so st refers to the container */
	ddf->currentconf = NULL;

	if (st->update_tail) {
		/* queue the virtual_disk and vd_config as metadata updates */
		struct virtual_disk *vd;
		struct vd_config *vc;
		int len, tlen;
		unsigned int i;

		if (!currentconf) {
			/* Must be adding a physical disk to the container */
			int len = (sizeof(struct phys_disk) +
				   sizeof(struct phys_disk_entry));

			/* adding a disk to the container. */
			if (!ddf->add_list)
				return 0;

			/* The update record was prepared by
			 * add_to_super_ddf; hand over its ownership. */
			append_metadata_update(st, ddf->add_list->mdupdate, len);
			ddf->add_list->mdupdate = NULL;
			return 0;
		}

		/* Newly created VD */

		/* First the virtual disk. We have a slightly fake header */
		len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
		vd = xmalloc(len);
		*vd = *ddf->virt;
		vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
		/* populated_vdes abused to carry the entry index. */
		vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
		append_metadata_update(st, vd, len);

		/* Then the vd_config */
		len = ddf->conf_rec_len * 512;
		tlen = len * currentconf->conf.sec_elmnt_count;
		vc = xmalloc(tlen);
		memcpy(vc, &currentconf->conf, len);
		/* Secondary BVD configs follow the first, back to back. */
		for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
			memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
			       len);
		append_metadata_update(st, vc, tlen);

		/* FIXME I need to close the fds! */
		return 0;
	} else {
		struct dl *d;
		/* Creating a container: wipe any stale metadata first. */
		if (!currentconf)
			for (d = ddf->dlist; d; d=d->next)
				while (Kill(d->devname, NULL, 0, -1, 1) == 0);
		return __write_init_super_ddf(st);
	}
}
3182
3183 #endif
3184
3185 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3186 unsigned long long data_offset)
3187 {
3188 /* We must reserve the last 32Meg */
3189 if (devsize <= 32*1024*2)
3190 return 0;
3191 return devsize - 32*1024*2;
3192 }
3193
3194 #ifndef MDASSEMBLE
3195
static int reserve_space(struct supertype *st, int raiddisks,
			 unsigned long long size, int chunk,
			 unsigned long long *freesize)
{
	/* Find 'raiddisks' spare extents at least 'size' big (but
	 * only caring about multiples of 'chunk') and remember
	 * them (in dl->raiddisk / dl->esize).  If size==0, find the
	 * largest size possible.
	 * Report available size in *freesize.
	 * If space cannot be found, fail (return 0); otherwise return 1.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	int cnt = 0;

	/* Clear any reservation left over from a previous attempt. */
	for (dl = ddf->dlist; dl ; dl=dl->next) {
		dl->raiddisk = -1;
		dl->esize = 0;
	}
	/* Now find largest extent on each device */
	for (dl = ddf->dlist ; dl ; dl=dl->next) {
		struct extent *e = get_extents(ddf, dl);
		unsigned long long pos = 0;
		int i = 0;
		int found = 0;
		unsigned long long minsize = size;

		if (size == 0)
			minsize = chunk;

		if (!e)
			continue;
		/* The extent list is terminated by a zero-size entry;
		 * the gaps between used extents are the free regions. */
		do {
			unsigned long long esize;
			esize = e[i].start - pos;
			if (esize >= minsize) {
				found = 1;
				minsize = esize;	/* track largest gap */
			}
			pos = e[i].start + e[i].size;
			i++;
		} while (e[i-1].size);
		if (found) {
			cnt++;
			dl->esize = minsize;
		}
		free(e);
	}
	if (cnt < raiddisks) {
		pr_err("not enough devices with space to create array.\n");
		return 0; /* not enough free extents large enough */
	}
	if (size == 0) {
		/* choose the largest size of which there are at least 'raiddisk' */
		for (dl = ddf->dlist ; dl ; dl=dl->next) {
			struct dl *dl2;
			if (dl->esize <= size)
				continue;
			/* This is bigger than 'size', see if there are enough */
			cnt = 0;
			for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
				if (dl2->esize >= dl->esize)
					cnt++;
			if (cnt >= raiddisks)
				size = dl->esize;
		}
		if (chunk) {
			/* Round down to a whole number of chunks. */
			size = size / chunk;
			size *= chunk;
		}
		*freesize = size;
		if (size < 32) {
			pr_err("not enough spare devices to create array.\n");
			return 0;
		}
	}
	/* We have a 'size' of which there are enough spaces.
	 * We simply do a first-fit */
	cnt = 0;
	for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
		if (dl->esize < size)
			continue;

		dl->raiddisk = cnt;
		cnt++;
	}
	return 1;
}
3283
static int validate_geometry_ddf(struct supertype *st,
				 int level, int layout, int raiddisks,
				 int *chunk, unsigned long long size,
				 unsigned long long data_offset,
				 char *dev, unsigned long long *freesize,
				 int verbose)
{
	int fd;
	struct mdinfo *sra;
	int cfd;

	/* ddf potentially supports lots of things, but it depends on
	 * what devices are offered (and maybe kernel version?)
	 * If given unused devices, we will make a container.
	 * If given devices in a container, we will make a BVD.
	 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
	 * Returns 1 if the geometry is acceptable, 0 otherwise.
	 */

	if (*chunk == UnSet)
		*chunk = DEFAULT_CHUNK;

	/* No explicit level means we are creating a container. */
	if (level == LEVEL_NONE)
		level = LEVEL_CONTAINER;
	if (level == LEVEL_CONTAINER) {
		/* Must be a fresh device to add to a container */
		return validate_geometry_ddf_container(st, level, layout,
						       raiddisks, *chunk,
						       size, data_offset, dev,
						       freesize,
						       verbose);
	}

	if (!dev) {
		/* No device given: only check that DDF can express the
		 * requested level/layout, and - when creating inside a
		 * known container - that enough free space exists. */
		mdu_array_info_t array = {
			.level = level,
			.layout = layout,
			.raid_disks = raiddisks
		};
		struct vd_config conf;
		if (layout_md2ddf(&array, &conf) == -1) {
			if (verbose)
				pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
				       level, layout, raiddisks);
			return 0;
		}
		/* Should check layout? etc */

		if (st->sb && freesize) {
			/* --create was given a container to create in.
			 * So we need to check that there are enough
			 * free spaces and return the amount of space.
			 * We may as well remember which drives were
			 * chosen so that add_to_super/getinfo_super
			 * can return them.
			 */
			return reserve_space(st, raiddisks, size, *chunk, freesize);
		}
		return 1;
	}

	if (st->sb) {
		/* A container has already been opened, so we are
		 * creating in there.  Maybe a BVD, maybe an SVD.
		 * Should make a distinction one day.
		 */
		return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
						 chunk, size, data_offset, dev,
						 freesize,
						 verbose);
	}
	/* This is the first device for the array.
	 * If it is a container, we read it in and do automagic allocations,
	 * no other devices should be given.
	 * Otherwise it must be a member device of a container, and we
	 * do manual allocation.
	 * Later we should check for a BVD and make an SVD.
	 */
	fd = open(dev, O_RDONLY|O_EXCL, 0);
	if (fd >= 0) {
		/* Device is unused: it can only become a container member.
		 * NOTE(review): 'sra' from sysfs_read() appears to leak on
		 * this path - confirm whether sysfs_free() is needed. */
		sra = sysfs_read(fd, NULL, GET_VERSION);
		close(fd);
		if (sra && sra->array.major_version == -1 &&
		    strcmp(sra->text_version, "ddf") == 0) {
			/* load super */
			/* find space for 'n' devices. */
			/* remember the devices */
			/* Somehow return the fact that we have enough */
		}

		if (verbose)
			pr_err("ddf: Cannot create this array "
			       "on device %s - a container is required.\n",
			       dev);
		return 0;
	}
	/* O_EXCL failed: retry without it to see whether the device is
	 * merely busy (possibly held by a DDF container). */
	if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
		if (verbose)
			pr_err("ddf: Cannot open %s: %s\n",
			       dev, strerror(errno));
		return 0;
	}
	/* Well, it is in use by someone, maybe a 'ddf' container. */
	cfd = open_container(fd);
	if (cfd < 0) {
		close(fd);
		if (verbose)
			pr_err("ddf: Cannot use %s: %s\n",
			       dev, strerror(EBUSY));
		return 0;
	}
	sra = sysfs_read(cfd, NULL, GET_VERSION);
	close(fd);
	if (sra && sra->array.major_version == -1 &&
	    strcmp(sra->text_version, "ddf") == 0) {
		/* This is a member of a ddf container.  Load the container
		 * and try to create a bvd
		 */
		struct ddf_super *ddf;
		if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
			st->sb = ddf;
			strcpy(st->container_devnm, fd2devnm(cfd));
			close(cfd);
			return validate_geometry_ddf_bvd(st, level, layout,
							 raiddisks, chunk, size,
							 data_offset,
							 dev, freesize,
							 verbose);
		}
		close(cfd);
	} else /* device may belong to a different container */
		return 0;

	return 1;
}
3418
3419 static int
3420 validate_geometry_ddf_container(struct supertype *st,
3421 int level, int layout, int raiddisks,
3422 int chunk, unsigned long long size,
3423 unsigned long long data_offset,
3424 char *dev, unsigned long long *freesize,
3425 int verbose)
3426 {
3427 int fd;
3428 unsigned long long ldsize;
3429
3430 if (level != LEVEL_CONTAINER)
3431 return 0;
3432 if (!dev)
3433 return 1;
3434
3435 fd = open(dev, O_RDONLY|O_EXCL, 0);
3436 if (fd < 0) {
3437 if (verbose)
3438 pr_err("ddf: Cannot open %s: %s\n",
3439 dev, strerror(errno));
3440 return 0;
3441 }
3442 if (!get_dev_size(fd, dev, &ldsize)) {
3443 close(fd);
3444 return 0;
3445 }
3446 close(fd);
3447
3448 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3449 if (*freesize == 0)
3450 return 0;
3451
3452 return 1;
3453 }
3454
3455 static int validate_geometry_ddf_bvd(struct supertype *st,
3456 int level, int layout, int raiddisks,
3457 int *chunk, unsigned long long size,
3458 unsigned long long data_offset,
3459 char *dev, unsigned long long *freesize,
3460 int verbose)
3461 {
3462 struct stat stb;
3463 struct ddf_super *ddf = st->sb;
3464 struct dl *dl;
3465 unsigned long long pos = 0;
3466 unsigned long long maxsize;
3467 struct extent *e;
3468 int i;
3469 /* ddf/bvd supports lots of things, but not containers */
3470 if (level == LEVEL_CONTAINER) {
3471 if (verbose)
3472 pr_err("DDF cannot create a container within an container\n");
3473 return 0;
3474 }
3475 /* We must have the container info already read in. */
3476 if (!ddf)
3477 return 0;
3478
3479 if (!dev) {
3480 /* General test: make sure there is space for
3481 * 'raiddisks' device extents of size 'size'.
3482 */
3483 unsigned long long minsize = size;
3484 int dcnt = 0;
3485 if (minsize == 0)
3486 minsize = 8;
3487 for (dl = ddf->dlist; dl ; dl = dl->next) {
3488 int found = 0;
3489 pos = 0;
3490
3491 i = 0;
3492 e = get_extents(ddf, dl);
3493 if (!e) continue;
3494 do {
3495 unsigned long long esize;
3496 esize = e[i].start - pos;
3497 if (esize >= minsize)
3498 found = 1;
3499 pos = e[i].start + e[i].size;
3500 i++;
3501 } while (e[i-1].size);
3502 if (found)
3503 dcnt++;
3504 free(e);
3505 }
3506 if (dcnt < raiddisks) {
3507 if (verbose)
3508 pr_err("ddf: Not enough devices with "
3509 "space for this array (%d < %d)\n",
3510 dcnt, raiddisks);
3511 return 0;
3512 }
3513 return 1;
3514 }
3515 /* This device must be a member of the set */
3516 if (stat(dev, &stb) < 0)
3517 return 0;
3518 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3519 return 0;
3520 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3521 if (dl->major == (int)major(stb.st_rdev) &&
3522 dl->minor == (int)minor(stb.st_rdev))
3523 break;
3524 }
3525 if (!dl) {
3526 if (verbose)
3527 pr_err("ddf: %s is not in the "
3528 "same DDF set\n",
3529 dev);
3530 return 0;
3531 }
3532 e = get_extents(ddf, dl);
3533 maxsize = 0;
3534 i = 0;
3535 if (e)
3536 do {
3537 unsigned long long esize;
3538 esize = e[i].start - pos;
3539 if (esize >= maxsize)
3540 maxsize = esize;
3541 pos = e[i].start + e[i].size;
3542 i++;
3543 } while (e[i-1].size);
3544 *freesize = maxsize;
3545 // FIXME here I am
3546
3547 return 1;
3548 }
3549
3550 static int load_super_ddf_all(struct supertype *st, int fd,
3551 void **sbp, char *devname)
3552 {
3553 struct mdinfo *sra;
3554 struct ddf_super *super;
3555 struct mdinfo *sd, *best = NULL;
3556 int bestseq = 0;
3557 int seq;
3558 char nm[20];
3559 int dfd;
3560
3561 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3562 if (!sra)
3563 return 1;
3564 if (sra->array.major_version != -1 ||
3565 sra->array.minor_version != -2 ||
3566 strcmp(sra->text_version, "ddf") != 0)
3567 return 1;
3568
3569 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3570 return 1;
3571 memset(super, 0, sizeof(*super));
3572
3573 /* first, try each device, and choose the best ddf */
3574 for (sd = sra->devs ; sd ; sd = sd->next) {
3575 int rv;
3576 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3577 dfd = dev_open(nm, O_RDONLY);
3578 if (dfd < 0)
3579 return 2;
3580 rv = load_ddf_headers(dfd, super, NULL);
3581 close(dfd);
3582 if (rv == 0) {
3583 seq = be32_to_cpu(super->active->seq);
3584 if (super->active->openflag)
3585 seq--;
3586 if (!best || seq > bestseq) {
3587 bestseq = seq;
3588 best = sd;
3589 }
3590 }
3591 }
3592 if (!best)
3593 return 1;
3594 /* OK, load this ddf */
3595 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3596 dfd = dev_open(nm, O_RDONLY);
3597 if (dfd < 0)
3598 return 1;
3599 load_ddf_headers(dfd, super, NULL);
3600 load_ddf_global(dfd, super, NULL);
3601 close(dfd);
3602 /* Now we need the device-local bits */
3603 for (sd = sra->devs ; sd ; sd = sd->next) {
3604 int rv;
3605
3606 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3607 dfd = dev_open(nm, O_RDWR);
3608 if (dfd < 0)
3609 return 2;
3610 rv = load_ddf_headers(dfd, super, NULL);
3611 if (rv == 0)
3612 rv = load_ddf_local(dfd, super, NULL, 1);
3613 if (rv)
3614 return 1;
3615 }
3616
3617 *sbp = super;
3618 if (st->ss == NULL) {
3619 st->ss = &super_ddf;
3620 st->minor_version = 0;
3621 st->max_devs = 512;
3622 }
3623 strcpy(st->container_devnm, fd2devnm(fd));
3624 return 0;
3625 }
3626
3627 static int load_container_ddf(struct supertype *st, int fd,
3628 char *devname)
3629 {
3630 return load_super_ddf_all(st, fd, &st->sb, devname);
3631 }
3632
3633 #endif /* MDASSEMBLE */
3634
3635 static int check_secondary(const struct vcl *vc)
3636 {
3637 const struct vd_config *conf = &vc->conf;
3638 int i;
3639
3640 /* The only DDF secondary RAID level md can support is
3641 * RAID 10, if the stripe sizes and Basic volume sizes
3642 * are all equal.
3643 * Other configurations could in theory be supported by exposing
3644 * the BVDs to user space and using device mapper for the secondary
3645 * mapping. So far we don't support that.
3646 */
3647
3648 __u64 sec_elements[4] = {0, 0, 0, 0};
3649 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1<<((n)&63)))
3650 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1<<((n)&63))) != 0)
3651
3652 if (vc->other_bvds == NULL) {
3653 pr_err("No BVDs for secondary RAID found\n");
3654 return -1;
3655 }
3656 if (conf->prl != DDF_RAID1) {
3657 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3658 return -1;
3659 }
3660 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3661 pr_err("Secondary RAID level %d is unsupported\n",
3662 conf->srl);
3663 return -1;
3664 }
3665 __set_sec_seen(conf->sec_elmnt_seq);
3666 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3667 const struct vd_config *bvd = vc->other_bvds[i];
3668 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3669 continue;
3670 if (bvd->srl != conf->srl) {
3671 pr_err("Inconsistent secondary RAID level across BVDs\n");
3672 return -1;
3673 }
3674 if (bvd->prl != conf->prl) {
3675 pr_err("Different RAID levels for BVDs are unsupported\n");
3676 return -1;
3677 }
3678 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3679 pr_err("All BVDs must have the same number of primary elements\n");
3680 return -1;
3681 }
3682 if (bvd->chunk_shift != conf->chunk_shift) {
3683 pr_err("Different strip sizes for BVDs are unsupported\n");
3684 return -1;
3685 }
3686 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3687 pr_err("Different BVD sizes are unsupported\n");
3688 return -1;
3689 }
3690 __set_sec_seen(bvd->sec_elmnt_seq);
3691 }
3692 for (i = 0; i < conf->sec_elmnt_count; i++) {
3693 if (!__was_sec_seen(i)) {
3694 pr_err("BVD %d is missing\n", i);
3695 return -1;
3696 }
3697 }
3698 return 0;
3699 }
3700
/* Locate the physical disk with reference number 'refnum' within the
 * virtual disk 'vc'.  At most 'nmax' slots per BVD are searched: first
 * the primary BVD (vc->conf), then any other BVDs of a secondary
 * configuration.
 * On success *bvd is set to the vd_config containing the disk, *idx to
 * its slot inside that BVD, and the overall raid-disk number is
 * returned (sec_elmnt_seq * prim_elmnt_count + count of valid slots
 * preceding it in that BVD).
 * Returns DDF_NOTFOUND (and sets *bvd = NULL) when refnum is absent.
 */
static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
					     be32 refnum, unsigned int nmax,
					     const struct vd_config **bvd,
					     unsigned int *idx)
{
	unsigned int i, j, n, sec, cnt;

	cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
	/* A single-BVD config is always sequence 0. */
	sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);

	for (i = 0, j = 0 ; i < nmax ; i++) {
		/* j counts valid entries for this BVD */
		if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
			*bvd = &vc->conf;
			*idx = i;
			return sec * cnt + j;
		}
		/* 0xffffffff marks an unused slot */
		if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
			j++;
	}
	if (vc->other_bvds == NULL)
		goto bad;

	/* Not in the primary BVD - search the remaining BVDs. */
	for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
		struct vd_config *vd = vc->other_bvds[n-1];
		sec = vd->sec_elmnt_seq;
		if (sec == DDF_UNUSED_BVD)
			continue;
		for (i = 0, j = 0 ; i < nmax ; i++) {
			if (be32_eq(vd->phys_refnum[i], refnum)) {
				*bvd = vd;
				*idx = i;
				return sec * cnt + j;
			}
			if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
				j++;
		}
	}
bad:
	*bvd = NULL;
	return DDF_NOTFOUND;
}
3743
static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
{
	/* Given a container loaded by load_super_ddf_all,
	 * extract information about all the arrays into
	 * an mdinfo tree.
	 *
	 * For each vcl in conflist: create an mdinfo, fill it in,
	 * then look for matching devices (phys_refnum) in dlist
	 * and create appropriate device mdinfo.
	 *
	 * If 'subarray' is non-NULL, only the array whose vcnum matches
	 * that decimal string is reported.
	 * Returns the head of the mdinfo list (NULL if none).
	 */
	struct ddf_super *ddf = st->sb;
	struct mdinfo *rest = NULL;
	struct vcl *vc;

	for (vc = ddf->conflist ; vc ; vc=vc->next) {
		unsigned int i;
		struct mdinfo *this;
		char *ep;
		__u32 *cptr;
		unsigned int pd;

		/* Skip arrays other than the requested subarray. */
		if (subarray &&
		    (strtoul(subarray, &ep, 10) != vc->vcnum ||
		     *ep != '\0'))
			continue;

		/* Secondary RAID (RAID10-like) must pass extra checks. */
		if (vc->conf.sec_elmnt_count > 1) {
			if (check_secondary(vc) != 0)
				continue;
		}

		this = xcalloc(1, sizeof(*this));
		this->next = rest;
		rest = this;

		if (layout_ddf2md(&vc->conf, &this->array))
			continue;
		this->array.md_minor = -1;
		this->array.major_version = -1;
		this->array.minor_version = -2;
		this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
		/* DDF timestamps are 1980-based; DECADE converts them to
		 * the Unix epoch.  The creation time is embedded in the
		 * GUID at offset 16. */
		cptr = (__u32 *)(vc->conf.guid + 16);
		this->array.ctime = DECADE + __be32_to_cpu(*cptr);
		this->array.utime = DECADE +
			be32_to_cpu(vc->conf.timestamp);
		this->array.chunk_size = 512 << vc->conf.chunk_shift;

		i = vc->vcnum;
		if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
		    (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
		    DDF_init_full) {
			/* Dirty or not fully initialised: resync needed. */
			this->array.state = 0;
			this->resync_start = 0;
		} else {
			this->array.state = 1;
			this->resync_start = MaxSector;
		}
		_ddf_array_name(this->name, ddf, i);
		memset(this->uuid, 0, sizeof(this->uuid));
		this->component_size = be64_to_cpu(vc->conf.blocks);
		this->array.size = this->component_size / 2;
		this->container_member = i;

		/* uuid_from_super_ddf() reads currentconf; restore it
		 * afterwards unless a specific subarray was requested. */
		ddf->currentconf = vc;
		uuid_from_super_ddf(st, this->uuid);
		if (!subarray)
			ddf->currentconf = NULL;

		sprintf(this->text_version, "/%s/%d",
			st->container_devnm, this->container_member);

		/* Now attach a device entry for every online physical
		 * disk that belongs to this virtual disk. */
		for (pd = 0; pd < be16_to_cpu(ddf->phys->used_pdes); pd++) {
			struct mdinfo *dev;
			struct dl *d;
			const struct vd_config *bvd;
			unsigned int iphys;
			int stt;

			/* 0xFFFFFFFF marks an unused phys-disk slot. */
			if (be32_to_cpu(ddf->phys->entries[pd].refnum)
			    == 0xFFFFFFFF)
				continue;

			stt = be16_to_cpu(ddf->phys->entries[pd].state);
			if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
			    != DDF_Online)
				continue;

			i = get_pd_index_from_refnum(
				vc, ddf->phys->entries[pd].refnum,
				ddf->mppe, &bvd, &iphys);
			if (i == DDF_NOTFOUND)
				continue;

			this->array.working_disks++;

			for (d = ddf->dlist; d ; d=d->next)
				if (be32_eq(d->disk.refnum,
					    ddf->phys->entries[pd].refnum))
					break;
			if (d == NULL)
				/* Haven't found that one yet, maybe there are others */
				continue;

			dev = xcalloc(1, sizeof(*dev));
			dev->next = this->devs;
			this->devs = dev;

			dev->disk.number = be32_to_cpu(d->disk.refnum);
			dev->disk.major = d->major;
			dev->disk.minor = d->minor;
			dev->disk.raid_disk = i;
			dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
			dev->recovery_start = MaxSector;

			dev->events = be32_to_cpu(ddf->active->seq);
			dev->data_offset =
				be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
			dev->component_size = be64_to_cpu(bvd->blocks);
			if (d->devname)
				strcpy(dev->name, d->devname);
		}
	}
	return rest;
}
3868
3869 static int store_super_ddf(struct supertype *st, int fd)
3870 {
3871 struct ddf_super *ddf = st->sb;
3872 unsigned long long dsize;
3873 void *buf;
3874 int rc;
3875
3876 if (!ddf)
3877 return 1;
3878
3879 if (!get_dev_size(fd, NULL, &dsize))
3880 return 1;
3881
3882 if (ddf->dlist || ddf->conflist) {
3883 struct stat sta;
3884 struct dl *dl;
3885 int ofd, ret;
3886
3887 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3888 pr_err("%s: file descriptor for invalid device\n",
3889 __func__);
3890 return 1;
3891 }
3892 for (dl = ddf->dlist; dl; dl = dl->next)
3893 if (dl->major == (int)major(sta.st_rdev) &&
3894 dl->minor == (int)minor(sta.st_rdev))
3895 break;
3896 if (!dl) {
3897 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3898 (int)major(sta.st_rdev),
3899 (int)minor(sta.st_rdev));
3900 return 1;
3901 }
3902 ofd = dl->fd;
3903 dl->fd = fd;
3904 ret = (_write_super_to_disk(ddf, dl) != 1);
3905 dl->fd = ofd;
3906 return ret;
3907 }
3908
3909 if (posix_memalign(&buf, 512, 512) != 0)
3910 return 1;
3911 memset(buf, 0, 512);
3912
3913 lseek64(fd, dsize-512, 0);
3914 rc = write(fd, buf, 512);
3915 free(buf);
3916 if (rc < 0)
3917 return 1;
3918 return 0;
3919 }
3920
3921 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3922 {
3923 /*
3924 * return:
3925 * 0 same, or first was empty, and second was copied
3926 * 1 second had wrong magic number - but that isn't possible
3927 * 2 wrong uuid
3928 * 3 wrong other info
3929 */
3930 struct ddf_super *first = st->sb;
3931 struct ddf_super *second = tst->sb;
3932 struct dl *dl1, *dl2;
3933 struct vcl *vl1, *vl2;
3934 unsigned int max_vds, max_pds, pd, vd;
3935
3936 if (!first) {
3937 st->sb = tst->sb;
3938 tst->sb = NULL;
3939 return 0;
3940 }
3941
3942 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3943 return 2;
3944
3945 if (first->max_part != second->max_part ||
3946 !be16_eq(first->phys->used_pdes, second->phys->used_pdes) ||
3947 !be16_eq(first->virt->populated_vdes,
3948 second->virt->populated_vdes)) {
3949 dprintf("%s: PD/VD number mismatch\n", __func__);
3950 return 3;
3951 }
3952
3953 max_pds = be16_to_cpu(first->phys->used_pdes);
3954 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3955 for (pd = 0; pd < max_pds; pd++)
3956 if (be32_eq(first->phys->entries[pd].refnum,
3957 dl2->disk.refnum))
3958 break;
3959 if (pd == max_pds) {
3960 dprintf("%s: no match for disk %08x\n", __func__,
3961 be32_to_cpu(dl2->disk.refnum));
3962 return 3;
3963 }
3964 }
3965
3966 max_vds = be16_to_cpu(first->active->max_vd_entries);
3967 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3968 if (!be32_eq(vl2->conf.magic, DDF_VD_CONF_MAGIC))
3969 continue;
3970 for (vd = 0; vd < max_vds; vd++)
3971 if (!memcmp(first->virt->entries[vd].guid,
3972 vl2->conf.guid, DDF_GUID_LEN))
3973 break;
3974 if (vd == max_vds) {
3975 dprintf("%s: no match for VD config\n", __func__);
3976 return 3;
3977 }
3978 }
3979 /* FIXME should I look at anything else? */
3980
3981 /*
3982 * At this point we are fairly sure that the meta data matches.
3983 * But the new disk may contain additional local data.
3984 * Add it to the super block.
3985 */
3986 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3987 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3988 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3989 DDF_GUID_LEN))
3990 break;
3991 if (vl1) {
3992 if (vl1->other_bvds != NULL &&
3993 vl1->conf.sec_elmnt_seq !=
3994 vl2->conf.sec_elmnt_seq) {
3995 dprintf("%s: adding BVD %u\n", __func__,
3996 vl2->conf.sec_elmnt_seq);
3997 add_other_bvd(vl1, &vl2->conf,
3998 first->conf_rec_len*512);
3999 }
4000 continue;
4001 }
4002
4003 if (posix_memalign((void **)&vl1, 512,
4004 (first->conf_rec_len*512 +
4005 offsetof(struct vcl, conf))) != 0) {
4006 pr_err("%s could not allocate vcl buf\n",
4007 __func__);
4008 return 3;
4009 }
4010
4011 vl1->next = first->conflist;
4012 vl1->block_sizes = NULL;
4013 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
4014 if (alloc_other_bvds(first, vl1) != 0) {
4015 pr_err("%s could not allocate other bvds\n",
4016 __func__);
4017 free(vl1);
4018 return 3;
4019 }
4020 for (vd = 0; vd < max_vds; vd++)
4021 if (!memcmp(first->virt->entries[vd].guid,
4022 vl1->conf.guid, DDF_GUID_LEN))
4023 break;
4024 vl1->vcnum = vd;
4025 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
4026 first->conflist = vl1;
4027 }
4028
4029 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
4030 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
4031 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
4032 break;
4033 if (dl1)
4034 continue;
4035
4036 if (posix_memalign((void **)&dl1, 512,
4037 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
4038 != 0) {
4039 pr_err("%s could not allocate disk info buffer\n",
4040 __func__);
4041 return 3;
4042 }
4043 memcpy(dl1, dl2, sizeof(*dl1));
4044 dl1->mdupdate = NULL;
4045 dl1->next = first->dlist;
4046 dl1->fd = -1;
4047 for (pd = 0; pd < max_pds; pd++)
4048 if (be32_eq(first->phys->entries[pd].refnum,
4049 dl1->disk.refnum))
4050 break;
4051 dl1->pdnum = pd;
4052 if (dl2->spare) {
4053 if (posix_memalign((void **)&dl1->spare, 512,
4054 first->conf_rec_len*512) != 0) {
4055 pr_err("%s could not allocate spare info buf\n",
4056 __func__);
4057 return 3;
4058 }
4059 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
4060 }
4061 for (vd = 0 ; vd < first->max_part ; vd++) {
4062 if (!dl2->vlist[vd]) {
4063 dl1->vlist[vd] = NULL;
4064 continue;
4065 }
4066 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
4067 if (!memcmp(vl1->conf.guid,
4068 dl2->vlist[vd]->conf.guid,
4069 DDF_GUID_LEN))
4070 break;
4071 dl1->vlist[vd] = vl1;
4072 }
4073 }
4074 first->dlist = dl1;
4075 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
4076 be32_to_cpu(dl1->disk.refnum));
4077 }
4078
4079 return 0;
4080 }
4081
4082 #ifndef MDASSEMBLE
4083 /*
4084 * A new array 'a' has been started which claims to be instance 'inst'
4085 * within container 'c'.
4086 * We need to confirm that the array matches the metadata in 'c' so
4087 * that we don't corrupt any metadata.
4088 */
4089 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
4090 {
4091 struct ddf_super *ddf = c->sb;
4092 int n = atoi(inst);
4093 struct mdinfo *dev;
4094 struct dl *dl;
4095 static const char faulty[] = "faulty";
4096
4097 if (all_ff(ddf->virt->entries[n].guid)) {
4098 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
4099 return -ENODEV;
4100 }
4101 dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
4102 guid_str(ddf->virt->entries[n].guid));
4103 for (dev = a->info.devs; dev; dev = dev->next) {
4104 for (dl = ddf->dlist; dl; dl = dl->next)
4105 if (dl->major == dev->disk.major &&
4106 dl->minor == dev->disk.minor)
4107 break;
4108 if (!dl) {
4109 pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
4110 __func__, dev->disk.major, dev->disk.minor, n);
4111 return -1;
4112 }
4113 if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
4114 (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
4115 pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
4116 __func__, n, dl->major, dl->minor,
4117 be16_to_cpu(
4118 ddf->phys->entries[dl->pdnum].state));
4119 if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
4120 sizeof(faulty) - 1)
4121 pr_err("Write to state_fd failed\n");
4122 dev->curr_state = DS_FAULTY;
4123 }
4124 }
4125 a->info.container_member = n;
4126 return 0;
4127 }
4128
4129 static void handle_missing(struct ddf_super *ddf, int inst)
4130 {
4131 /* This member array is being activated. If any devices
4132 * are missing they must now be marked as failed.
4133 */
4134 struct vd_config *vc;
4135 unsigned int n_bvd;
4136 struct vcl *vcl;
4137 struct dl *dl;
4138 int n;
4139
4140 for (n = 0; ; n++) {
4141 vc = find_vdcr(ddf, inst, n, &n_bvd, &vcl);
4142 if (!vc)
4143 break;
4144 for (dl = ddf->dlist; dl; dl = dl->next)
4145 if (be32_eq(dl->disk.refnum, vc->phys_refnum[n_bvd]))
4146 break;
4147 if (dl)
4148 /* Found this disk, so not missing */
4149 continue;
4150 vc->phys_refnum[n_bvd] = cpu_to_be32(0);
4151 }
4152 }
4153
/*
 * The array 'a' is to be marked clean in the metadata.
 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
 * clean up to the point (in sectors).  If that cannot be recorded in the
 * metadata, then leave it as dirty.
 *
 * For DDF, we need to clear the DDF_state_inconsistent bit in the
 * !global! virtual_disk.virtual_entry structure.
 *
 * Returns the consistency that was actually recorded (0 or 1).
 */
static int ddf_set_array_state(struct active_array *a, int consistent)
{
	struct ddf_super *ddf = a->container->sb;
	int inst = a->info.container_member;
	int old = ddf->virt->entries[inst].state;
	if (consistent == 2) {
		/* consistent==2 means "work it out yourself": the array
		 * is clean only once resync has completed. */
		handle_missing(ddf, inst);
		/* Should check if a recovery should be started FIXME */
		consistent = 1;
		if (!is_resync_complete(&a->info))
			consistent = 0;
	}
	if (consistent)
		ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
	else
		ddf->virt->entries[inst].state |= DDF_state_inconsistent;
	/* Only schedule a metadata write if something changed. */
	if (old != ddf->virt->entries[inst].state)
		ddf_set_updates_pending(ddf);

	/* Record how far initialisation has progressed:
	 * full / not-started / partially done ("quick"). */
	old = ddf->virt->entries[inst].init_state;
	ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
	if (is_resync_complete(&a->info))
		ddf->virt->entries[inst].init_state |= DDF_init_full;
	else if (a->info.resync_start == 0)
		ddf->virt->entries[inst].init_state |= DDF_init_not;
	else
		ddf->virt->entries[inst].init_state |= DDF_init_quick;
	if (old != ddf->virt->entries[inst].init_state)
		ddf_set_updates_pending(ddf);

	dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
		guid_str(ddf->virt->entries[inst].guid), a->curr_state,
		consistent?"clean":"dirty",
		a->info.resync_start);
	return consistent;
}
4199
4200 static int get_bvd_state(const struct ddf_super *ddf,
4201 const struct vd_config *vc)
4202 {
4203 unsigned int i, n_bvd, working = 0;
4204 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4205 int pd, st, state;
4206 for (i = 0; i < n_prim; i++) {
4207 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4208 continue;
4209 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4210 if (pd < 0)
4211 continue;
4212 st = be16_to_cpu(ddf->phys->entries[pd].state);
4213 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4214 == DDF_Online)
4215 working++;
4216 }
4217
4218 state = DDF_state_degraded;
4219 if (working == n_prim)
4220 state = DDF_state_optimal;
4221 else
4222 switch (vc->prl) {
4223 case DDF_RAID0:
4224 case DDF_CONCAT:
4225 case DDF_JBOD:
4226 state = DDF_state_failed;
4227 break;
4228 case DDF_RAID1:
4229 if (working == 0)
4230 state = DDF_state_failed;
4231 else if (working >= 2)
4232 state = DDF_state_part_optimal;
4233 break;
4234 case DDF_RAID4:
4235 case DDF_RAID5:
4236 if (working < n_prim - 1)
4237 state = DDF_state_failed;
4238 break;
4239 case DDF_RAID6:
4240 if (working < n_prim - 2)
4241 state = DDF_state_failed;
4242 else if (working == n_prim - 1)
4243 state = DDF_state_part_optimal;
4244 break;
4245 }
4246 return state;
4247 }
4248
4249 static int secondary_state(int state, int other, int seclevel)
4250 {
4251 if (state == DDF_state_optimal && other == DDF_state_optimal)
4252 return DDF_state_optimal;
4253 if (seclevel == DDF_2MIRRORED) {
4254 if (state == DDF_state_optimal || other == DDF_state_optimal)
4255 return DDF_state_part_optimal;
4256 if (state == DDF_state_failed && other == DDF_state_failed)
4257 return DDF_state_failed;
4258 return DDF_state_degraded;
4259 } else {
4260 if (state == DDF_state_failed || other == DDF_state_failed)
4261 return DDF_state_failed;
4262 if (state == DDF_state_degraded || other == DDF_state_degraded)
4263 return DDF_state_degraded;
4264 return DDF_state_part_optimal;
4265 }
4266 }
4267
4268 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4269 {
4270 int state = get_bvd_state(ddf, &vcl->conf);
4271 unsigned int i;
4272 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4273 state = secondary_state(
4274 state,
4275 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4276 vcl->conf.srl);
4277 }
4278 return state;
4279 }
4280
4281 /*
4282 * The state of each disk is stored in the global phys_disk structure
4283 * in phys_disk.entries[n].state.
4284 * This makes various combinations awkward.
4285 * - When a device fails in any array, it must be failed in all arrays
4286 * that include a part of this device.
4287 * - When a component is rebuilding, we cannot include it officially in the
4288 * array unless this is the only array that uses the device.
4289 *
4290 * So: when transitioning:
4291 * Online -> failed, just set failed flag. monitor will propagate
4292 * spare -> online, the device might need to be added to the array.
4293 * spare -> failed, just set failed. Don't worry if in array or not.
4294 */
4295 static void ddf_set_disk(struct active_array *a, int n, int state)
4296 {
4297 struct ddf_super *ddf = a->container->sb;
4298 unsigned int inst = a->info.container_member, n_bvd;
4299 struct vcl *vcl;
4300 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4301 &n_bvd, &vcl);
4302 int pd;
4303 struct mdinfo *mdi;
4304 struct dl *dl;
4305
4306 dprintf("%s: %d to %x\n", __func__, n, state);
4307 if (vc == NULL) {
4308 dprintf("ddf: cannot find instance %d!!\n", inst);
4309 return;
4310 }
4311 /* Find the matching slot in 'info'. */
4312 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4313 if (mdi->disk.raid_disk == n)
4314 break;
4315 if (!mdi) {
4316 pr_err("%s: cannot find raid disk %d\n",
4317 __func__, n);
4318 return;
4319 }
4320
4321 /* and find the 'dl' entry corresponding to that. */
4322 for (dl = ddf->dlist; dl; dl = dl->next)
4323 if (mdi->state_fd >= 0 &&
4324 mdi->disk.major == dl->major &&
4325 mdi->disk.minor == dl->minor)
4326 break;
4327 if (!dl) {
4328 pr_err("%s: cannot find raid disk %d (%d/%d)\n",
4329 __func__, n,
4330 mdi->disk.major, mdi->disk.minor);
4331 return;
4332 }
4333
4334 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4335 if (pd < 0 || pd != dl->pdnum) {
4336 /* disk doesn't currently exist or has changed.
4337 * If it is now in_sync, insert it. */
4338 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4339 __func__, dl->pdnum, dl->major, dl->minor,
4340 be32_to_cpu(dl->disk.refnum));
4341 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4342 __func__, inst, n_bvd,
4343 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4344 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4345 pd = dl->pdnum; /* FIXME: is this really correct ? */
4346 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4347 LBA_OFFSET(ddf, vc)[n_bvd] =
4348 cpu_to_be64(mdi->data_offset);
4349 be16_clear(ddf->phys->entries[pd].type,
4350 cpu_to_be16(DDF_Global_Spare));
4351 be16_set(ddf->phys->entries[pd].type,
4352 cpu_to_be16(DDF_Active_in_VD));
4353 ddf_set_updates_pending(ddf);
4354 }
4355 } else {
4356 be16 old = ddf->phys->entries[pd].state;
4357 if (state & DS_FAULTY)
4358 be16_set(ddf->phys->entries[pd].state,
4359 cpu_to_be16(DDF_Failed));
4360 if (state & DS_INSYNC) {
4361 be16_set(ddf->phys->entries[pd].state,
4362 cpu_to_be16(DDF_Online));
4363 be16_clear(ddf->phys->entries[pd].state,
4364 cpu_to_be16(DDF_Rebuilding));
4365 }
4366 if (!be16_eq(old, ddf->phys->entries[pd].state))
4367 ddf_set_updates_pending(ddf);
4368 }
4369
4370 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4371 be32_to_cpu(dl->disk.refnum), state,
4372 be16_to_cpu(ddf->phys->entries[pd].state));
4373
4374 /* Now we need to check the state of the array and update
4375 * virtual_disk.entries[n].state.
4376 * It needs to be one of "optimal", "degraded", "failed".
4377 * I don't understand 'deleted' or 'missing'.
4378 */
4379 state = get_svd_state(ddf, vcl);
4380
4381 if (ddf->virt->entries[inst].state !=
4382 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4383 | state)) {
4384 ddf->virt->entries[inst].state =
4385 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4386 | state;
4387 ddf_set_updates_pending(ddf);
4388 }
4389
4390 }
4391
4392 static void ddf_sync_metadata(struct supertype *st)
4393 {
4394 /*
4395 * Write all data to all devices.
4396 * Later, we might be able to track whether only local changes
4397 * have been made, or whether any global data has been changed,
4398 * but ddf is sufficiently weird that it probably always
4399 * changes global data ....
4400 */
4401 struct ddf_super *ddf = st->sb;
4402 if (!ddf->updates_pending)
4403 return;
4404 ddf->updates_pending = 0;
4405 __write_init_super_ddf(st);
4406 dprintf("ddf: sync_metadata\n");
4407 }
4408
4409 static int del_from_conflist(struct vcl **list, const char *guid)
4410 {
4411 struct vcl **p;
4412 int found = 0;
4413 for (p = list; p && *p; p = &((*p)->next))
4414 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4415 found = 1;
4416 *p = (*p)->next;
4417 }
4418 return found;
4419 }
4420
4421 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4422 {
4423 struct dl *dl;
4424 unsigned int vdnum, i;
4425 vdnum = find_vde_by_guid(ddf, guid);
4426 if (vdnum == DDF_NOTFOUND) {
4427 pr_err("%s: could not find VD %s\n", __func__,
4428 guid_str(guid));
4429 return -1;
4430 }
4431 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4432 pr_err("%s: could not find conf %s\n", __func__,
4433 guid_str(guid));
4434 return -1;
4435 }
4436 for (dl = ddf->dlist; dl; dl = dl->next)
4437 for (i = 0; i < ddf->max_part; i++)
4438 if (dl->vlist[i] != NULL &&
4439 !memcmp(dl->vlist[i]->conf.guid, guid,
4440 DDF_GUID_LEN))
4441 dl->vlist[i] = NULL;
4442 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4443 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4444 return 0;
4445 }
4446
4447 static int kill_subarray_ddf(struct supertype *st)
4448 {
4449 struct ddf_super *ddf = st->sb;
4450 /*
4451 * currentconf is set in container_content_ddf,
4452 * called with subarray arg
4453 */
4454 struct vcl *victim = ddf->currentconf;
4455 struct vd_config *conf;
4456 unsigned int vdnum;
4457
4458 ddf->currentconf = NULL;
4459 if (!victim) {
4460 pr_err("%s: nothing to kill\n", __func__);
4461 return -1;
4462 }
4463 conf = &victim->conf;
4464 vdnum = find_vde_by_guid(ddf, conf->guid);
4465 if (vdnum == DDF_NOTFOUND) {
4466 pr_err("%s: could not find VD %s\n", __func__,
4467 guid_str(conf->guid));
4468 return -1;
4469 }
4470 if (st->update_tail) {
4471 struct virtual_disk *vd;
4472 int len = sizeof(struct virtual_disk)
4473 + sizeof(struct virtual_entry);
4474 vd = xmalloc(len);
4475 if (vd == NULL) {
4476 pr_err("%s: failed to allocate %d bytes\n", __func__,
4477 len);
4478 return -1;
4479 }
4480 memset(vd, 0 , len);
4481 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4482 vd->populated_vdes = cpu_to_be16(0);
4483 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4484 /* we use DDF_state_deleted as marker */
4485 vd->entries[0].state = DDF_state_deleted;
4486 append_metadata_update(st, vd, len);
4487 } else {
4488 _kill_subarray_ddf(ddf, conf->guid);
4489 ddf_set_updates_pending(ddf);
4490 ddf_sync_metadata(st);
4491 }
4492 return 0;
4493 }
4494
4495 static void copy_matching_bvd(struct ddf_super *ddf,
4496 struct vd_config *conf,
4497 const struct metadata_update *update)
4498 {
4499 unsigned int mppe =
4500 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4501 unsigned int len = ddf->conf_rec_len * 512;
4502 char *p;
4503 struct vd_config *vc;
4504 for (p = update->buf; p < update->buf + update->len; p += len) {
4505 vc = (struct vd_config *) p;
4506 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4507 memcpy(conf->phys_refnum, vc->phys_refnum,
4508 mppe * (sizeof(__u32) + sizeof(__u64)));
4509 return;
4510 }
4511 }
4512 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4513 conf->sec_elmnt_seq, guid_str(conf->guid));
4514 }
4515
/* superswitch method: apply a queued metadata update (from mdadm or
 * from ddf_activate_spare) to the in-memory DDF metadata.  Runs in the
 * monitor; must not allocate (ddf_prepare_update pre-allocates into
 * update->space).
 */
static void ddf_process_update(struct supertype *st,
			       struct metadata_update *update)
{
	/* Apply this update to the metadata.
	 * The first 4 bytes are a DDF_*_MAGIC which guides
	 * our actions.
	 * Possible updates are:
	 *  DDF_PHYS_RECORDS_MAGIC
	 *    Add a new physical device or remove an old one.
	 *    Changes to this record only happen implicitly.
	 *    used_pdes is the device number.
	 *  DDF_VIRT_RECORDS_MAGIC
	 *    Add a new VD.  Possibly also change the 'access' bits.
	 *    populated_vdes is the entry number.
	 *  DDF_VD_CONF_MAGIC
	 *    New or updated VD.  the VIRT_RECORD must already
	 *    exist.  For an update, phys_refnum and lba_offset
	 *    (at least) are updated, and the VD_CONF must
	 *    be written to precisely those devices listed with
	 *    a phys_refnum.
	 *  DDF_SPARE_ASSIGN_MAGIC
	 *    replacement Spare Assignment Record... but for which device?
	 *
	 * So, e.g.:
	 * - to create a new array, we send a VIRT_RECORD and
	 *   a VD_CONF.  Then assemble and start the array.
	 * - to activate a spare we send a VD_CONF to add the phys_refnum
	 *   and offset.  This will also mark the spare as active with
	 *   a spare-assignment record.
	 */
	struct ddf_super *ddf = st->sb;
	be32 *magic = (be32 *)update->buf;
	struct phys_disk *pd;
	struct virtual_disk *vd;
	struct vd_config *vc;
	struct vcl *vcl;
	struct dl *dl;
	unsigned int ent;
	unsigned int pdnum, pd2, len;

	dprintf("Process update %x\n", be32_to_cpu(*magic));

	if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
		/* Physical disk record: exactly one header + one entry. */
		if (update->len != (sizeof(struct phys_disk) +
				    sizeof(struct phys_disk_entry)))
			return;
		pd = (struct phys_disk*)update->buf;

		/* used_pdes carries the slot number for this update. */
		ent = be16_to_cpu(pd->used_pdes);
		if (ent >= be16_to_cpu(ddf->phys->max_pdes))
			return;
		if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
			struct dl **dlp;
			/* removing this disk. */
			be16_set(ddf->phys->entries[ent].state,
				 cpu_to_be16(DDF_Missing));
			/* Unlink the matching dl; park it in update->space
			 * so the managemon frees it (monitor must not free).
			 */
			for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
				struct dl *dl = *dlp;
				if (dl->pdnum == (signed)ent) {
					close(dl->fd);
					dl->fd = -1;
					/* FIXME this doesn't free
					 * dl->devname */
					update->space = dl;
					*dlp = dl->next;
					break;
				}
			}
			ddf_set_updates_pending(ddf);
			return;
		}
		/* Adding: the target slot must still be unused (all-0xff). */
		if (!all_ff(ddf->phys->entries[ent].guid))
			return;
		ddf->phys->entries[ent] = pd->entries[0];
		ddf->phys->used_pdes = cpu_to_be16
			(1 + be16_to_cpu(ddf->phys->used_pdes));
		ddf_set_updates_pending(ddf);
		if (ddf->add_list) {
			struct active_array *a;
			struct dl *al = ddf->add_list;
			ddf->add_list = al->next;

			al->next = ddf->dlist;
			ddf->dlist = al;

			/* As a device has been added, we should check
			 * for any degraded devices that might make
			 * use of this spare */
			for (a = st->arrays ; a; a=a->next)
				a->check_degraded = 1;
		}
	} else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
		/* Virtual disk record: one header + one entry. */
		if (update->len != (sizeof(struct virtual_disk) +
				    sizeof(struct virtual_entry)))
			return;
		vd = (struct virtual_disk*)update->buf;

		if (vd->entries[0].state == DDF_state_deleted) {
			/* DDF_state_deleted is the deletion marker set by
			 * kill_subarray_ddf(). */
			if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
				return;
		} else {
			ent = find_vde_by_guid(ddf, vd->entries[0].guid);
			if (ent != DDF_NOTFOUND) {
				dprintf("%s: VD %s exists already in slot %d\n",
					__func__, guid_str(vd->entries[0].guid),
					ent);
				return;
			}
			ent = find_unused_vde(ddf);
			if (ent == DDF_NOTFOUND)
				return;
			ddf->virt->entries[ent] = vd->entries[0];
			ddf->virt->populated_vdes =
				cpu_to_be16(
					1 + be16_to_cpu(
						ddf->virt->populated_vdes));
			dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
				__func__, guid_str(vd->entries[0].guid), ent,
				ddf->virt->entries[ent].state,
				ddf->virt->entries[ent].init_state);
		}
		ddf_set_updates_pending(ddf);
	}

	else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
		vc = (struct vd_config*)update->buf;
		len = ddf->conf_rec_len * 512;
		/* The payload must carry one full record per BVD. */
		if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
			pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
			       __func__, guid_str(vc->guid), update->len,
			       vc->sec_elmnt_count);
			return;
		}
		/* Does a configuration for this GUID already exist? */
		for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
			if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
				break;
		dprintf("%s: conf update for %s (%s)\n", __func__,
			guid_str(vc->guid), (vcl ? "old" : "new"));
		if (vcl) {
			/* An update, just copy the phys_refnum and lba_offset
			 * fields
			 */
			unsigned int i;
			unsigned int k;
			copy_matching_bvd(ddf, &vcl->conf, update);
			for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
				dprintf("BVD %u has %08x at %llu\n", 0,
					be32_to_cpu(vcl->conf.phys_refnum[k]),
					be64_to_cpu(LBA_OFFSET(ddf,
							       &vcl->conf)[k]));
			for (i = 1; i < vc->sec_elmnt_count; i++) {
				copy_matching_bvd(ddf, vcl->other_bvds[i-1],
						  update);
				for (k = 0; k < be16_to_cpu(
					     vc->prim_elmnt_count); k++)
					dprintf("BVD %u has %08x at %llu\n", i,
						be32_to_cpu
						(vcl->other_bvds[i-1]->
						 phys_refnum[k]),
						be64_to_cpu
						(LBA_OFFSET
						 (ddf,
						  vcl->other_bvds[i-1])[k]));
			}
		} else {
			/* A new VD_CONF: take the pre-allocated vcl from
			 * update->space (filled in by ddf_prepare_update).
			 */
			unsigned int i;
			if (!update->space)
				return;
			vcl = update->space;
			update->space = NULL;
			vcl->next = ddf->conflist;
			memcpy(&vcl->conf, vc, len);
			ent = find_vde_by_guid(ddf, vc->guid);
			if (ent == DDF_NOTFOUND)
				return;
			vcl->vcnum = ent;
			ddf->conflist = vcl;
			for (i = 1; i < vc->sec_elmnt_count; i++)
				memcpy(vcl->other_bvds[i-1],
				       update->buf + len * i, len);
		}
		/* Set DDF_Transition on all Failed devices - to help
		 * us detect those that are no longer in use
		 */
		for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
		     pdnum++)
			if (be16_and(ddf->phys->entries[pdnum].state,
				     cpu_to_be16(DDF_Failed)))
				be16_set(ddf->phys->entries[pdnum].state,
					 cpu_to_be16(DDF_Transition));
		/* Now make sure vlist is correct for each dl. */
		for (dl = ddf->dlist; dl; dl = dl->next) {
			unsigned int vn = 0;
			int in_degraded = 0;
			for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
				unsigned int dn, ibvd;
				const struct vd_config *conf;
				int vstate;
				dn = get_pd_index_from_refnum(vcl,
							      dl->disk.refnum,
							      ddf->mppe,
							      &conf, &ibvd);
				if (dn == DDF_NOTFOUND)
					continue;
				dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
					dl->pdnum,
					be32_to_cpu(dl->disk.refnum),
					guid_str(conf->guid),
					conf->sec_elmnt_seq, vn);
				/* Clear the Transition flag */
				if (be16_and
				    (ddf->phys->entries[dl->pdnum].state,
				     cpu_to_be16(DDF_Failed)))
					be16_clear(ddf->phys
						   ->entries[dl->pdnum].state,
						   cpu_to_be16(DDF_Transition));
				dl->vlist[vn++] = vcl;
				vstate = ddf->virt->entries[vcl->vcnum].state
					& DDF_state_mask;
				if (vstate == DDF_state_degraded ||
				    vstate == DDF_state_part_optimal)
					in_degraded = 1;
			}
			while (vn < ddf->max_part)
				dl->vlist[vn++] = NULL;
			if (dl->vlist[0]) {
				/* Disk is used by at least one VD: it is no
				 * longer a global spare. */
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Global_Spare));
				if (!be16_and(ddf->phys
					      ->entries[dl->pdnum].type,
					      cpu_to_be16(DDF_Active_in_VD))) {
					be16_set(ddf->phys
						 ->entries[dl->pdnum].type,
						 cpu_to_be16(DDF_Active_in_VD));
					if (in_degraded)
						be16_set(ddf->phys
							 ->entries[dl->pdnum]
							 .state,
							 cpu_to_be16
							 (DDF_Rebuilding));
				}
			}
			if (dl->spare) {
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Global_Spare));
				be16_set(ddf->phys->entries[dl->pdnum].type,
					 cpu_to_be16(DDF_Spare));
			}
			if (!dl->vlist[0] && !dl->spare) {
				/* Unused and unassigned: back to global spare. */
				be16_set(ddf->phys->entries[dl->pdnum].type,
					 cpu_to_be16(DDF_Global_Spare));
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Spare));
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Active_in_VD));
			}
		}

		/* Now remove any 'Failed' devices that are not part
		 * of any VD.  They will have the Transition flag set.
		 * Once done, we need to update all dl->pdnum numbers.
		 */
		pd2 = 0;
		for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->used_pdes);
		     pdnum++) {
			if (be16_and(ddf->phys->entries[pdnum].state,
				     cpu_to_be16(DDF_Failed))
			    && be16_and(ddf->phys->entries[pdnum].state,
					cpu_to_be16(DDF_Transition))) {
				/* skip this one unless in dlist*/
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						break;
				if (!dl)
					continue;
			}
			/* Compact surviving entries towards the front. */
			if (pdnum == pd2)
				pd2++;
			else {
				ddf->phys->entries[pd2] =
					ddf->phys->entries[pdnum];
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						dl->pdnum = pd2;
				pd2++;
			}
		}
		ddf->phys->used_pdes = cpu_to_be16(pd2);
		/* Mark the vacated tail entries unused (all-0xff GUID). */
		while (pd2 < pdnum) {
			memset(ddf->phys->entries[pd2].guid, 0xff,
			       DDF_GUID_LEN);
			pd2++;
		}

		ddf_set_updates_pending(ddf);
	}
	/* case DDF_SPARE_ASSIGN_MAGIC */
}
4815
4816 static void ddf_prepare_update(struct supertype *st,
4817 struct metadata_update *update)
4818 {
4819 /* This update arrived at managemon.
4820 * We are about to pass it to monitor.
4821 * If a malloc is needed, do it here.
4822 */
4823 struct ddf_super *ddf = st->sb;
4824 be32 *magic = (be32 *)update->buf;
4825 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4826 struct vcl *vcl;
4827 struct vd_config *conf = (struct vd_config *) update->buf;
4828 if (posix_memalign(&update->space, 512,
4829 offsetof(struct vcl, conf)
4830 + ddf->conf_rec_len * 512) != 0) {
4831 update->space = NULL;
4832 return;
4833 }
4834 vcl = update->space;
4835 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4836 if (alloc_other_bvds(ddf, vcl) != 0) {
4837 free(update->space);
4838 update->space = NULL;
4839 }
4840 }
4841 }
4842
4843 /*
4844 * Check degraded state of a RAID10.
4845 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4846 */
4847 static int raid10_degraded(struct mdinfo *info)
4848 {
4849 int n_prim, n_bvds;
4850 int i;
4851 struct mdinfo *d;
4852 char *found;
4853 int ret = -1;
4854
4855 n_prim = info->array.layout & ~0x100;
4856 n_bvds = info->array.raid_disks / n_prim;
4857 found = xmalloc(n_bvds);
4858 if (found == NULL)
4859 return ret;
4860 memset(found, 0, n_bvds);
4861 for (d = info->devs; d; d = d->next) {
4862 i = d->disk.raid_disk / n_prim;
4863 if (i >= n_bvds) {
4864 pr_err("%s: BUG: invalid raid disk\n", __func__);
4865 goto out;
4866 }
4867 if (d->state_fd > 0)
4868 found[i]++;
4869 }
4870 ret = 2;
4871 for (i = 0; i < n_bvds; i++)
4872 if (!found[i]) {
4873 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4874 ret = 0;
4875 goto out;
4876 } else if (found[i] < n_prim) {
4877 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4878 n_bvds);
4879 ret = 1;
4880 }
4881 out:
4882 free(found);
4883 return ret;
4884 }
4885
4886 /*
4887 * Check if the array 'a' is degraded but not failed.
4888 * If it is, find as many spares as are available and needed and
4889 * arrange for their inclusion.
4890 * We only choose devices which are not already in the array,
4891 * and prefer those with a spare-assignment to this array.
4892 * Otherwise we choose global spares - assuming always that
4893 * there is enough room.
4894 * For each spare that we assign, we return an 'mdinfo' which
4895 * describes the position for the device in the array.
4896 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4897 * the new phys_refnum and lba_offset values.
4898 *
4899 * Only worry about BVDs at the moment.
4900 */
4901 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4902 struct metadata_update **updates)
4903 {
4904 int working = 0;
4905 struct mdinfo *d;
4906 struct ddf_super *ddf = a->container->sb;
4907 int global_ok = 0;
4908 struct mdinfo *rv = NULL;
4909 struct mdinfo *di;
4910 struct metadata_update *mu;
4911 struct dl *dl;
4912 int i;
4913 unsigned int j;
4914 struct vcl *vcl;
4915 struct vd_config *vc;
4916 unsigned int n_bvd;
4917
4918 for (d = a->info.devs ; d ; d = d->next) {
4919 if ((d->curr_state & DS_FAULTY) &&
4920 d->state_fd >= 0)
4921 /* wait for Removal to happen */
4922 return NULL;
4923 if (d->state_fd >= 0)
4924 working ++;
4925 }
4926
4927 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4928 a->info.array.raid_disks,
4929 a->info.array.level);
4930 if (working == a->info.array.raid_disks)
4931 return NULL; /* array not degraded */
4932 switch (a->info.array.level) {
4933 case 1:
4934 if (working == 0)
4935 return NULL; /* failed */
4936 break;
4937 case 4:
4938 case 5:
4939 if (working < a->info.array.raid_disks - 1)
4940 return NULL; /* failed */
4941 break;
4942 case 6:
4943 if (working < a->info.array.raid_disks - 2)
4944 return NULL; /* failed */
4945 break;
4946 case 10:
4947 if (raid10_degraded(&a->info) < 1)
4948 return NULL;
4949 break;
4950 default: /* concat or stripe */
4951 return NULL; /* failed */
4952 }
4953
4954 /* For each slot, if it is not working, find a spare */
4955 dl = ddf->dlist;
4956 for (i = 0; i < a->info.array.raid_disks; i++) {
4957 for (d = a->info.devs ; d ; d = d->next)
4958 if (d->disk.raid_disk == i)
4959 break;
4960 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4961 if (d && (d->state_fd >= 0))
4962 continue;
4963
4964 /* OK, this device needs recovery. Find a spare */
4965 again:
4966 for ( ; dl ; dl = dl->next) {
4967 unsigned long long esize;
4968 unsigned long long pos;
4969 struct mdinfo *d2;
4970 int is_global = 0;
4971 int is_dedicated = 0;
4972 struct extent *ex;
4973 unsigned int j;
4974 be16 state = ddf->phys->entries[dl->pdnum].state;
4975 if (be16_and(state,
4976 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4977 !be16_and(state,
4978 cpu_to_be16(DDF_Online)))
4979 continue;
4980
4981 /* If in this array, skip */
4982 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4983 if (d2->state_fd >= 0 &&
4984 d2->disk.major == dl->major &&
4985 d2->disk.minor == dl->minor) {
4986 dprintf("%x:%x (%08x) already in array\n",
4987 dl->major, dl->minor,
4988 be32_to_cpu(dl->disk.refnum));
4989 break;
4990 }
4991 if (d2)
4992 continue;
4993 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4994 cpu_to_be16(DDF_Spare))) {
4995 /* Check spare assign record */
4996 if (dl->spare) {
4997 if (dl->spare->type & DDF_spare_dedicated) {
4998 /* check spare_ents for guid */
4999 for (j = 0 ;
5000 j < be16_to_cpu
5001 (dl->spare
5002 ->populated);
5003 j++) {
5004 if (memcmp(dl->spare->spare_ents[j].guid,
5005 ddf->virt->entries[a->info.container_member].guid,
5006 DDF_GUID_LEN) == 0)
5007 is_dedicated = 1;
5008 }
5009 } else
5010 is_global = 1;
5011 }
5012 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
5013 cpu_to_be16(DDF_Global_Spare))) {
5014 is_global = 1;
5015 } else if (!be16_and(ddf->phys
5016 ->entries[dl->pdnum].state,
5017 cpu_to_be16(DDF_Failed))) {
5018 /* we can possibly use some of this */
5019 is_global = 1;
5020 }
5021 if ( ! (is_dedicated ||
5022 (is_global && global_ok))) {
5023 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
5024 is_dedicated, is_global);
5025 continue;
5026 }
5027
5028 /* We are allowed to use this device - is there space?
5029 * We need a->info.component_size sectors */
5030 ex = get_extents(ddf, dl);
5031 if (!ex) {
5032 dprintf("cannot get extents\n");
5033 continue;
5034 }
5035 j = 0; pos = 0;
5036 esize = 0;
5037
5038 do {
5039 esize = ex[j].start - pos;
5040 if (esize >= a->info.component_size)
5041 break;
5042 pos = ex[j].start + ex[j].size;
5043 j++;
5044 } while (ex[j-1].size);
5045
5046 free(ex);
5047 if (esize < a->info.component_size) {
5048 dprintf("%x:%x has no room: %llu %llu\n",
5049 dl->major, dl->minor,
5050 esize, a->info.component_size);
5051 /* No room */
5052 continue;
5053 }
5054
5055 /* Cool, we have a device with some space at pos */
5056 di = xcalloc(1, sizeof(*di));
5057 di->disk.number = i;
5058 di->disk.raid_disk = i;
5059 di->disk.major = dl->major;
5060 di->disk.minor = dl->minor;
5061 di->disk.state = 0;
5062 di->recovery_start = 0;
5063 di->data_offset = pos;
5064 di->component_size = a->info.component_size;
5065 di->container_member = dl->pdnum;
5066 di->next = rv;
5067 rv = di;
5068 dprintf("%x:%x (%08x) to be %d at %llu\n",
5069 dl->major, dl->minor,
5070 be32_to_cpu(dl->disk.refnum), i, pos);
5071
5072 break;
5073 }
5074 if (!dl && ! global_ok) {
5075 /* not enough dedicated spares, try global */
5076 global_ok = 1;
5077 dl = ddf->dlist;
5078 goto again;
5079 }
5080 }
5081
5082 if (!rv)
5083 /* No spares found */
5084 return rv;
5085 /* Now 'rv' has a list of devices to return.
5086 * Create a metadata_update record to update the
5087 * phys_refnum and lba_offset values
5088 */
5089 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5090 &n_bvd, &vcl);
5091 if (vc == NULL)
5092 return NULL;
5093
5094 mu = xmalloc(sizeof(*mu));
5095 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5096 free(mu);
5097 mu = NULL;
5098 }
5099
5100 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5101 mu->buf = xmalloc(mu->len);
5102 mu->space = NULL;
5103 mu->space_list = NULL;
5104 mu->next = *updates;
5105 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5106 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5107 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5108 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5109
5110 vc = (struct vd_config*)mu->buf;
5111 for (di = rv ; di ; di = di->next) {
5112 unsigned int i_sec, i_prim;
5113 i_sec = di->disk.raid_disk
5114 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5115 i_prim = di->disk.raid_disk
5116 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5117 vc = (struct vd_config *)(mu->buf
5118 + i_sec * ddf->conf_rec_len * 512);
5119 for (dl = ddf->dlist; dl; dl = dl->next)
5120 if (dl->major == di->disk.major
5121 && dl->minor == di->disk.minor)
5122 break;
5123 if (!dl) {
5124 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
5125 __func__, di->disk.raid_disk,
5126 di->disk.major, di->disk.minor);
5127 return NULL;
5128 }
5129 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5130 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5131 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5132 be32_to_cpu(vc->phys_refnum[i_prim]),
5133 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5134 }
5135 *updates = mu;
5136 return rv;
5137 }
5138 #endif /* MDASSEMBLE */
5139
5140 static int ddf_level_to_layout(int level)
5141 {
5142 switch(level) {
5143 case 0:
5144 case 1:
5145 return 0;
5146 case 5:
5147 return ALGORITHM_LEFT_SYMMETRIC;
5148 case 6:
5149 return ALGORITHM_ROTATING_N_CONTINUE;
5150 case 10:
5151 return 0x102;
5152 default:
5153 return UnSet;
5154 }
5155 }
5156
5157 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5158 {
5159 if (level && *level == UnSet)
5160 *level = LEVEL_CONTAINER;
5161
5162 if (level && layout && *layout == UnSet)
5163 *layout = ddf_level_to_layout(*level);
5164 }
5165
/* DDF metadata method table, plugged into mdadm's generic superswitch
 * framework.  Entries inside #ifndef MDASSEMBLE are omitted from the
 * stripped-down mdassemble build.
 */
struct superswitch super_ddf = {
#ifndef MDASSEMBLE
	/* examine/display operations */
	.examine_super	= examine_super_ddf,
	.brief_examine_super = brief_examine_super_ddf,
	.brief_examine_subarrays = brief_examine_subarrays_ddf,
	.export_examine_super = export_examine_super_ddf,
	.detail_super	= detail_super_ddf,
	.brief_detail_super = brief_detail_super_ddf,
	/* create/modify operations */
	.validate_geometry = validate_geometry_ddf,
	.write_init_super = write_init_super_ddf,
	.add_to_super	= add_to_super_ddf,
	.remove_from_super = remove_from_super_ddf,
	.load_container	= load_container_ddf,
	.copy_metadata = copy_metadata_ddf,
	.kill_subarray  = kill_subarray_ddf,
#endif
	.match_home	= match_home_ddf,
	.uuid_from_super= uuid_from_super_ddf,
	.getinfo_super  = getinfo_super_ddf,
	.update_super	= update_super_ddf,

	.avail_size	= avail_size_ddf,

	.compare_super	= compare_super_ddf,

	.load_super	= load_super_ddf,
	.init_super	= init_super_ddf,
	.store_super	= store_super_ddf,
	.free_super	= free_super_ddf,
	.match_metadata_desc = match_metadata_desc_ddf,
	.container_content = container_content_ddf,
	.default_geometry = default_geometry_ddf,

	.external	= 1,

#ifndef MDASSEMBLE
/* for mdmon */
	.open_new       = ddf_open_new,
	.set_array_state= ddf_set_array_state,
	.set_disk       = ddf_set_disk,
	.sync_metadata  = ddf_sync_metadata,
	.process_update	= ddf_process_update,
	.prepare_update	= ddf_prepare_update,
	.activate_spare = ddf_activate_spare,
#endif
	.name = "ddf",
};