1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2014 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33 #include <stddef.h>
34
35 /* a non-official T10 name for creation GUIDs */
36 static char T10[] = "Linux-MD";
37
38 /* DDF timestamps are 1980 based, so we need to add
39 * seconds-in-decade-of-seventies to convert to linux timestamps.
40 * 10 years with 2 leap years.
41 */
42 #define DECADE (3600*24*(365*10+2))
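/*
 * Worked example: DECADE = 3600*24*(365*10+2) = 315532800 seconds, i.e. the
 * gap between the Unix epoch (1970-01-01) and the DDF epoch (1980-01-01).
 * Timestamps are converted with "unix_time = ddf_time + DECADE" when reading
 * and generated with "ddf_time = time(0) - DECADE" when writing, as done
 * throughout this file.
 */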
43 unsigned long crc32(
44 unsigned long crc,
45 const unsigned char *buf,
46 unsigned len);
47
48 #define DDF_NOTFOUND (~0U)
49 #define DDF_CONTAINER (DDF_NOTFOUND-1)
50
51 /* Default for safe_mode_delay. Same value as for IMSM.
52 */
53 static const int DDF_SAFE_MODE_DELAY = 4000;
54
55 /* The DDF metadata handling.
56 * DDF metadata lives at the end of the device.
57 * The last 512 byte block provides an 'anchor' which is used to locate
58 * the rest of the metadata which usually lives immediately behind the anchor.
59 *
60 * Note:
61 * - all multibyte numeric fields are bigendian.
62 * - all strings are space padded.
63 *
64 */
65
66 typedef struct __be16 {
67 __u16 _v16;
68 } be16;
69 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
70 #define be16_and(x, y) ((x)._v16 & (y)._v16)
71 #define be16_or(x, y) ((x)._v16 | (y)._v16)
72 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
73 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
74
75 typedef struct __be32 {
76 __u32 _v32;
77 } be32;
78 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
79
80 typedef struct __be64 {
81 __u64 _v64;
82 } be64;
83 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
84
85 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
86 static inline be16 cpu_to_be16(__u16 x)
87 {
88 be16 be = { ._v16 = __cpu_to_be16(x) };
89 return be;
90 }
91
92 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
93 static inline be32 cpu_to_be32(__u32 x)
94 {
95 be32 be = { ._v32 = __cpu_to_be32(x) };
96 return be;
97 }
98
99 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
100 static inline be64 cpu_to_be64(__u64 x)
101 {
102 be64 be = { ._v64 = __cpu_to_be64(x) };
103 return be;
104 }
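/*
 * Illustrative sketch (kept out of the build): the struct wrappers above make
 * it a type error to mix CPU-order and disk-order values, so every comparison
 * or conversion has to go through these helpers.
 */
#if 0
	be32 on_disk = cpu_to_be32(0xDE11DE11);	/* value as stored on disk */
	__u32 native = be32_to_cpu(on_disk);	/* back in CPU byte order */

	if (be32_eq(on_disk, cpu_to_be32(native)))
		;	/* round trip preserved the value */
	/* "if (on_disk == native)" would not even compile - that is the point */
#endif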
105
106 /* Primary Raid Level (PRL) */
107 #define DDF_RAID0 0x00
108 #define DDF_RAID1 0x01
109 #define DDF_RAID3 0x03
110 #define DDF_RAID4 0x04
111 #define DDF_RAID5 0x05
112 #define DDF_RAID1E 0x11
113 #define DDF_JBOD 0x0f
114 #define DDF_CONCAT 0x1f
115 #define DDF_RAID5E 0x15
116 #define DDF_RAID5EE 0x25
117 #define DDF_RAID6 0x06
118
119 /* Raid Level Qualifier (RLQ) */
120 #define DDF_RAID0_SIMPLE 0x00
121 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
122 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
123 #define DDF_RAID3_0 0x00 /* parity in first extent */
124 #define DDF_RAID3_N 0x01 /* parity in last extent */
125 #define DDF_RAID4_0 0x00 /* parity in first extent */
126 #define DDF_RAID4_N 0x01 /* parity in last extent */
127 /* these apply to raid5e and raid5ee as well */
128 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
129 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
130 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
131 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
132
133 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
134 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
135
136 /* Secondary RAID Level (SRL) */
137 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
138 #define DDF_2MIRRORED 0x01
139 #define DDF_2CONCAT 0x02
140 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
141
142 /* Magic numbers */
143 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
144 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
145 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
146 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
147 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
148 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
149 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
150 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
151 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
152 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
153
154 #define DDF_GUID_LEN 24
155 #define DDF_REVISION_0 "01.00.00"
156 #define DDF_REVISION_2 "01.02.00"
157
158 struct ddf_header {
159 be32 magic; /* DDF_HEADER_MAGIC */
160 be32 crc;
161 char guid[DDF_GUID_LEN];
162 char revision[8]; /* 01.02.00 */
163 be32 seq; /* starts at '1' */
164 be32 timestamp;
165 __u8 openflag;
166 __u8 foreignflag;
167 __u8 enforcegroups;
168 __u8 pad0; /* 0xff */
169 __u8 pad1[12]; /* 12 * 0xff */
170 /* 64 bytes so far */
171 __u8 header_ext[32]; /* reserved: fill with 0xff */
172 be64 primary_lba;
173 be64 secondary_lba;
174 __u8 type;
175 __u8 pad2[3]; /* 0xff */
176 be32 workspace_len; /* sectors for vendor space -
177 * at least 32768(sectors) */
178 be64 workspace_lba;
179 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
180 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
181 be16 max_partitions; /* i.e. max num of configuration
182 record entries per disk */
183 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
184 *12/512) */
185 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
186 __u8 pad3[54]; /* 0xff */
187 /* 192 bytes so far */
188 be32 controller_section_offset;
189 be32 controller_section_length;
190 be32 phys_section_offset;
191 be32 phys_section_length;
192 be32 virt_section_offset;
193 be32 virt_section_length;
194 be32 config_section_offset;
195 be32 config_section_length;
196 be32 data_section_offset;
197 be32 data_section_length;
198 be32 bbm_section_offset;
199 be32 bbm_section_length;
200 be32 diag_space_offset;
201 be32 diag_space_length;
202 be32 vendor_offset;
203 be32 vendor_length;
204 /* 256 bytes so far */
205 __u8 pad4[256]; /* 0xff */
206 };
207
208 /* type field */
209 #define DDF_HEADER_ANCHOR 0x00
210 #define DDF_HEADER_PRIMARY 0x01
211 #define DDF_HEADER_SECONDARY 0x02
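/*
 * Illustrative sketch (kept out of the build, assuming 'fd' is open on the
 * whole device): the anchor is always the last 512-byte block, so locating it
 * is a seek to (size - 512) plus the sanity checks that load_ddf_headers()
 * below performs in full (the CRC and revision checks are omitted here).
 */
#if 0
	struct ddf_header anchor;
	unsigned long long dsize;

	get_dev_size(fd, NULL, &dsize);		/* device size in bytes */
	if (lseek64(fd, dsize - 512, 0) >= 0 &&
	    read(fd, &anchor, 512) == 512 &&
	    be32_eq(anchor.magic, DDF_HEADER_MAGIC) &&
	    anchor.type == DDF_HEADER_ANCHOR)
		;	/* primary/secondary headers are then read via
			 * anchor.primary_lba / anchor.secondary_lba */
#endif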
212
213 /* The content of the 'controller section' - global scope */
214 struct ddf_controller_data {
215 be32 magic; /* DDF_CONTROLLER_MAGIC */
216 be32 crc;
217 char guid[DDF_GUID_LEN];
218 struct controller_type {
219 be16 vendor_id;
220 be16 device_id;
221 be16 sub_vendor_id;
222 be16 sub_device_id;
223 } type;
224 char product_id[16];
225 __u8 pad[8]; /* 0xff */
226 __u8 vendor_data[448];
227 };
228
229 /* The content of phys_section - global scope */
230 struct phys_disk {
231 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
232 be32 crc;
233 be16 used_pdes; /* This is a counter, not a max - the list
234 * of used entries may not be dense */
235 be16 max_pdes;
236 __u8 pad[52];
237 struct phys_disk_entry {
238 char guid[DDF_GUID_LEN];
239 be32 refnum;
240 be16 type;
241 be16 state;
242 be64 config_size; /* DDF structures must be after here */
243 char path[18]; /* Another horrible structure really
244 * but is "used for information
245 * purposes only" */
246 __u8 pad[6];
247 } entries[0];
248 };
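/*
 * Illustrative sketch (kept out of the build, assuming 'phys' points to a
 * loaded struct phys_disk): the entries[] table is not dense, so any scan must
 * skip unused slots, which carry a refnum of 0xffffffff - compare
 * examine_pds() and find_phys() below.
 */
#if 0
	unsigned int i;

	for (i = 0; i < be16_to_cpu(phys->max_pdes); i++) {
		if (be32_to_cpu(phys->entries[i].refnum) == 0xffffffff)
			continue;	/* unused slot */
		/* ... operate on phys->entries[i] ... */
	}
#endif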
249
250 /* phys_disk_entry.type is a bitmap - bigendian remember */
251 #define DDF_Forced_PD_GUID 1
252 #define DDF_Active_in_VD 2
253 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
254 #define DDF_Spare 8 /* overrides Global_spare */
255 #define DDF_Foreign 16
256 #define DDF_Legacy 32 /* no DDF on this device */
257
258 #define DDF_Interface_mask 0xf00
259 #define DDF_Interface_SCSI 0x100
260 #define DDF_Interface_SAS 0x200
261 #define DDF_Interface_SATA 0x300
262 #define DDF_Interface_FC 0x400
263
264 /* phys_disk_entry.state is a bigendian bitmap */
265 #define DDF_Online 1
266 #define DDF_Failed 2 /* overrides 1,4,8 */
267 #define DDF_Rebuilding 4
268 #define DDF_Transition 8
269 #define DDF_SMART 16
270 #define DDF_ReadErrors 32
271 #define DDF_Missing 64
272
273 /* The content of the virt_section global scope */
274 struct virtual_disk {
275 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
276 be32 crc;
277 be16 populated_vdes;
278 be16 max_vdes;
279 __u8 pad[52];
280 struct virtual_entry {
281 char guid[DDF_GUID_LEN];
282 be16 unit;
283 __u16 pad0; /* 0xffff */
284 be16 guid_crc;
285 be16 type;
286 __u8 state;
287 __u8 init_state;
288 __u8 pad1[14];
289 char name[16];
290 } entries[0];
291 };
292
293 /* virtual_entry.type is a bitmap - bigendian */
294 #define DDF_Shared 1
295 #define DDF_Enforce_Groups 2
296 #define DDF_Unicode 4
297 #define DDF_Owner_Valid 8
298
299 /* virtual_entry.state is a bigendian bitmap */
300 #define DDF_state_mask 0x7
301 #define DDF_state_optimal 0x0
302 #define DDF_state_degraded 0x1
303 #define DDF_state_deleted 0x2
304 #define DDF_state_missing 0x3
305 #define DDF_state_failed 0x4
306 #define DDF_state_part_optimal 0x5
307
308 #define DDF_state_morphing 0x8
309 #define DDF_state_inconsistent 0x10
310
311 /* virtual_entry.init_state is a bigendian bitmap */
312 #define DDF_initstate_mask 0x03
313 #define DDF_init_not 0x00
314 #define DDF_init_quick 0x01 /* initialisation in progress.
315 * i.e. 'state_inconsistent' */
316 #define DDF_init_full 0x02
317
318 #define DDF_access_mask 0xc0
319 #define DDF_access_rw 0x00
320 #define DDF_access_ro 0x80
321 #define DDF_access_blocked 0xc0
322
323 /* The content of the config_section - local scope
324 * It has multiple records each config_record_len sectors
325 * They can be vd_config or spare_assign
326 */
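/*
 * Illustrative sketch (kept out of the build, assuming 'conf' is the loaded
 * section buffer and 'super' the containing ddf_super): the section is walked
 * in steps of config_record_len sectors and each record is identified by its
 * magic, which is how load_ddf_local() builds the conflist.
 */
#if 0
	unsigned int sec;

	for (sec = 0;
	     sec < be32_to_cpu(super->active->config_section_length);
	     sec += super->conf_rec_len) {
		struct vd_config *vd = (struct vd_config *)(conf + sec * 512);

		if (be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
			;	/* virtual disk configuration record */
		else if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC))
			;	/* spare assignment record */
		/* anything else is vendor specific and is ignored */
	}
#endif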
327
328 struct vd_config {
329 be32 magic; /* DDF_VD_CONF_MAGIC */
330 be32 crc;
331 char guid[DDF_GUID_LEN];
332 be32 timestamp;
333 be32 seqnum;
334 __u8 pad0[24];
335 be16 prim_elmnt_count;
336 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
337 __u8 prl;
338 __u8 rlq;
339 __u8 sec_elmnt_count;
340 __u8 sec_elmnt_seq;
341 __u8 srl;
342 be64 blocks; /* blocks per component could be different
343 * on different component devices...(only
344 * for concat I hope) */
345 be64 array_blocks; /* blocks in array */
346 __u8 pad1[8];
347 be32 spare_refs[8]; /* This is used to detect missing spares.
348 * As we don't have an interface for that
349 * the values are ignored.
350 */
351 __u8 cache_pol[8];
352 __u8 bg_rate;
353 __u8 pad2[3];
354 __u8 pad3[52];
355 __u8 pad4[192];
356 __u8 v0[32]; /* reserved- 0xff */
357 __u8 v1[32]; /* reserved- 0xff */
358 __u8 v2[16]; /* reserved- 0xff */
359 __u8 v3[16]; /* reserved- 0xff */
360 __u8 vendor[32];
361 be32 phys_refnum[0]; /* refnum of each disk in sequence */
362 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
363 bvd are always the same size */
364 };
365 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
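/*
 * Illustrative sketch (kept out of the build, with 'ddf', 'vd' and 'i' as in
 * examine_vd()): the start LBA of component 'i' within a BVD, in 512-byte
 * sectors, is read through the macro above:
 */
#if 0
	unsigned long long start = be64_to_cpu(LBA_OFFSET(ddf, vd)[i]);
#endif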
366
367 /* vd_config.cache_pol[7] is a bitmap */
368 #define DDF_cache_writeback 1 /* else writethrough */
369 #define DDF_cache_wadaptive 2 /* only applies if writeback */
370 #define DDF_cache_readahead 4
371 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
372 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
373 #define DDF_cache_wallowed 32 /* enable write caching */
374 #define DDF_cache_rallowed 64 /* enable read caching */
375
376 struct spare_assign {
377 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
378 be32 crc;
379 be32 timestamp;
380 __u8 reserved[7];
381 __u8 type;
382 be16 populated; /* SAEs used */
383 be16 max; /* max SAEs */
384 __u8 pad[8];
385 struct spare_assign_entry {
386 char guid[DDF_GUID_LEN];
387 be16 secondary_element;
388 __u8 pad[6];
389 } spare_ents[0];
390 };
391 /* spare_assign.type is a bitmap */
392 #define DDF_spare_dedicated 0x1 /* else global */
393 #define DDF_spare_revertible 0x2 /* else committable */
394 #define DDF_spare_active 0x4 /* else not active */
395 #define DDF_spare_affinity 0x8 /* enclosure affinity */
396
397 /* The data_section contents - local scope */
398 struct disk_data {
399 be32 magic; /* DDF_PHYS_DATA_MAGIC */
400 be32 crc;
401 char guid[DDF_GUID_LEN];
402 be32 refnum; /* crc of some magic drive data ... */
403 __u8 forced_ref; /* set when above was not result of magic */
404 __u8 forced_guid; /* set if guid was forced rather than magic */
405 __u8 vendor[32];
406 __u8 pad[442];
407 };
408
409 /* bbm_section content */
410 struct bad_block_log {
411 be32 magic;
412 be32 crc;
413 be16 entry_count;
414 be32 spare_count;
415 __u8 pad[10];
416 be64 first_spare;
417 struct mapped_block {
418 be64 defective_start;
419 be32 replacement_start;
420 be16 remap_count;
421 __u8 pad[2];
422 } entries[0];
423 };
424
425 /* Struct for internally holding ddf structures */
426 /* The DDF structure stored on each device is potentially
427 * quite different, as some data is global and some is local.
428 * The global data is:
429 * - ddf header
430 * - controller_data
431 * - Physical disk records
432 * - Virtual disk records
433 * The local data is:
434 * - Configuration records
435 * - Physical Disk data section
436 * ( and Bad block and vendor which I don't care about yet).
437 *
438 * The local data is parsed into separate lists as it is read
439 * and reconstructed for writing. This means that we only need
440 * to make config changes once and they are automatically
441 * propagated to all devices.
442 * The global (config and disk data) records are each in a list
443 * of separate data structures. When writing we find the entry
444 * or entries applicable to the particular device.
445 */
446 struct ddf_super {
447 struct ddf_header anchor, primary, secondary;
448 struct ddf_controller_data controller;
449 struct ddf_header *active;
450 struct phys_disk *phys;
451 struct virtual_disk *virt;
452 char *conf;
453 int pdsize, vdsize;
454 unsigned int max_part, mppe, conf_rec_len;
455 int currentdev;
456 int updates_pending;
457 struct vcl {
458 union {
459 char space[512];
460 struct {
461 struct vcl *next;
462 unsigned int vcnum; /* index into ->virt */
463 /* For an array with a secondary level there are
464 * multiple vd_config structures, all with the same
465 * guid but with different sec_elmnt_seq.
466 * One of these structures is in 'conf' below.
467 * The others are in other_bvds, not in any
468 * particular order.
469 */
470 struct vd_config **other_bvds;
471 __u64 *block_sizes; /* NULL if all the same */
472 };
473 };
474 struct vd_config conf;
475 } *conflist, *currentconf;
476 struct dl {
477 union {
478 char space[512];
479 struct {
480 struct dl *next;
481 int major, minor;
482 char *devname;
483 int fd;
484 unsigned long long size; /* sectors */
485 be64 primary_lba; /* sectors */
486 be64 secondary_lba; /* sectors */
487 be64 workspace_lba; /* sectors */
488 int pdnum; /* index in ->phys */
489 struct spare_assign *spare;
490 void *mdupdate; /* hold metadata update */
491
492 /* These fields used by auto-layout */
493 int raiddisk; /* slot to fill in autolayout */
494 __u64 esize;
495 int displayed;
496 };
497 };
498 struct disk_data disk;
499 struct vcl *vlist[0]; /* max_part in size */
500 } *dlist, *add_list;
501 };
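/*
 * Illustrative sketch (kept out of the build): the local data is parsed into
 * two singly linked lists, which the rest of this file walks like this (see
 * e.g. examine_vd() and examine_pds()):
 */
#if 0
	struct vcl *v;
	struct dl *d;

	for (v = ddf->conflist; v; v = v->next)
		;	/* one vcl per virtual disk configuration (GUID) */
	for (d = ddf->dlist; d; d = d->next)
		;	/* one dl per physical disk in this container */
#endif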
502
503 static int load_super_ddf_all(struct supertype *st, int fd,
504 void **sbp, char *devname);
505 static int get_svd_state(const struct ddf_super *, const struct vcl *);
506
507 static int validate_geometry_ddf_bvd(struct supertype *st,
508 int level, int layout, int raiddisks,
509 int *chunk, unsigned long long size,
510 unsigned long long data_offset,
511 char *dev, unsigned long long *freesize,
512 int verbose);
513
514 static void free_super_ddf(struct supertype *st);
515 static int all_ff(const char *guid);
516 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
517 be32 refnum, unsigned int nmax,
518 const struct vd_config **bvd,
519 unsigned int *idx);
520 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
521 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
522 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
523 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i);
524 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
525 static int init_super_ddf_bvd(struct supertype *st,
526 mdu_array_info_t *info,
527 unsigned long long size,
528 char *name, char *homehost,
529 int *uuid, unsigned long long data_offset);
530
531 #if DEBUG
532 static void pr_state(struct ddf_super *ddf, const char *msg)
533 {
534 unsigned int i;
535 dprintf("%s: ", msg);
536 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
537 if (all_ff(ddf->virt->entries[i].guid))
538 continue;
539 dprintf_cont("%u(s=%02x i=%02x) ", i,
540 ddf->virt->entries[i].state,
541 ddf->virt->entries[i].init_state);
542 }
543 dprintf_cont("\n");
544 }
545 #else
546 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
547 #endif
548
549 static void _ddf_set_updates_pending(struct ddf_super *ddf, struct vd_config *vc,
550 const char *func)
551 {
552 if (vc) {
553 vc->timestamp = cpu_to_be32(time(0)-DECADE);
554 vc->seqnum = cpu_to_be32(be32_to_cpu(vc->seqnum) + 1);
555 }
556 if (ddf->updates_pending)
557 return;
558 ddf->updates_pending = 1;
559 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
560 pr_state(ddf, func);
561 }
562
563 #define ddf_set_updates_pending(x,v) _ddf_set_updates_pending((x), (v), __func__)
564
565 static be32 calc_crc(void *buf, int len)
566 {
567 /* crcs are always at the same place as in the ddf_header */
568 struct ddf_header *ddf = buf;
569 be32 oldcrc = ddf->crc;
570 __u32 newcrc;
571 ddf->crc = cpu_to_be32(0xffffffff);
572
573 newcrc = crc32(0, buf, len);
574 ddf->crc = oldcrc;
575 /* The crc is stored (like everything) bigendian, so convert
576 * here for simplicity
577 */
578 return cpu_to_be32(newcrc);
579 }
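/*
 * Illustrative sketch (kept out of the build): every DDF structure is verified
 * by recomputing its CRC (with the stored field temporarily treated as
 * 0xffffffff, which calc_crc() handles) and comparing the big-endian values,
 * as the header and section loaders below do:
 */
#if 0
	if (!be32_eq(calc_crc(hdr, 512), hdr->crc))
		return 0;	/* corrupt - reject this header */
#endif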
580
581 #define DDF_INVALID_LEVEL 0xff
582 #define DDF_NO_SECONDARY 0xff
583 static int err_bad_md_layout(const mdu_array_info_t *array)
584 {
585 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
586 array->level, array->layout, array->raid_disks);
587 return -1;
588 }
589
590 static int layout_md2ddf(const mdu_array_info_t *array,
591 struct vd_config *conf)
592 {
593 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
594 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
595 __u8 sec_elmnt_count = 1;
596 __u8 srl = DDF_NO_SECONDARY;
597
598 switch (array->level) {
599 case LEVEL_LINEAR:
600 prl = DDF_CONCAT;
601 break;
602 case 0:
603 rlq = DDF_RAID0_SIMPLE;
604 prl = DDF_RAID0;
605 break;
606 case 1:
607 switch (array->raid_disks) {
608 case 2:
609 rlq = DDF_RAID1_SIMPLE;
610 break;
611 case 3:
612 rlq = DDF_RAID1_MULTI;
613 break;
614 default:
615 return err_bad_md_layout(array);
616 }
617 prl = DDF_RAID1;
618 break;
619 case 4:
620 if (array->layout != 0)
621 return err_bad_md_layout(array);
622 rlq = DDF_RAID4_N;
623 prl = DDF_RAID4;
624 break;
625 case 5:
626 switch (array->layout) {
627 case ALGORITHM_LEFT_ASYMMETRIC:
628 rlq = DDF_RAID5_N_RESTART;
629 break;
630 case ALGORITHM_RIGHT_ASYMMETRIC:
631 rlq = DDF_RAID5_0_RESTART;
632 break;
633 case ALGORITHM_LEFT_SYMMETRIC:
634 rlq = DDF_RAID5_N_CONTINUE;
635 break;
636 case ALGORITHM_RIGHT_SYMMETRIC:
637 /* not mentioned in standard */
638 default:
639 return err_bad_md_layout(array);
640 }
641 prl = DDF_RAID5;
642 break;
643 case 6:
644 switch (array->layout) {
645 case ALGORITHM_ROTATING_N_RESTART:
646 rlq = DDF_RAID5_N_RESTART;
647 break;
648 case ALGORITHM_ROTATING_ZERO_RESTART:
649 rlq = DDF_RAID6_0_RESTART;
650 break;
651 case ALGORITHM_ROTATING_N_CONTINUE:
652 rlq = DDF_RAID5_N_CONTINUE;
653 break;
654 default:
655 return err_bad_md_layout(array);
656 }
657 prl = DDF_RAID6;
658 break;
659 case 10:
660 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
661 rlq = DDF_RAID1_SIMPLE;
662 prim_elmnt_count = cpu_to_be16(2);
663 sec_elmnt_count = array->raid_disks / 2;
664 srl = DDF_2SPANNED;
665 prl = DDF_RAID1;
666 } else if (array->raid_disks % 3 == 0 &&
667 array->layout == 0x103) {
668 rlq = DDF_RAID1_MULTI;
669 prim_elmnt_count = cpu_to_be16(3);
670 sec_elmnt_count = array->raid_disks / 3;
671 srl = DDF_2SPANNED;
672 prl = DDF_RAID1;
673 } else if (array->layout == 0x201) {
674 prl = DDF_RAID1E;
675 rlq = DDF_RAID1E_OFFSET;
676 } else if (array->layout == 0x102) {
677 prl = DDF_RAID1E;
678 rlq = DDF_RAID1E_ADJACENT;
679 } else
680 return err_bad_md_layout(array);
681 break;
682 default:
683 return err_bad_md_layout(array);
684 }
685 conf->prl = prl;
686 conf->prim_elmnt_count = prim_elmnt_count;
687 conf->rlq = rlq;
688 conf->srl = srl;
689 conf->sec_elmnt_count = sec_elmnt_count;
690 return 0;
691 }
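/*
 * Usage sketch (kept out of the build): an md RAID10 with the near=2 layout
 * (0x102) and 4 disks maps to two 2-disk DDF RAID1 BVDs that are spanned at
 * the secondary level:
 */
#if 0
	mdu_array_info_t array = { .level = 10, .layout = 0x102, .raid_disks = 4 };
	struct vd_config conf;

	if (layout_md2ddf(&array, &conf) == 0)
		;	/* conf.prl == DDF_RAID1, conf.rlq == DDF_RAID1_SIMPLE,
			 * conf.prim_elmnt_count == cpu_to_be16(2),
			 * conf.sec_elmnt_count == 2, conf.srl == DDF_2SPANNED */
#endif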
692
693 static int err_bad_ddf_layout(const struct vd_config *conf)
694 {
695 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
696 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
697 return -1;
698 }
699
700 static int layout_ddf2md(const struct vd_config *conf,
701 mdu_array_info_t *array)
702 {
703 int level = LEVEL_UNSUPPORTED;
704 int layout = 0;
705 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
706
707 if (conf->sec_elmnt_count > 1) {
708 /* see also check_secondary() */
709 if (conf->prl != DDF_RAID1 ||
710 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
711 pr_err("Unsupported secondary RAID level %u/%u\n",
712 conf->prl, conf->srl);
713 return -1;
714 }
715 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
716 layout = 0x102;
717 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
718 layout = 0x103;
719 else
720 return err_bad_ddf_layout(conf);
721 raiddisks *= conf->sec_elmnt_count;
722 level = 10;
723 goto good;
724 }
725
726 switch (conf->prl) {
727 case DDF_CONCAT:
728 level = LEVEL_LINEAR;
729 break;
730 case DDF_RAID0:
731 if (conf->rlq != DDF_RAID0_SIMPLE)
732 return err_bad_ddf_layout(conf);
733 level = 0;
734 break;
735 case DDF_RAID1:
736 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
737 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
738 return err_bad_ddf_layout(conf);
739 level = 1;
740 break;
741 case DDF_RAID1E:
742 if (conf->rlq == DDF_RAID1E_ADJACENT)
743 layout = 0x102;
744 else if (conf->rlq == DDF_RAID1E_OFFSET)
745 layout = 0x201;
746 else
747 return err_bad_ddf_layout(conf);
748 level = 10;
749 break;
750 case DDF_RAID4:
751 if (conf->rlq != DDF_RAID4_N)
752 return err_bad_ddf_layout(conf);
753 level = 4;
754 break;
755 case DDF_RAID5:
756 switch (conf->rlq) {
757 case DDF_RAID5_N_RESTART:
758 layout = ALGORITHM_LEFT_ASYMMETRIC;
759 break;
760 case DDF_RAID5_0_RESTART:
761 layout = ALGORITHM_RIGHT_ASYMMETRIC;
762 break;
763 case DDF_RAID5_N_CONTINUE:
764 layout = ALGORITHM_LEFT_SYMMETRIC;
765 break;
766 default:
767 return err_bad_ddf_layout(conf);
768 }
769 level = 5;
770 break;
771 case DDF_RAID6:
772 switch (conf->rlq) {
773 case DDF_RAID5_N_RESTART:
774 layout = ALGORITHM_ROTATING_N_RESTART;
775 break;
776 case DDF_RAID6_0_RESTART:
777 layout = ALGORITHM_ROTATING_ZERO_RESTART;
778 break;
779 case DDF_RAID5_N_CONTINUE:
780 layout = ALGORITHM_ROTATING_N_CONTINUE;
781 break;
782 default:
783 return err_bad_ddf_layout(conf);
784 }
785 level = 6;
786 break;
787 default:
788 return err_bad_ddf_layout(conf);
789 };
790
791 good:
792 array->level = level;
793 array->layout = layout;
794 array->raid_disks = raiddisks;
795 return 0;
796 }
797
798 static int load_ddf_header(int fd, unsigned long long lba,
799 unsigned long long size,
800 int type,
801 struct ddf_header *hdr, struct ddf_header *anchor)
802 {
803 /* read a ddf header (primary or secondary) from fd/lba
804 * and check that it is consistent with anchor
805 * Need to check:
806 * magic, crc, guid, rev, and LBA's header_type, and
807 * everything after header_type must be the same
808 */
809 if (lba >= size-1)
810 return 0;
811
812 if (lseek64(fd, lba<<9, 0) < 0)
813 return 0;
814
815 if (read(fd, hdr, 512) != 512)
816 return 0;
817
818 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
819 pr_err("bad header magic\n");
820 return 0;
821 }
822 if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
823 pr_err("bad CRC\n");
824 return 0;
825 }
826 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
827 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
828 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
829 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
830 hdr->type != type ||
831 memcmp(anchor->pad2, hdr->pad2, 512 -
832 offsetof(struct ddf_header, pad2)) != 0) {
833 pr_err("header mismatch\n");
834 return 0;
835 }
836
837 /* Looks good enough to me... */
838 return 1;
839 }
840
841 static void *load_section(int fd, struct ddf_super *super, void *buf,
842 be32 offset_be, be32 len_be, int check)
843 {
844 unsigned long long offset = be32_to_cpu(offset_be);
845 unsigned long long len = be32_to_cpu(len_be);
846 int dofree = (buf == NULL);
847
848 if (check)
849 if (len != 2 && len != 8 && len != 32 &&
850 len != 128 && len != 512)
851 return NULL;
852
853 if (len > 1024)
854 return NULL;
855 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
856 buf = NULL;
857
858 if (!buf)
859 return NULL;
860
861 if (super->active->type == 1)
862 offset += be64_to_cpu(super->active->primary_lba);
863 else
864 offset += be64_to_cpu(super->active->secondary_lba);
865
866 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
867 if (dofree)
868 free(buf);
869 return NULL;
870 }
871 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
872 if (dofree)
873 free(buf);
874 return NULL;
875 }
876 return buf;
877 }
878
879 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
880 {
881 unsigned long long dsize;
882
883 get_dev_size(fd, NULL, &dsize);
884
885 if (lseek64(fd, dsize-512, 0) < 0) {
886 if (devname)
887 pr_err("Cannot seek to anchor block on %s: %s\n",
888 devname, strerror(errno));
889 return 1;
890 }
891 if (read(fd, &super->anchor, 512) != 512) {
892 if (devname)
893 pr_err("Cannot read anchor block on %s: %s\n",
894 devname, strerror(errno));
895 return 1;
896 }
897 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
898 if (devname)
899 pr_err("no DDF anchor found on %s\n",
900 devname);
901 return 2;
902 }
903 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
904 if (devname)
905 pr_err("bad CRC on anchor on %s\n",
906 devname);
907 return 2;
908 }
909 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
910 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
911 if (devname)
912 pr_err("can only support super revision %.8s and earlier, not %.8s on %s\n",
913 DDF_REVISION_2, super->anchor.revision,devname);
914 return 2;
915 }
916 super->active = NULL;
917 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
918 dsize >> 9, 1,
919 &super->primary, &super->anchor) == 0) {
920 if (devname)
921 pr_err("Failed to load primary DDF header on %s\n", devname);
922 } else
923 super->active = &super->primary;
924
925 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
926 dsize >> 9, 2,
927 &super->secondary, &super->anchor)) {
928 if (super->active == NULL ||
929 (be32_to_cpu(super->primary.seq)
930 < be32_to_cpu(super->secondary.seq) &&
931 !super->secondary.openflag) ||
932 (be32_to_cpu(super->primary.seq) ==
933 be32_to_cpu(super->secondary.seq) &&
934 super->primary.openflag && !super->secondary.openflag))
935 super->active = &super->secondary;
936 } else if (devname &&
937 be64_to_cpu(super->anchor.secondary_lba) != ~(__u64)0)
938 pr_err("Failed to load secondary DDF header on %s\n",
939 devname);
940 if (super->active == NULL)
941 return 2;
942 return 0;
943 }
944
945 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
946 {
947 void *ok;
948 ok = load_section(fd, super, &super->controller,
949 super->active->controller_section_offset,
950 super->active->controller_section_length,
951 0);
952 super->phys = load_section(fd, super, NULL,
953 super->active->phys_section_offset,
954 super->active->phys_section_length,
955 1);
956 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
957
958 super->virt = load_section(fd, super, NULL,
959 super->active->virt_section_offset,
960 super->active->virt_section_length,
961 1);
962 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
963 if (!ok ||
964 !super->phys ||
965 !super->virt) {
966 free(super->phys);
967 free(super->virt);
968 super->phys = NULL;
969 super->virt = NULL;
970 return 2;
971 }
972 super->conflist = NULL;
973 super->dlist = NULL;
974
975 super->max_part = be16_to_cpu(super->active->max_partitions);
976 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
977 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
978 return 0;
979 }
980
981 #define DDF_UNUSED_BVD 0xff
982 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
983 {
984 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
985 unsigned int i, vdsize;
986 void *p;
987 if (n_vds == 0) {
988 vcl->other_bvds = NULL;
989 return 0;
990 }
991 vdsize = ddf->conf_rec_len * 512;
992 if (posix_memalign(&p, 512, n_vds *
993 (vdsize + sizeof(struct vd_config *))) != 0)
994 return -1;
995 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
996 for (i = 0; i < n_vds; i++) {
997 vcl->other_bvds[i] = p + i * vdsize;
998 memset(vcl->other_bvds[i], 0, vdsize);
999 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
1000 }
1001 return 0;
1002 }
1003
1004 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
1005 unsigned int len)
1006 {
1007 int i;
1008 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
1009 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
1010 break;
1011
1012 if (i < vcl->conf.sec_elmnt_count-1) {
1013 if (be32_to_cpu(vd->seqnum) <=
1014 be32_to_cpu(vcl->other_bvds[i]->seqnum))
1015 return;
1016 } else {
1017 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
1018 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
1019 break;
1020 if (i == vcl->conf.sec_elmnt_count-1) {
1021 pr_err("no space for sec level config %u, count is %u\n",
1022 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
1023 return;
1024 }
1025 }
1026 memcpy(vcl->other_bvds[i], vd, len);
1027 }
1028
1029 static int load_ddf_local(int fd, struct ddf_super *super,
1030 char *devname, int keep)
1031 {
1032 struct dl *dl;
1033 struct stat stb;
1034 char *conf;
1035 unsigned int i;
1036 unsigned int confsec;
1037 int vnum;
1038 unsigned int max_virt_disks =
1039 be16_to_cpu(super->active->max_vd_entries);
1040 unsigned long long dsize;
1041
1042 /* First the local disk info */
1043 if (posix_memalign((void**)&dl, 512,
1044 sizeof(*dl) +
1045 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
1046 pr_err("could not allocate disk info buffer\n");
1047 return 1;
1048 }
1049
1050 load_section(fd, super, &dl->disk,
1051 super->active->data_section_offset,
1052 super->active->data_section_length,
1053 0);
1054 dl->devname = devname ? xstrdup(devname) : NULL;
1055
1056 if (fstat(fd, &stb) != 0) {
1057 free(dl);
1058 return 1;
1059 }
1060 dl->major = major(stb.st_rdev);
1061 dl->minor = minor(stb.st_rdev);
1062 dl->next = super->dlist;
1063 dl->fd = keep ? fd : -1;
1064
1065 dl->size = 0;
1066 if (get_dev_size(fd, devname, &dsize))
1067 dl->size = dsize >> 9;
1068 /* If the disks have different sizes, the LBAs will differ
1069 * between phys disks.
1070 * At this point here, the values in super->active must be valid
1071 * for this phys disk. */
1072 dl->primary_lba = super->active->primary_lba;
1073 dl->secondary_lba = super->active->secondary_lba;
1074 dl->workspace_lba = super->active->workspace_lba;
1075 dl->spare = NULL;
1076 for (i = 0 ; i < super->max_part ; i++)
1077 dl->vlist[i] = NULL;
1078 super->dlist = dl;
1079 dl->pdnum = -1;
1080 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1081 if (memcmp(super->phys->entries[i].guid,
1082 dl->disk.guid, DDF_GUID_LEN) == 0)
1083 dl->pdnum = i;
1084
1085 /* Now the config list. */
1086 /* 'conf' is an array of config entries, some of which are
1087 * probably invalid. Those which are good need to be copied into
1088 * the conflist
1089 */
1090
1091 conf = load_section(fd, super, super->conf,
1092 super->active->config_section_offset,
1093 super->active->config_section_length,
1094 0);
1095 super->conf = conf;
1096 vnum = 0;
1097 for (confsec = 0;
1098 confsec < be32_to_cpu(super->active->config_section_length);
1099 confsec += super->conf_rec_len) {
1100 struct vd_config *vd =
1101 (struct vd_config *)((char*)conf + confsec*512);
1102 struct vcl *vcl;
1103
1104 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1105 if (dl->spare)
1106 continue;
1107 if (posix_memalign((void**)&dl->spare, 512,
1108 super->conf_rec_len*512) != 0) {
1109 pr_err("could not allocate spare info buf\n");
1110 return 1;
1111 }
1112
1113 memcpy(dl->spare, vd, super->conf_rec_len*512);
1114 continue;
1115 }
1116 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1117 /* Must be vendor-unique - I cannot handle those */
1118 continue;
1119
1120 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1121 if (memcmp(vcl->conf.guid,
1122 vd->guid, DDF_GUID_LEN) == 0)
1123 break;
1124 }
1125
1126 if (vcl) {
1127 dl->vlist[vnum++] = vcl;
1128 if (vcl->other_bvds != NULL &&
1129 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1130 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1131 continue;
1132 }
1133 if (be32_to_cpu(vd->seqnum) <=
1134 be32_to_cpu(vcl->conf.seqnum))
1135 continue;
1136 } else {
1137 if (posix_memalign((void**)&vcl, 512,
1138 (super->conf_rec_len*512 +
1139 offsetof(struct vcl, conf))) != 0) {
1140 pr_err("could not allocate vcl buf\n");
1141 return 1;
1142 }
1143 vcl->next = super->conflist;
1144 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1145 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1146 if (alloc_other_bvds(super, vcl) != 0) {
1147 pr_err("could not allocate other bvds\n");
1148 free(vcl);
1149 return 1;
1150 };
1151 super->conflist = vcl;
1152 dl->vlist[vnum++] = vcl;
1153 }
1154 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1155 for (i=0; i < max_virt_disks ; i++)
1156 if (memcmp(super->virt->entries[i].guid,
1157 vcl->conf.guid, DDF_GUID_LEN)==0)
1158 break;
1159 if (i < max_virt_disks)
1160 vcl->vcnum = i;
1161 }
1162
1163 return 0;
1164 }
1165
1166 static int load_super_ddf(struct supertype *st, int fd,
1167 char *devname)
1168 {
1169 unsigned long long dsize;
1170 struct ddf_super *super;
1171 int rv;
1172
1173 if (get_dev_size(fd, devname, &dsize) == 0)
1174 return 1;
1175
1176 if (test_partition(fd))
1177 /* DDF is not allowed on partitions */
1178 return 1;
1179
1180 /* 32M is a lower bound */
1181 if (dsize <= 32*1024*1024) {
1182 if (devname)
1183 pr_err("%s is too small for ddf: size is %llu sectors.\n",
1184 devname, dsize>>9);
1185 return 1;
1186 }
1187 if (dsize & 511) {
1188 if (devname)
1189 pr_err("%s is an odd size for ddf: size is %llu bytes.\n",
1190 devname, dsize);
1191 return 1;
1192 }
1193
1194 free_super_ddf(st);
1195
1196 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1197 pr_err("malloc of %zu failed.\n",
1198 sizeof(*super));
1199 return 1;
1200 }
1201 memset(super, 0, sizeof(*super));
1202
1203 rv = load_ddf_headers(fd, super, devname);
1204 if (rv) {
1205 free(super);
1206 return rv;
1207 }
1208
1209 /* Have valid headers and have chosen the best. Let's read in the rest*/
1210
1211 rv = load_ddf_global(fd, super, devname);
1212
1213 if (rv) {
1214 if (devname)
1215 pr_err("Failed to load all information sections on %s\n", devname);
1216 free(super);
1217 return rv;
1218 }
1219
1220 rv = load_ddf_local(fd, super, devname, 0);
1221
1222 if (rv) {
1223 if (devname)
1224 pr_err("Failed to load all information sections on %s\n", devname);
1225 free(super);
1226 return rv;
1227 }
1228
1229 /* Should possibly check the sections .... */
1230
1231 st->sb = super;
1232 if (st->ss == NULL) {
1233 st->ss = &super_ddf;
1234 st->minor_version = 0;
1235 st->max_devs = 512;
1236 }
1237 return 0;
1238
1239 }
1240
1241 static void free_super_ddf(struct supertype *st)
1242 {
1243 struct ddf_super *ddf = st->sb;
1244 if (ddf == NULL)
1245 return;
1246 free(ddf->phys);
1247 free(ddf->virt);
1248 free(ddf->conf);
1249 while (ddf->conflist) {
1250 struct vcl *v = ddf->conflist;
1251 ddf->conflist = v->next;
1252 if (v->block_sizes)
1253 free(v->block_sizes);
1254 if (v->other_bvds)
1255 /*
1256 v->other_bvds[0] points to beginning of buffer,
1257 see alloc_other_bvds()
1258 */
1259 free(v->other_bvds[0]);
1260 free(v);
1261 }
1262 while (ddf->dlist) {
1263 struct dl *d = ddf->dlist;
1264 ddf->dlist = d->next;
1265 if (d->fd >= 0)
1266 close(d->fd);
1267 if (d->spare)
1268 free(d->spare);
1269 free(d);
1270 }
1271 while (ddf->add_list) {
1272 struct dl *d = ddf->add_list;
1273 ddf->add_list = d->next;
1274 if (d->fd >= 0)
1275 close(d->fd);
1276 if (d->spare)
1277 free(d->spare);
1278 free(d);
1279 }
1280 free(ddf);
1281 st->sb = NULL;
1282 }
1283
1284 static struct supertype *match_metadata_desc_ddf(char *arg)
1285 {
1286 /* 'ddf' only supports containers */
1287 struct supertype *st;
1288 if (strcmp(arg, "ddf") != 0 &&
1289 strcmp(arg, "default") != 0
1290 )
1291 return NULL;
1292
1293 st = xcalloc(1, sizeof(*st));
1294 st->ss = &super_ddf;
1295 st->max_devs = 512;
1296 st->minor_version = 0;
1297 st->sb = NULL;
1298 return st;
1299 }
1300
1301 static mapping_t ddf_state[] = {
1302 { "Optimal", 0},
1303 { "Degraded", 1},
1304 { "Deleted", 2},
1305 { "Missing", 3},
1306 { "Failed", 4},
1307 { "Partially Optimal", 5},
1308 { "-reserved-", 6},
1309 { "-reserved-", 7},
1310 { NULL, 0}
1311 };
1312
1313 static mapping_t ddf_init_state[] = {
1314 { "Not Initialised", 0},
1315 { "QuickInit in Progress", 1},
1316 { "Fully Initialised", 2},
1317 { "*UNKNOWN*", 3},
1318 { NULL, 0}
1319 };
1320 static mapping_t ddf_access[] = {
1321 { "Read/Write", 0},
1322 { "Reserved", 1},
1323 { "Read Only", 2},
1324 { "Blocked (no access)", 3},
1325 { NULL ,0}
1326 };
1327
1328 static mapping_t ddf_level[] = {
1329 { "RAID0", DDF_RAID0},
1330 { "RAID1", DDF_RAID1},
1331 { "RAID3", DDF_RAID3},
1332 { "RAID4", DDF_RAID4},
1333 { "RAID5", DDF_RAID5},
1334 { "RAID1E",DDF_RAID1E},
1335 { "JBOD", DDF_JBOD},
1336 { "CONCAT",DDF_CONCAT},
1337 { "RAID5E",DDF_RAID5E},
1338 { "RAID5EE",DDF_RAID5EE},
1339 { "RAID6", DDF_RAID6},
1340 { NULL, 0}
1341 };
1342 static mapping_t ddf_sec_level[] = {
1343 { "Striped", DDF_2STRIPED},
1344 { "Mirrored", DDF_2MIRRORED},
1345 { "Concat", DDF_2CONCAT},
1346 { "Spanned", DDF_2SPANNED},
1347 { NULL, 0}
1348 };
1349
1350 static int all_ff(const char *guid)
1351 {
1352 int i;
1353 for (i = 0; i < DDF_GUID_LEN; i++)
1354 if (guid[i] != (char)0xff)
1355 return 0;
1356 return 1;
1357 }
1358
1359 static const char *guid_str(const char *guid)
1360 {
1361 static char buf[DDF_GUID_LEN*2+1];
1362 int i;
1363 char *p = buf;
1364 for (i = 0; i < DDF_GUID_LEN; i++) {
1365 unsigned char c = guid[i];
1366 if (c >= 32 && c < 127)
1367 p += sprintf(p, "%c", c);
1368 else
1369 p += sprintf(p, "%02x", c);
1370 }
1371 *p = '\0';
1372 return (const char *) buf;
1373 }
1374
1375 static void print_guid(char *guid, int tstamp)
1376 {
1377 /* GUIDs are part (or all) ASCII and part binary.
1378 * They tend to be space padded.
1379 * We print the GUID in HEX, then in parentheses add
1380 * any initial ASCII sequence, and a possible
1381 * time stamp from bytes 16-19
1382 */
1383 int l = DDF_GUID_LEN;
1384 int i;
1385
1386 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1387 if ((i&3)==0 && i != 0) printf(":");
1388 printf("%02X", guid[i]&255);
1389 }
1390
1391 printf("\n (");
1392 while (l && guid[l-1] == ' ')
1393 l--;
1394 for (i=0 ; i<l ; i++) {
1395 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1396 fputc(guid[i], stdout);
1397 else
1398 break;
1399 }
1400 if (tstamp) {
1401 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1402 char tbuf[100];
1403 struct tm *tm;
1404 tm = localtime(&then);
1405 strftime(tbuf, 100, " %D %T",tm);
1406 fputs(tbuf, stdout);
1407 }
1408 printf(")");
1409 }
1410
1411 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1412 {
1413 int crl = sb->conf_rec_len;
1414 struct vcl *vcl;
1415
1416 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1417 unsigned int i;
1418 struct vd_config *vc = &vcl->conf;
1419
1420 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1421 continue;
1422 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1423 continue;
1424
1425 /* Ok, we know about this VD, let's give more details */
1426 printf(" Raid Devices[%d] : %d (", n,
1427 be16_to_cpu(vc->prim_elmnt_count));
1428 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1429 int j;
1430 int cnt = be16_to_cpu(sb->phys->max_pdes);
1431 for (j=0; j<cnt; j++)
1432 if (be32_eq(vc->phys_refnum[i],
1433 sb->phys->entries[j].refnum))
1434 break;
1435 if (i) printf(" ");
1436 if (j < cnt)
1437 printf("%d", j);
1438 else
1439 printf("--");
1440 printf("@%lluK", (unsigned long long) be64_to_cpu(LBA_OFFSET(sb, vc)[i])/2);
1441 }
1442 printf(")\n");
1443 if (vc->chunk_shift != 255)
1444 printf(" Chunk Size[%d] : %d sectors\n", n,
1445 1 << vc->chunk_shift);
1446 printf(" Raid Level[%d] : %s\n", n,
1447 map_num(ddf_level, vc->prl)?:"-unknown-");
1448 if (vc->sec_elmnt_count != 1) {
1449 printf(" Secondary Position[%d] : %d of %d\n", n,
1450 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1451 printf(" Secondary Level[%d] : %s\n", n,
1452 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1453 }
1454 printf(" Device Size[%d] : %llu\n", n,
1455 be64_to_cpu(vc->blocks)/2);
1456 printf(" Array Size[%d] : %llu\n", n,
1457 be64_to_cpu(vc->array_blocks)/2);
1458 }
1459 }
1460
1461 static void examine_vds(struct ddf_super *sb)
1462 {
1463 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1464 unsigned int i;
1465 printf(" Virtual Disks : %d\n", cnt);
1466
1467 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1468 struct virtual_entry *ve = &sb->virt->entries[i];
1469 if (all_ff(ve->guid))
1470 continue;
1471 printf("\n");
1472 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1473 printf("\n");
1474 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1475 printf(" state[%d] : %s, %s%s\n", i,
1476 map_num_s(ddf_state, ve->state & 7),
1477 (ve->state & DDF_state_morphing) ? "Morphing, ": "",
1478 (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
1479 printf(" init state[%d] : %s\n", i,
1480 map_num_s(ddf_init_state, ve->init_state & DDF_initstate_mask));
1481 printf(" access[%d] : %s\n", i,
1482 map_num_s(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
1483 printf(" Name[%d] : %.16s\n", i, ve->name);
1484 examine_vd(i, sb, ve->guid);
1485 }
1486 if (cnt) printf("\n");
1487 }
1488
1489 static void examine_pds(struct ddf_super *sb)
1490 {
1491 int cnt = be16_to_cpu(sb->phys->max_pdes);
1492 int i;
1493 struct dl *dl;
1494 int unlisted = 0;
1495 printf(" Physical Disks : %d\n", cnt);
1496 printf(" Number RefNo Size Device Type/State\n");
1497
1498 for (dl = sb->dlist; dl; dl = dl->next)
1499 dl->displayed = 0;
1500
1501 for (i=0 ; i<cnt ; i++) {
1502 struct phys_disk_entry *pd = &sb->phys->entries[i];
1503 int type = be16_to_cpu(pd->type);
1504 int state = be16_to_cpu(pd->state);
1505
1506 if (be32_to_cpu(pd->refnum) == 0xffffffff)
1507 /* Not in use */
1508 continue;
1509 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1510 //printf("\n");
1511 printf(" %3d %08x ", i,
1512 be32_to_cpu(pd->refnum));
1513 printf("%8lluK ",
1514 be64_to_cpu(pd->config_size)>>1);
1515 for (dl = sb->dlist; dl ; dl = dl->next) {
1516 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1517 char *dv = map_dev(dl->major, dl->minor, 0);
1518 if (dv) {
1519 printf("%-15s", dv);
1520 break;
1521 }
1522 }
1523 }
1524 if (!dl)
1525 printf("%15s","");
1526 else
1527 dl->displayed = 1;
1528 printf(" %s%s%s%s%s",
1529 (type&2) ? "active":"",
1530 (type&4) ? "Global-Spare":"",
1531 (type&8) ? "spare" : "",
1532 (type&16)? ", foreign" : "",
1533 (type&32)? "pass-through" : "");
1534 if (state & DDF_Failed)
1535 /* This over-rides these three */
1536 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1537 printf("/%s%s%s%s%s%s%s",
1538 (state&1)? "Online": "Offline",
1539 (state&2)? ", Failed": "",
1540 (state&4)? ", Rebuilding": "",
1541 (state&8)? ", in-transition": "",
1542 (state&16)? ", SMART-errors": "",
1543 (state&32)? ", Unrecovered-Read-Errors": "",
1544 (state&64)? ", Missing" : "");
1545 printf("\n");
1546 }
1547 for (dl = sb->dlist; dl; dl = dl->next) {
1548 char *dv;
1549 if (dl->displayed)
1550 continue;
1551 if (!unlisted)
1552 printf(" Physical disks not in metadata!:\n");
1553 unlisted = 1;
1554 dv = map_dev(dl->major, dl->minor, 0);
1555 printf(" %08x %s\n", be32_to_cpu(dl->disk.refnum),
1556 dv ? dv : "-unknown-");
1557 }
1558 if (unlisted)
1559 printf("\n");
1560 }
1561
1562 static void examine_super_ddf(struct supertype *st, char *homehost)
1563 {
1564 struct ddf_super *sb = st->sb;
1565
1566 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1567 printf(" Version : %.8s\n", sb->anchor.revision);
1568 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1569 printf("\n");
1570 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1571 printf("\n");
1572 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1573 printf(" Redundant hdr : %s\n", (be32_eq(sb->secondary.magic,
1574 DDF_HEADER_MAGIC)
1575 ?"yes" : "no"));
1576 examine_vds(sb);
1577 examine_pds(sb);
1578 }
1579
1580 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1581 {
1582 /*
1583 * Figure out the VD number for this supertype.
1584 * Returns DDF_CONTAINER for the container itself,
1585 * and DDF_NOTFOUND on error.
1586 */
1587 struct ddf_super *ddf = st->sb;
1588 struct mdinfo *sra;
1589 char *sub, *end;
1590 unsigned int vcnum;
1591
1592 if (*st->container_devnm == '\0')
1593 return DDF_CONTAINER;
1594
1595 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1596 if (!sra || sra->array.major_version != -1 ||
1597 sra->array.minor_version != -2 ||
1598 !is_subarray(sra->text_version)) {
1599 if (sra)
1600 sysfs_free(sra);
1601 return DDF_NOTFOUND;
1602 }
1603
1604 sub = strchr(sra->text_version + 1, '/');
1605 if (sub != NULL)
1606 vcnum = strtoul(sub + 1, &end, 10);
1607 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1608 vcnum >= be16_to_cpu(ddf->active->max_vd_entries)) {
1609 sysfs_free(sra);
1610 return DDF_NOTFOUND;
1611 }
1612
1613 return vcnum;
1614 }
1615
1616 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1617 {
1618 /* We just write a generic DDF ARRAY entry
1619 */
1620 struct mdinfo info;
1621 char nbuf[64];
1622 getinfo_super_ddf(st, &info, NULL);
1623 fname_from_uuid(&info, nbuf);
1624
1625 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1626 }
1627
1628 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1629 {
1630 /* We write a DDF ARRAY member entry for each vd, identifying container
1631 * by uuid and member by unit number and uuid.
1632 */
1633 struct ddf_super *ddf = st->sb;
1634 struct mdinfo info;
1635 unsigned int i;
1636 char nbuf[64];
1637 getinfo_super_ddf(st, &info, NULL);
1638 fname_from_uuid(&info, nbuf);
1639
1640 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1641 struct virtual_entry *ve = &ddf->virt->entries[i];
1642 struct vcl vcl;
1643 char nbuf1[64];
1644 char namebuf[17];
1645 if (all_ff(ve->guid))
1646 continue;
1647 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1648 ddf->currentconf =&vcl;
1649 vcl.vcnum = i;
1650 uuid_from_super_ddf(st, info.uuid);
1651 fname_from_uuid(&info, nbuf1);
1652 _ddf_array_name(namebuf, ddf, i);
1653 printf("ARRAY%s%s container=%s member=%d UUID=%s\n",
1654 namebuf[0] == '\0' ? "" : " " DEV_MD_DIR, namebuf,
1655 nbuf+5, i, nbuf1+5);
1656 }
1657 }
1658
1659 static void export_examine_super_ddf(struct supertype *st)
1660 {
1661 struct mdinfo info;
1662 char nbuf[64];
1663 getinfo_super_ddf(st, &info, NULL);
1664 fname_from_uuid(&info, nbuf);
1665 printf("MD_METADATA=ddf\n");
1666 printf("MD_LEVEL=container\n");
1667 printf("MD_UUID=%s\n", nbuf+5);
1668 printf("MD_DEVICES=%u\n",
1669 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1670 }
1671
1672 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1673 {
1674 void *buf;
1675 unsigned long long dsize, offset;
1676 int bytes;
1677 struct ddf_header *ddf;
1678 int written = 0;
1679
1680 /* The metadata consists of an anchor, a primary, and a secondary.
1681 * This all lives at the end of the device.
1682 * So it is easiest to find the earliest of primary and
1683 * secondary, and copy everything from there.
1684 *
1685 * Anchor is 512 from end. It contains primary_lba and secondary_lba
1686 * we choose one of those
1687 */
1688
1689 if (posix_memalign(&buf, 4096, 4096) != 0)
1690 return 1;
1691
1692 if (!get_dev_size(from, NULL, &dsize))
1693 goto err;
1694
1695 if (lseek64(from, dsize-512, 0) < 0)
1696 goto err;
1697 if (read(from, buf, 512) != 512)
1698 goto err;
1699 ddf = buf;
1700 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1701 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1702 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1703 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1704 goto err;
1705
1706 offset = dsize - 512;
1707 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1708 offset = be64_to_cpu(ddf->primary_lba) << 9;
1709 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1710 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1711
1712 bytes = dsize - offset;
1713
1714 if (lseek64(from, offset, 0) < 0 ||
1715 lseek64(to, offset, 0) < 0)
1716 goto err;
1717 while (written < bytes) {
1718 int n = bytes - written;
1719 if (n > 4096)
1720 n = 4096;
1721 if (read(from, buf, n) != n)
1722 goto err;
1723 if (write(to, buf, n) != n)
1724 goto err;
1725 written += n;
1726 }
1727 free(buf);
1728 return 0;
1729 err:
1730 free(buf);
1731 return 1;
1732 }
1733
1734 static void detail_super_ddf(struct supertype *st, char *homehost,
1735 char *subarray)
1736 {
1737 struct ddf_super *sb = st->sb;
1738 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1739
1740 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1741 printf("\n");
1742 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1743 printf(" Virtual Disks : %d\n", cnt);
1744 printf("\n");
1745 }
1746
1747 static const char *vendors_with_variable_volume_UUID[] = {
1748 "LSI ",
1749 };
1750
1751 static int volume_id_is_reliable(const struct ddf_super *ddf)
1752 {
1753 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1754 int i;
1755 for (i = 0; i < n; i++)
1756 if (!memcmp(ddf->controller.guid,
1757 vendors_with_variable_volume_UUID[i], 8))
1758 return 0;
1759 return 1;
1760 }
1761
1762 static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
1763 unsigned int vcnum, int uuid[4])
1764 {
1765 char buf[DDF_GUID_LEN+18], sha[20], *p;
1766 struct sha1_ctx ctx;
1767 if (volume_id_is_reliable(ddf)) {
1768 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
1769 return;
1770 }
1771 /*
1772 * Some fake RAID BIOSes (in particular, LSI ones) change the
1773 * VD GUID at every boot. These GUIDs are not suitable for
1774 * identifying an array. Luckily the header GUID appears to
1775 * remain constant.
1776 * We construct a pseudo-UUID from the header GUID and those
1777 * properties of the subarray that we expect to remain constant.
1778 */
1779 memset(buf, 0, sizeof(buf));
1780 p = buf;
1781 memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
1782 p += DDF_GUID_LEN;
1783 memcpy(p, ddf->virt->entries[vcnum].name, 16);
1784 p += 16;
1785 *((__u16 *) p) = vcnum;
1786 sha1_init_ctx(&ctx);
1787 sha1_process_bytes(buf, sizeof(buf), &ctx);
1788 sha1_finish_ctx(&ctx, sha);
1789 memcpy(uuid, sha, 4*4);
1790 }
1791
1792 static void brief_detail_super_ddf(struct supertype *st, char *subarray)
1793 {
1794 struct mdinfo info;
1795 char nbuf[64];
1796 struct ddf_super *ddf = st->sb;
1797 unsigned int vcnum = get_vd_num_of_subarray(st);
1798 if (vcnum == DDF_CONTAINER)
1799 uuid_from_super_ddf(st, info.uuid);
1800 else if (vcnum == DDF_NOTFOUND)
1801 return;
1802 else
1803 uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
1804 fname_from_uuid(&info, nbuf);
1805 printf(" UUID=%s", nbuf + 5);
1806 }
1807
1808 static int match_home_ddf(struct supertype *st, char *homehost)
1809 {
1810 /* It matches 'this' host if the controller is a
1811 * Linux-MD controller with vendor_data matching
1812 * the hostname. It would be nice if we could
1813 * test against controller found in /sys or somewhere...
1814 */
1815 struct ddf_super *ddf = st->sb;
1816 unsigned int len;
1817
1818 if (!homehost)
1819 return 0;
1820 len = strlen(homehost);
1821
1822 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1823 len < sizeof(ddf->controller.vendor_data) &&
1824 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1825 ddf->controller.vendor_data[len] == 0);
1826 }
1827
1828 static int find_index_in_bvd(const struct ddf_super *ddf,
1829 const struct vd_config *conf, unsigned int n,
1830 unsigned int *n_bvd)
1831 {
1832 /*
1833 * Find the index of the n-th valid physical disk in this BVD.
1834 * Unused entries can be sprinkled in with the used entries,
1835 * but don't count.
1836 */
1837 unsigned int i, j;
1838 for (i = 0, j = 0;
1839 i < ddf->mppe && j < be16_to_cpu(conf->prim_elmnt_count);
1840 i++) {
1841 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1842 if (n == j) {
1843 *n_bvd = i;
1844 return 1;
1845 }
1846 j++;
1847 }
1848 }
1849 dprintf("couldn't find BVD member %u (total %u)\n",
1850 n, be16_to_cpu(conf->prim_elmnt_count));
1851 return 0;
1852 }
1853
1854 /* Given a member array instance number, and a raid disk within that instance,
1855 * find the vd_config structure. The offset of the given disk in the phys_refnum
1856 * table is returned in n_bvd.
1857 * For two-level members with a secondary raid level the vd_config for
1858 * the appropriate BVD is returned.
1859 * The return value is always &vlc->conf, where vlc is returned in last pointer.
1860 */
1861 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1862 unsigned int n,
1863 unsigned int *n_bvd, struct vcl **vcl)
1864 {
1865 struct vcl *v;
1866
1867 for (v = ddf->conflist; v; v = v->next) {
1868 unsigned int nsec, ibvd = 0;
1869 struct vd_config *conf;
1870 if (inst != v->vcnum)
1871 continue;
1872 conf = &v->conf;
1873 if (conf->sec_elmnt_count == 1) {
1874 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1875 *vcl = v;
1876 return conf;
1877 } else
1878 goto bad;
1879 }
1880 if (v->other_bvds == NULL) {
1881 pr_err("BUG: other_bvds is NULL, nsec=%u\n",
1882 conf->sec_elmnt_count);
1883 goto bad;
1884 }
1885 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1886 if (conf->sec_elmnt_seq != nsec) {
1887 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1888 if (v->other_bvds[ibvd-1]->sec_elmnt_seq ==
1889 nsec)
1890 break;
1891 }
1892 if (ibvd == conf->sec_elmnt_count)
1893 goto bad;
1894 conf = v->other_bvds[ibvd-1];
1895 }
1896 if (!find_index_in_bvd(ddf, conf,
1897 n - nsec*conf->sec_elmnt_count, n_bvd))
1898 goto bad;
1899 dprintf("found disk %u as member %u in bvd %d of array %u\n",
1900 n, *n_bvd, ibvd, inst);
1901 *vcl = v;
1902 return conf;
1903 }
1904 bad:
1905 pr_err("Couldn't find disk %d in array %u\n", n, inst);
1906 return NULL;
1907 }
1908
1909 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1910 {
1911 /* Find the entry in phys_disk which has the given refnum
1912 * and return its index
1913 */
1914 unsigned int i;
1915 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1916 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1917 return i;
1918 return -1;
1919 }
1920
1921 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1922 {
1923 char buf[20];
1924 struct sha1_ctx ctx;
1925 sha1_init_ctx(&ctx);
1926 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1927 sha1_finish_ctx(&ctx, buf);
1928 memcpy(uuid, buf, 4*4);
1929 }
1930
1931 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1932 {
1933 /* The uuid returned here is used for:
1934 * uuid to put into bitmap file (Create, Grow)
1935 * uuid for backup header when saving critical section (Grow)
1936 * comparing uuids when re-adding a device into an array
1937 * In these cases the uuid required is that of the data-array,
1938 * not the device-set.
1939 * uuid to recognise same set when adding a missing device back
1940 * to an array. This is a uuid for the device-set.
1941 *
1942 * For each of these we can make do with a truncated
1943 * or hashed uuid rather than the original, as long as
1944 * everyone agrees.
1945 * In the case of SVD we assume the BVD is of interest,
1946 * though that might not be the case if a bitmap were made for
1947 * a mirrored SVD - worry about that later.
1948 * So we need to find the VD configuration record for the
1949 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1950 * The first 16 bytes of the sha1 of these is used.
1951 */
1952 struct ddf_super *ddf = st->sb;
1953 struct vcl *vcl = ddf->currentconf;
1954
1955 if (vcl)
1956 uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
1957 else
1958 uuid_from_ddf_guid(ddf->anchor.guid, uuid);
1959 }
1960
1961 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1962 {
1963 struct ddf_super *ddf = st->sb;
1964 int map_disks = info->array.raid_disks;
1965 __u32 *cptr;
1966
1967 if (ddf->currentconf) {
1968 getinfo_super_ddf_bvd(st, info, map);
1969 return;
1970 }
1971 memset(info, 0, sizeof(*info));
1972
1973 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1974 info->array.level = LEVEL_CONTAINER;
1975 info->array.layout = 0;
1976 info->array.md_minor = -1;
1977 cptr = (__u32 *)(ddf->anchor.guid + 16);
1978 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1979
1980 info->array.chunk_size = 0;
1981 info->container_enough = 1;
1982
1983 info->disk.major = 0;
1984 info->disk.minor = 0;
1985 if (ddf->dlist) {
1986 struct phys_disk_entry *pde = NULL;
1987 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1988 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1989
1990 if (info->disk.raid_disk < 0)
1991 return;
1992
1993 info->data_offset = be64_to_cpu(ddf->phys->
1994 entries[info->disk.raid_disk].
1995 config_size);
1996 info->component_size = ddf->dlist->size - info->data_offset;
1997 pde = ddf->phys->entries + info->disk.raid_disk;
1998 if (pde &&
1999 !(be16_to_cpu(pde->state) & DDF_Failed) &&
2000 !(be16_to_cpu(pde->state) & DDF_Missing))
2001 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
2002 else
2003 info->disk.state = 1 << MD_DISK_FAULTY;
2004
2005 } else {
2006 /* There should always be a dlist, but just in case...*/
2007 info->disk.number = -1;
2008 info->disk.raid_disk = -1;
2009 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
2010 }
2011 info->events = be32_to_cpu(ddf->active->seq);
2012 info->array.utime = DECADE + be32_to_cpu(ddf->active->timestamp);
2013
2014 info->recovery_start = MaxSector;
2015 info->reshape_active = 0;
2016 info->recovery_blocked = 0;
2017 info->name[0] = 0;
2018
2019 info->array.major_version = -1;
2020 info->array.minor_version = -2;
2021 strcpy(info->text_version, "ddf");
2022 info->safe_mode_delay = 0;
2023
2024 uuid_from_super_ddf(st, info->uuid);
2025
2026 if (map) {
2027 int i, e = 0;
2028 int max = be16_to_cpu(ddf->phys->max_pdes);
2029 for (i = e = 0 ; i < map_disks ; i++, e++) {
2030 while (e < max &&
2031 be32_to_cpu(ddf->phys->entries[e].refnum) == 0xffffffff)
2032 e++;
2033 if (i < info->array.raid_disks && e < max &&
2034 !(be16_to_cpu(ddf->phys->entries[e].state) &
2035 DDF_Failed))
2036 map[i] = 1;
2037 else
2038 map[i] = 0;
2039 }
2040 }
2041 }
2042
2043 /* size of name must be at least 17 bytes! */
2044 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i)
2045 {
2046 int j;
2047 memcpy(name, ddf->virt->entries[i].name, 16);
2048 name[16] = 0;
2049 for(j = 0; j < 16; j++)
2050 if (name[j] == ' ')
2051 name[j] = 0;
2052 }
2053
2054 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
2055 {
2056 struct ddf_super *ddf = st->sb;
2057 struct vcl *vc = ddf->currentconf;
2058 int cd = ddf->currentdev;
2059 int n_prim;
2060 int j;
2061 struct dl *dl = NULL;
2062 int map_disks = info->array.raid_disks;
2063 __u32 *cptr;
2064 struct vd_config *conf;
2065
2066 memset(info, 0, sizeof(*info));
2067 if (layout_ddf2md(&vc->conf, &info->array) == -1)
2068 return;
2069 info->array.md_minor = -1;
2070 cptr = (__u32 *)(vc->conf.guid + 16);
2071 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
2072 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
2073 info->array.chunk_size = 512 << vc->conf.chunk_shift;
2074 info->custom_array_size = be64_to_cpu(vc->conf.array_blocks);
2075
2076 conf = &vc->conf;
2077 n_prim = be16_to_cpu(conf->prim_elmnt_count);
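/* For an SVD, currentdev counts across all BVDs; map it to the
 * correct BVD configuration and to an index within that BVD.
 */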
2078 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
2079 int ibvd = cd / n_prim - 1;
2080 cd %= n_prim;
2081 conf = vc->other_bvds[ibvd];
2082 }
2083
2084 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
2085 info->data_offset =
2086 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
2087 if (vc->block_sizes)
2088 info->component_size = vc->block_sizes[cd];
2089 else
2090 info->component_size = be64_to_cpu(conf->blocks);
2091
2092 for (dl = ddf->dlist; dl ; dl = dl->next)
2093 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
2094 break;
2095 }
2096
2097 info->disk.major = 0;
2098 info->disk.minor = 0;
2099 info->disk.state = 0;
2100 if (dl && dl->pdnum >= 0) {
2101 info->disk.major = dl->major;
2102 info->disk.minor = dl->minor;
2103 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2104 * be16_to_cpu(conf->prim_elmnt_count);
2105 info->disk.number = dl->pdnum;
2106 info->disk.state = 0;
2107 if (info->disk.number >= 0 &&
2108 (be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Online) &&
2109 !(be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Failed))
2110 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2111 info->events = be32_to_cpu(ddf->active->seq);
2112 }
2113
2114 info->container_member = ddf->currentconf->vcnum;
2115
2116 info->recovery_start = MaxSector;
2117 info->resync_start = 0;
2118 info->reshape_active = 0;
2119 info->recovery_blocked = 0;
2120 if (!(ddf->virt->entries[info->container_member].state &
2121 DDF_state_inconsistent) &&
2122 (ddf->virt->entries[info->container_member].init_state &
2123 DDF_initstate_mask) == DDF_init_full)
2124 info->resync_start = MaxSector;
2125
2126 uuid_from_super_ddf(st, info->uuid);
2127
2128 info->array.major_version = -1;
2129 info->array.minor_version = -2;
2130 sprintf(info->text_version, "/%s/%d",
2131 st->container_devnm,
2132 info->container_member);
2133 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2134
2135 _ddf_array_name(info->name, ddf, info->container_member);
2136
2137 if (map)
2138 for (j = 0; j < map_disks; j++) {
2139 map[j] = 0;
2140 if (j < info->array.raid_disks) {
2141 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2142 if (i >= 0 &&
2143 (be16_to_cpu(ddf->phys->entries[i].state)
2144 & DDF_Online) &&
2145 !(be16_to_cpu(ddf->phys->entries[i].state)
2146 & DDF_Failed))
2147 map[j] = 1;
2148 }
2149 }
2150 }
2151
2152 static void make_header_guid(char *guid)
2153 {
2154 be32 stamp;
2155 /* Create a DDF Header or Virtual Disk GUID */
2156
2157 /* 24 bytes of fiction required.
2158 * first 8 are a 'vendor-id' - "Linux-MD"
2159 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2160 * Remaining 8 random number plus timestamp
2161 */
2162 memcpy(guid, T10, sizeof(T10));
2163 stamp = cpu_to_be32(0xdeadbeef);
2164 memcpy(guid+8, &stamp, 4);
2165 stamp = cpu_to_be32(0);
2166 memcpy(guid+12, &stamp, 4);
2167 stamp = cpu_to_be32(time(0) - DECADE);
2168 memcpy(guid+16, &stamp, 4);
2169 stamp._v32 = random32();
2170 memcpy(guid+20, &stamp, 4);
2171 }
2172
2173 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2174 {
2175 unsigned int i;
2176 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2177 if (all_ff(ddf->virt->entries[i].guid))
2178 return i;
2179 }
2180 return DDF_NOTFOUND;
2181 }
2182
2183 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2184 const char *name)
2185 {
2186 unsigned int i;
2187 if (name == NULL)
2188 return DDF_NOTFOUND;
2189 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2190 if (all_ff(ddf->virt->entries[i].guid))
2191 continue;
2192 if (!strncmp(name, ddf->virt->entries[i].name,
2193 sizeof(ddf->virt->entries[i].name)))
2194 return i;
2195 }
2196 return DDF_NOTFOUND;
2197 }
2198
2199 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2200 const char *guid)
2201 {
2202 unsigned int i;
2203 if (guid == NULL || all_ff(guid))
2204 return DDF_NOTFOUND;
2205 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2206 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2207 return i;
2208 return DDF_NOTFOUND;
2209 }
2210
2211 static int init_super_ddf(struct supertype *st,
2212 mdu_array_info_t *info,
2213 struct shape *s, char *name, char *homehost,
2214 int *uuid, unsigned long long data_offset)
2215 {
2216 /* This is primarily called by Create when creating a new array.
2217 * We will then get add_to_super called for each component, and then
2218 * write_init_super called to write it out to each device.
2219 * For DDF, Create can create on fresh devices or on a pre-existing
2220 * array.
2221 * To create on a pre-existing array a different method will be called.
2222 * This one is just for fresh drives.
2223 *
2224 * We need to create the entire 'ddf' structure which includes:
2225 * DDF headers - these are easy.
2226 * Controller data - a Sector describing this controller .. not that
2227 * this is a controller exactly.
2228 * Physical Disk Record - one entry per device, so
2229 * leave plenty of space.
2230 * Virtual Disk Records - again, just leave plenty of space.
2231 * This just lists VDs, doesn't give details.
2232 * Config records - describe the VDs that use this disk
2233 * DiskData - describes 'this' device.
2234 * BadBlockManagement - empty
2235 * Diag Space - empty
2236 * Vendor Logs - Could we put bitmaps here?
2237 *
2238 */
2239 struct ddf_super *ddf;
2240 char hostname[17];
2241 int hostlen;
2242 int max_phys_disks, max_virt_disks;
2243 unsigned long long sector;
2244 int clen;
2245 int i;
2246 int pdsize, vdsize;
2247 struct phys_disk *pd;
2248 struct virtual_disk *vd;
2249
2250 if (st->sb)
2251 return init_super_ddf_bvd(st, info, s->size, name, homehost, uuid,
2252 data_offset);
2253
2254 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2255 pr_err("could not allocate superblock\n");
2256 return 0;
2257 }
2258 memset(ddf, 0, sizeof(*ddf));
2259 st->sb = ddf;
2260
2261 if (info == NULL) {
2262 /* zeroing superblock */
2263 return 0;
2264 }
2265
2266 /* At least 32MB *must* be reserved for the ddf. So let's just
2267 * start 32MB from the end, and put the primary header there.
2268 * Don't do secondary for now.
2269 * We don't know exactly where that will be yet as it could be
2270 * different on each device. So just set up the lengths.
2271 */
2272
2273 ddf->anchor.magic = DDF_HEADER_MAGIC;
2274 make_header_guid(ddf->anchor.guid);
2275
2276 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2277 ddf->anchor.seq = cpu_to_be32(1);
2278 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2279 ddf->anchor.openflag = 0xFF;
2280 ddf->anchor.foreignflag = 0;
2281 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2282 ddf->anchor.pad0 = 0xff;
2283 memset(ddf->anchor.pad1, 0xff, 12);
2284 memset(ddf->anchor.header_ext, 0xff, 32);
2285 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2286 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2287 ddf->anchor.type = DDF_HEADER_ANCHOR;
2288 memset(ddf->anchor.pad2, 0xff, 3);
2289 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2290 /* Put this at bottom of 32M reserved.. */
2291 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2292 max_phys_disks = 1023; /* Should be enough, 4095 is also allowed */
2293 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2294 max_virt_disks = 255; /* 15, 63, 255, 1023, 4095 are all allowed */
2295 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks);
2296 ddf->max_part = 64;
2297 ddf->anchor.max_partitions = cpu_to_be16(ddf->max_part);
2298 ddf->mppe = 256; /* 16, 64, 256, 1024, 4096 are all allowed */
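/* Each config record is one header sector plus a 4-byte phys_refnum
 * and an 8-byte LBA offset per possible member: with mppe == 256 that
 * is 1 + ROUND_UP(256*12, 512)/512 = 7 sectors per record.
 */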
2299 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
2300 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2301 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2302 memset(ddf->anchor.pad3, 0xff, 54);
2303 /* Controller section is one sector long immediately
2304 * after the ddf header */
2305 sector = 1;
2306 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2307 ddf->anchor.controller_section_length = cpu_to_be32(1);
2308 sector += 1;
2309
2310 /* phys is 8 sectors after that */
2311 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2312 sizeof(struct phys_disk_entry)*max_phys_disks,
2313 512);
2314 switch(pdsize/512) {
2315 case 2: case 8: case 32: case 128: case 512: break;
2316 default: abort();
2317 }
2318 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2319 ddf->anchor.phys_section_length =
2320 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2321 sector += pdsize/512;
2322
2323 /* virt is another 32 sectors */
2324 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2325 sizeof(struct virtual_entry) * max_virt_disks,
2326 512);
2327 switch(vdsize/512) {
2328 case 2: case 8: case 32: case 128: case 512: break;
2329 default: abort();
2330 }
2331 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2332 ddf->anchor.virt_section_length =
2333 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2334 sector += vdsize/512;
2335
2336 clen = ddf->conf_rec_len * (ddf->max_part+1);
2337 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2338 ddf->anchor.config_section_length = cpu_to_be32(clen);
2339 sector += clen;
2340
2341 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2342 ddf->anchor.data_section_length = cpu_to_be32(1);
2343 sector += 1;
2344
2345 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2346 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2347 ddf->anchor.diag_space_length = cpu_to_be32(0);
2348 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2349 ddf->anchor.vendor_length = cpu_to_be32(0);
2350 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2351
2352 memset(ddf->anchor.pad4, 0xff, 256);
2353
2354 memcpy(&ddf->primary, &ddf->anchor, 512);
2355 memcpy(&ddf->secondary, &ddf->anchor, 512);
2356
2357 ddf->primary.openflag = 1; /* I guess.. */
2358 ddf->primary.type = DDF_HEADER_PRIMARY;
2359
2360 ddf->secondary.openflag = 1; /* I guess.. */
2361 ddf->secondary.type = DDF_HEADER_SECONDARY;
2362
2363 ddf->active = &ddf->primary;
2364
2365 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2366
2367 /* 24 more bytes of fiction required.
2368 * first 8 are a 'vendor-id' - "Linux-MD"
2369 * Remaining 16 are serial number.... maybe a hostname would do?
2370 */
2371 memcpy(ddf->controller.guid, T10, sizeof(T10));
2372 s_gethostname(hostname, sizeof(hostname));
2373 hostlen = strlen(hostname);
2374 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2375 for (i = strlen(T10) ; i+hostlen < 24; i++)
2376 ddf->controller.guid[i] = ' ';
2377
2378 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2379 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2380 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2381 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2382 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2383 memset(ddf->controller.pad, 0xff, 8);
2384 memset(ddf->controller.vendor_data, 0xff, 448);
2385 if (homehost && strlen(homehost) < 440)
2386 strcpy((char*)ddf->controller.vendor_data, homehost);
2387
2388 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2389 pr_err("could not allocate pd\n");
2390 return 0;
2391 }
2392 ddf->phys = pd;
2393 ddf->pdsize = pdsize;
2394
2395 memset(pd, 0xff, pdsize);
2396 memset(pd, 0, sizeof(*pd));
2397 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2398 pd->used_pdes = cpu_to_be16(0);
2399 pd->max_pdes = cpu_to_be16(max_phys_disks);
2400 memset(pd->pad, 0xff, 52);
2401 for (i = 0; i < max_phys_disks; i++)
2402 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2403
2404 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2405 pr_err("could not allocate vd\n");
2406 return 0;
2407 }
2408 ddf->virt = vd;
2409 ddf->vdsize = vdsize;
2410 memset(vd, 0, vdsize);
2411 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2412 vd->populated_vdes = cpu_to_be16(0);
2413 vd->max_vdes = cpu_to_be16(max_virt_disks);
2414 memset(vd->pad, 0xff, 52);
2415
2416 for (i=0; i<max_virt_disks; i++)
2417 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2418
2419 st->sb = ddf;
2420 ddf_set_updates_pending(ddf, NULL);
2421 return 1;
2422 }
2423
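/* Convert a chunk size in bytes to the DDF chunk_shift value, which is
 * log2 of the chunk size in 512-byte sectors: e.g. 65536 bytes gives
 * ffs(65536/512)-1 = 7, and 512 << 7 == 65536.
 */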
2424 static int chunk_to_shift(int chunksize)
2425 {
2426 return ffs(chunksize/512)-1;
2427 }
2428
2429 struct extent {
2430 unsigned long long start, size;
2431 };
2432 static int cmp_extent(const void *av, const void *bv)
2433 {
2434 const struct extent *a = av;
2435 const struct extent *b = bv;
2436 if (a->start < b->start)
2437 return -1;
2438 if (a->start > b->start)
2439 return 1;
2440 return 0;
2441 }
2442
2443 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2444 {
2445 /* Find a list of used extents on the given physical device
2446 * (dl) of the given ddf.
2447 * Return a malloced array of 'struct extent'
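* The array is sorted by start and terminated by a sentinel entry of
* size 0 whose start is the device's config_size, i.e. where the DDF
* metadata area begins.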
2448 */
2449 struct extent *rv;
2450 int n = 0;
2451 unsigned int i;
2452 __u16 state;
2453
2454 if (dl->pdnum < 0)
2455 return NULL;
2456 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2457
2458 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2459 return NULL;
2460
2461 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2462
2463 for (i = 0; i < ddf->max_part; i++) {
2464 const struct vd_config *bvd;
2465 unsigned int ibvd;
2466 struct vcl *v = dl->vlist[i];
2467 if (v == NULL ||
2468 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2469 &bvd, &ibvd) == DDF_NOTFOUND)
2470 continue;
2471 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2472 rv[n].size = be64_to_cpu(bvd->blocks);
2473 n++;
2474 }
2475 qsort(rv, n, sizeof(*rv), cmp_extent);
2476
2477 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2478 rv[n].size = 0;
2479 return rv;
2480 }
2481
2482 static unsigned long long find_space(
2483 struct ddf_super *ddf, struct dl *dl,
2484 unsigned long long data_offset,
2485 unsigned long long *size)
2486 {
2487 /* Find if the requested amount of space is available.
2488 * If it is, return start.
2489 * If not, set *size to largest space.
2490 * If data_offset != INVALID_SECTORS, then the space must start
2491 * at this location.
2492 */
2493 struct extent *e = get_extents(ddf, dl);
2494 int i = 0;
2495 unsigned long long pos = 0;
2496 unsigned long long max_size = 0;
2497
2498 if (!e) {
2499 *size = 0;
2500 return INVALID_SECTORS;
2501 }
2502 do {
2503 unsigned long long esize = e[i].start - pos;
2504 if (data_offset != INVALID_SECTORS &&
2505 pos <= data_offset &&
2506 e[i].start > data_offset) {
2507 pos = data_offset;
2508 esize = e[i].start - pos;
2509 }
2510 if (data_offset != INVALID_SECTORS &&
2511 pos != data_offset) {
2512 i++;
2513 continue;
2514 }
2515 if (esize >= *size) {
2516 /* Found! */
2517 free(e);
2518 return pos;
2519 }
2520 if (esize > max_size)
2521 max_size = esize;
2522 pos = e[i].start + e[i].size;
2523 i++;
2524 } while (e[i-1].size);
2525 *size = max_size;
2526 free(e);
2527 return INVALID_SECTORS;
2528 }
2529
2530 static int init_super_ddf_bvd(struct supertype *st,
2531 mdu_array_info_t *info,
2532 unsigned long long size,
2533 char *name, char *homehost,
2534 int *uuid, unsigned long long data_offset)
2535 {
2536 /* We are creating a BVD inside a pre-existing container.
2537 * so st->sb is already set.
2538 * We need to create a new vd_config and a new virtual_entry
2539 */
2540 struct ddf_super *ddf = st->sb;
2541 unsigned int venum, i;
2542 struct virtual_entry *ve;
2543 struct vcl *vcl;
2544 struct vd_config *vc;
2545
2546 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2547 pr_err("This ddf already has an array called %s\n", name);
2548 return 0;
2549 }
2550 venum = find_unused_vde(ddf);
2551 if (venum == DDF_NOTFOUND) {
2552 pr_err("Cannot find spare slot for virtual disk\n");
2553 return 0;
2554 }
2555 ve = &ddf->virt->entries[venum];
2556
2557 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2558 * timestamp, random number
2559 */
2560 make_header_guid(ve->guid);
2561 ve->unit = cpu_to_be16(info->md_minor);
2562 ve->pad0 = 0xFFFF;
2563 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2564 DDF_GUID_LEN);
2565 ve->type = cpu_to_be16(0);
2566 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2567 if (info->state & 1) /* clean */
2568 ve->init_state = DDF_init_full;
2569 else
2570 ve->init_state = DDF_init_not;
2571
2572 memset(ve->pad1, 0xff, 14);
2573 memset(ve->name, '\0', sizeof(ve->name));
2574 if (name) {
2575 int l = strnlen(name, sizeof(ve->name));
2576 memcpy(ve->name, name, l);
2577 }
2578 ddf->virt->populated_vdes =
2579 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2580
2581 /* Now create a new vd_config */
2582 if (posix_memalign((void**)&vcl, 512,
2583 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2584 pr_err("could not allocate vd_config\n");
2585 return 0;
2586 }
2587 vcl->vcnum = venum;
2588 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2589 vc = &vcl->conf;
2590
2591 vc->magic = DDF_VD_CONF_MAGIC;
2592 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2593 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2594 vc->seqnum = cpu_to_be32(1);
2595 memset(vc->pad0, 0xff, 24);
2596 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2597 if (layout_md2ddf(info, vc) == -1 ||
2598 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2599 pr_err("unsupported RAID level/layout %d/%d with %d disks\n",
2600 info->level, info->layout, info->raid_disks);
2601 free(vcl);
2602 return 0;
2603 }
2604 vc->sec_elmnt_seq = 0;
2605 if (alloc_other_bvds(ddf, vcl) != 0) {
2606 pr_err("could not allocate other bvds\n");
2607 free(vcl);
2608 return 0;
2609 }
2610 vc->blocks = cpu_to_be64(size * 2);
2611 vc->array_blocks = cpu_to_be64(
2612 calc_array_size(info->level, info->raid_disks, info->layout,
2613 info->chunk_size, size * 2));
2614 memset(vc->pad1, 0xff, 8);
2615 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2616 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2617 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2618 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2619 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2620 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2621 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2622 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2623 memset(vc->cache_pol, 0, 8);
2624 vc->bg_rate = 0x80;
2625 memset(vc->pad2, 0xff, 3);
2626 memset(vc->pad3, 0xff, 52);
2627 memset(vc->pad4, 0xff, 192);
2628 memset(vc->v0, 0xff, 32);
2629 memset(vc->v1, 0xff, 32);
2630 memset(vc->v2, 0xff, 16);
2631 memset(vc->v3, 0xff, 16);
2632 memset(vc->vendor, 0xff, 32);
2633
2634 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2635 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2636
2637 for (i = 1; i < vc->sec_elmnt_count; i++) {
2638 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2639 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2640 }
2641
2642 vcl->next = ddf->conflist;
2643 ddf->conflist = vcl;
2644 ddf->currentconf = vcl;
2645 ddf_set_updates_pending(ddf, NULL);
2646 return 1;
2647 }
2648
2649 static void add_to_super_ddf_bvd(struct supertype *st,
2650 mdu_disk_info_t *dk, int fd, char *devname,
2651 unsigned long long data_offset)
2652 {
2653 /* fd and devname identify a device within the ddf container (st).
2654 * dk identifies a location in the new BVD.
2655 * We need to find suitable free space in that device and update
2656 * the phys_refnum and lba_offset for the newly created vd_config.
2657 * We might also want to update the type in the phys_disk
2658 * section.
2659 *
2660 * Alternately: fd == -1 and we have already chosen which device to
2661 * use and recorded it in dl->raiddisk.
2662 */
2663 struct dl *dl;
2664 struct ddf_super *ddf = st->sb;
2665 struct vd_config *vc;
2666 unsigned int i;
2667 unsigned long long blocks, pos;
2668 unsigned int raid_disk = dk->raid_disk;
2669
2670 if (fd == -1) {
2671 for (dl = ddf->dlist; dl ; dl = dl->next)
2672 if (dl->raiddisk == dk->raid_disk)
2673 break;
2674 } else {
2675 for (dl = ddf->dlist; dl ; dl = dl->next)
2676 if (dl->major == dk->major &&
2677 dl->minor == dk->minor)
2678 break;
2679 }
2680 if (!dl || dl->pdnum < 0 || ! (dk->state & (1<<MD_DISK_SYNC)))
2681 return;
2682
2683 vc = &ddf->currentconf->conf;
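/* For a secondary-level (SVD) array the global raid_disk number is
 * split into a BVD index (raid_disk / prim_elmnt_count) and a
 * position within that BVD (raid_disk % prim_elmnt_count).
 */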
2684 if (vc->sec_elmnt_count > 1) {
2685 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2686 if (raid_disk >= n)
2687 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2688 raid_disk %= n;
2689 }
2690
2691 blocks = be64_to_cpu(vc->blocks);
2692 if (ddf->currentconf->block_sizes)
2693 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2694
2695 pos = find_space(ddf, dl, data_offset, &blocks);
2696 if (pos == INVALID_SECTORS)
2697 return;
2698
2699 ddf->currentdev = dk->raid_disk;
2700 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2701 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2702
2703 for (i = 0; i < ddf->max_part ; i++)
2704 if (dl->vlist[i] == NULL)
2705 break;
2706 if (i == ddf->max_part)
2707 return;
2708 dl->vlist[i] = ddf->currentconf;
2709
2710 if (fd >= 0)
2711 dl->fd = fd;
2712 if (devname)
2713 dl->devname = devname;
2714
2715 /* Check if we can mark array as optimal yet */
2716 i = ddf->currentconf->vcnum;
2717 ddf->virt->entries[i].state =
2718 (ddf->virt->entries[i].state & ~DDF_state_mask)
2719 | get_svd_state(ddf, ddf->currentconf);
2720 be16_clear(ddf->phys->entries[dl->pdnum].type,
2721 cpu_to_be16(DDF_Global_Spare));
2722 be16_set(ddf->phys->entries[dl->pdnum].type,
2723 cpu_to_be16(DDF_Active_in_VD));
2724 dprintf("added disk %d/%08x to VD %d/%s as disk %d\n",
2725 dl->pdnum, be32_to_cpu(dl->disk.refnum),
2726 ddf->currentconf->vcnum, guid_str(vc->guid),
2727 dk->raid_disk);
2728 ddf_set_updates_pending(ddf, vc);
2729 }
2730
2731 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2732 {
2733 unsigned int i;
2734 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2735 if (all_ff(ddf->phys->entries[i].guid))
2736 return i;
2737 }
2738 return DDF_NOTFOUND;
2739 }
2740
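/* config_size is the number of sectors a virtual disk may use on this
 * device; the DDF structures must live above it. Take the lowest of:
 * 32MiB below the end of the device, the primary header LBA, the
 * secondary header LBA (if set) and, after a sanity check, the
 * workspace LBA.
 */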
2741 static void _set_config_size(struct phys_disk_entry *pde, const struct dl *dl)
2742 {
2743 __u64 cfs, t;
2744 cfs = min(dl->size - 32*1024*2ULL, be64_to_cpu(dl->primary_lba));
2745 t = be64_to_cpu(dl->secondary_lba);
2746 if (t != ~(__u64)0)
2747 cfs = min(cfs, t);
2748 /*
2749 * Some vendor DDF structures interpret workspace_lba
2750 * very differently than we do: Make a sanity check on the value.
2751 */
2752 t = be64_to_cpu(dl->workspace_lba);
2753 if (t < cfs) {
2754 __u64 wsp = cfs - t;
2755 if (wsp > 1024*1024*2ULL && wsp > dl->size / 16) {
2756 pr_err("%x:%x: workspace size 0x%llx too big, ignoring\n",
2757 dl->major, dl->minor, (unsigned long long)wsp);
2758 } else
2759 cfs = t;
2760 }
2761 pde->config_size = cpu_to_be64(cfs);
2762 dprintf("%x:%x config_size %llx, DDF structure is %llx blocks\n",
2763 dl->major, dl->minor,
2764 (unsigned long long)cfs, (unsigned long long)(dl->size-cfs));
2765 }
2766
2767 /* Add a device to a container, either while creating it or while
2768 * expanding a pre-existing container
2769 */
2770 static int add_to_super_ddf(struct supertype *st,
2771 mdu_disk_info_t *dk, int fd, char *devname,
2772 unsigned long long data_offset)
2773 {
2774 struct ddf_super *ddf = st->sb;
2775 struct dl *dd;
2776 time_t now;
2777 struct tm *tm;
2778 unsigned long long size;
2779 struct phys_disk_entry *pde;
2780 unsigned int n, i;
2781 struct stat stb;
2782 __u32 *tptr;
2783
2784 if (ddf->currentconf) {
2785 add_to_super_ddf_bvd(st, dk, fd, devname, data_offset);
2786 return 0;
2787 }
2788
2789 /* This is device numbered dk->number. We need to create
2790 * a phys_disk entry and a more detailed disk_data entry.
2791 */
2792 if (fstat(fd, &stb) != 0)
2793 return 1;
2794 n = find_unused_pde(ddf);
2795 if (n == DDF_NOTFOUND) {
2796 pr_err("No free slot in array, cannot add disk\n");
2797 return 1;
2798 }
2799 pde = &ddf->phys->entries[n];
2800 get_dev_size(fd, NULL, &size);
2801 if (size <= 32*1024*1024) {
2802 pr_err("device size must be at least 32MB\n");
2803 return 1;
2804 }
2805 size >>= 9;
2806
2807 if (posix_memalign((void**)&dd, 512,
2808 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2809 pr_err("could not allocate buffer for new disk, aborting\n");
2810 return 1;
2811 }
2812 dd->major = major(stb.st_rdev);
2813 dd->minor = minor(stb.st_rdev);
2814 dd->devname = devname;
2815 dd->fd = fd;
2816 dd->spare = NULL;
2817
2818 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2819 now = time(0);
2820 tm = localtime(&now);
2821 sprintf(dd->disk.guid, "%8s%04d%02d%02d", T10,
2822 (__u16)tm->tm_year+1900,
2823 (__u8)tm->tm_mon+1, (__u8)tm->tm_mday);
2824 tptr = (__u32 *)(dd->disk.guid + 16);
2825 *tptr++ = random32();
2826 *tptr = random32();
2827
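/* Pick a random reference number, retrying until it does not collide
 * with any refnum already present in the physical disk table.
 */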
2828 do {
2829 /* Cannot be bothered finding a CRC of some irrelevant details*/
2830 dd->disk.refnum._v32 = random32();
2831 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2832 i > 0; i--)
2833 if (be32_eq(ddf->phys->entries[i-1].refnum,
2834 dd->disk.refnum))
2835 break;
2836 } while (i > 0);
2837
2838 dd->disk.forced_ref = 1;
2839 dd->disk.forced_guid = 1;
2840 memset(dd->disk.vendor, ' ', 32);
2841 memcpy(dd->disk.vendor, "Linux", 5);
2842 memset(dd->disk.pad, 0xff, 442);
2843 for (i = 0; i < ddf->max_part ; i++)
2844 dd->vlist[i] = NULL;
2845
2846 dd->pdnum = n;
2847
2848 if (st->update_tail) {
2849 int len = (sizeof(struct phys_disk) +
2850 sizeof(struct phys_disk_entry));
2851 struct phys_disk *pd;
2852
2853 pd = xmalloc(len);
2854 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2855 pd->used_pdes = cpu_to_be16(n);
2856 pde = &pd->entries[0];
2857 dd->mdupdate = pd;
2858 } else
2859 ddf->phys->used_pdes = cpu_to_be16(
2860 1 + be16_to_cpu(ddf->phys->used_pdes));
2861
2862 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2863 pde->refnum = dd->disk.refnum;
2864 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2865 pde->state = cpu_to_be16(DDF_Online);
2866 dd->size = size;
2867 /*
2868 * If there is already a device in dlist, try to reserve the same
2869 * amount of workspace. Otherwise, use 32MB.
2870 * We checked disk size above already.
2871 */
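/* __calc_lba(new, old, lba, mb): place 'lba' on the new disk the same
 * distance from the end of the device as on 'old' (the first disk in
 * dlist); if the new disk is too small for that, fall back to 'mb'
 * megabytes from the end. All sizes and LBAs are in 512-byte sectors.
 */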
2872 #define __calc_lba(new, old, lba, mb) do { \
2873 unsigned long long dif; \
2874 if ((old) != NULL) \
2875 dif = (old)->size - be64_to_cpu((old)->lba); \
2876 else \
2877 dif = (new)->size; \
2878 if ((new)->size > dif) \
2879 (new)->lba = cpu_to_be64((new)->size - dif); \
2880 else \
2881 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2882 } while (0)
2883 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2884 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2885 if (ddf->dlist == NULL ||
2886 be64_to_cpu(ddf->dlist->secondary_lba) != ~(__u64)0)
2887 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2888 _set_config_size(pde, dd);
2889
2890 sprintf(pde->path, "%17.17s", "Information: nil");
2891 memset(pde->pad, 0xff, 6);
2892
2893 if (st->update_tail) {
2894 dd->next = ddf->add_list;
2895 ddf->add_list = dd;
2896 } else {
2897 dd->next = ddf->dlist;
2898 ddf->dlist = dd;
2899 ddf_set_updates_pending(ddf, NULL);
2900 }
2901
2902 return 0;
2903 }
2904
2905 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2906 {
2907 struct ddf_super *ddf = st->sb;
2908 struct dl *dl;
2909
2910 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2911 * disappeared from the container.
2912 * We need to arrange that it disappears from the metadata and
2913 * internal data structures too.
2914 * Most of the work is done by ddf_process_update which edits
2915 * the metadata and closes the file handle and attaches the memory
2916 * where free_updates will free it.
2917 */
2918 for (dl = ddf->dlist; dl ; dl = dl->next)
2919 if (dl->major == dk->major &&
2920 dl->minor == dk->minor)
2921 break;
2922 if (!dl || dl->pdnum < 0)
2923 return -1;
2924
2925 if (st->update_tail) {
2926 int len = (sizeof(struct phys_disk) +
2927 sizeof(struct phys_disk_entry));
2928 struct phys_disk *pd;
2929
2930 pd = xmalloc(len);
2931 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2932 pd->used_pdes = cpu_to_be16(dl->pdnum);
2933 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2934 append_metadata_update(st, pd, len);
2935 }
2936 return 0;
2937 }
2938
2939 /*
2940 * This is the write_init_super method for a ddf container. It is
2941 * called when creating a container or adding another device to a
2942 * container.
2943 */
2944
2945 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
2946 {
2947 unsigned long long sector;
2948 struct ddf_header *header;
2949 int fd, i, n_config, conf_size, buf_size;
2950 int ret = 0;
2951 char *conf;
2952
2953 fd = d->fd;
2954
2955 switch (type) {
2956 case DDF_HEADER_PRIMARY:
2957 header = &ddf->primary;
2958 sector = be64_to_cpu(header->primary_lba);
2959 break;
2960 case DDF_HEADER_SECONDARY:
2961 header = &ddf->secondary;
2962 sector = be64_to_cpu(header->secondary_lba);
2963 break;
2964 default:
2965 return 0;
2966 }
2967 if (sector == ~(__u64)0)
2968 return 0;
2969
2970 header->type = type;
2971 header->openflag = 1;
2972 header->crc = calc_crc(header, 512);
2973
2974 lseek64(fd, sector<<9, 0);
2975 if (write(fd, header, 512) < 0)
2976 goto out;
2977
2978 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2979 if (write(fd, &ddf->controller, 512) < 0)
2980 goto out;
2981
2982 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2983 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2984 goto out;
2985 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2986 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2987 goto out;
2988
2989 /* Now write lots of config records. */
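/* One record per partition slot plus one extra slot for the spare
 * assignment record; unused slots are filled with 0xff.
 */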
2990 n_config = ddf->max_part;
2991 conf_size = ddf->conf_rec_len * 512;
2992 conf = ddf->conf;
2993 buf_size = conf_size * (n_config + 1);
2994 if (!conf) {
2995 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
2996 goto out;
2997 ddf->conf = conf;
2998 }
2999 for (i = 0 ; i <= n_config ; i++) {
3000 struct vcl *c;
3001 struct vd_config *vdc = NULL;
3002 if (i == n_config) {
3003 c = (struct vcl *)d->spare;
3004 if (c)
3005 vdc = &c->conf;
3006 } else {
3007 unsigned int dummy;
3008 c = d->vlist[i];
3009 if (c)
3010 get_pd_index_from_refnum(
3011 c, d->disk.refnum,
3012 ddf->mppe,
3013 (const struct vd_config **)&vdc,
3014 &dummy);
3015 }
3016 if (vdc) {
3017 dprintf("writing conf record %i on disk %08x for %s/%u\n",
3018 i, be32_to_cpu(d->disk.refnum),
3019 guid_str(vdc->guid),
3020 vdc->sec_elmnt_seq);
3021 vdc->crc = calc_crc(vdc, conf_size);
3022 memcpy(conf + i*conf_size, vdc, conf_size);
3023 } else
3024 memset(conf + i*conf_size, 0xff, conf_size);
3025 }
3026 if (write(fd, conf, buf_size) != buf_size)
3027 goto out;
3028
3029 d->disk.crc = calc_crc(&d->disk, 512);
3030 if (write(fd, &d->disk, 512) < 0)
3031 goto out;
3032
3033 ret = 1;
3034 out:
3035 header->openflag = 0;
3036 header->crc = calc_crc(header, 512);
3037
3038 lseek64(fd, sector<<9, 0);
3039 if (write(fd, header, 512) < 0)
3040 ret = 0;
3041
3042 return ret;
3043 }
3044
3045 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
3046 {
3047 unsigned long long size;
3048 int fd = d->fd;
3049 if (fd < 0)
3050 return 0;
3051
3052 /* We need to fill in the primary, (secondary) and workspace
3053 * lba's in the headers, set their checksums,
3054 * Also checksum phys, virt....
3055 *
3056 * Then write everything out, finally the anchor is written.
3057 */
3058 get_dev_size(fd, NULL, &size);
3059 size /= 512;
3060 memcpy(&ddf->anchor, ddf->active, 512);
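/* Default placement when this disk records none of its own: workspace
 * and secondary header 32MiB before the end of the device, primary
 * header 16MiB before the end, anchor in the very last sector.
 */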
3061 if (be64_to_cpu(d->workspace_lba) != 0ULL)
3062 ddf->anchor.workspace_lba = d->workspace_lba;
3063 else
3064 ddf->anchor.workspace_lba =
3065 cpu_to_be64(size - 32*1024*2);
3066 if (be64_to_cpu(d->primary_lba) != 0ULL)
3067 ddf->anchor.primary_lba = d->primary_lba;
3068 else
3069 ddf->anchor.primary_lba =
3070 cpu_to_be64(size - 16*1024*2);
3071 if (be64_to_cpu(d->secondary_lba) != 0ULL)
3072 ddf->anchor.secondary_lba = d->secondary_lba;
3073 else
3074 ddf->anchor.secondary_lba =
3075 cpu_to_be64(size - 32*1024*2);
3076 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
3077 memcpy(&ddf->primary, &ddf->anchor, 512);
3078 memcpy(&ddf->secondary, &ddf->anchor, 512);
3079
3080 ddf->anchor.type = DDF_HEADER_ANCHOR;
3081 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
3082 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
3083 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
3084
3085 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
3086 return 0;
3087
3088 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
3089 return 0;
3090
3091 lseek64(fd, (size-1)*512, SEEK_SET);
3092 if (write(fd, &ddf->anchor, 512) < 0)
3093 return 0;
3094
3095 return 1;
3096 }
3097
3098 static int __write_init_super_ddf(struct supertype *st)
3099 {
3100 struct ddf_super *ddf = st->sb;
3101 struct dl *d;
3102 int attempts = 0;
3103 int successes = 0;
3104
3105 pr_state(ddf, __func__);
3106
3107 /* try to write updated metadata,
3108 * if we catch a failure move on to the next disk
3109 */
3110 for (d = ddf->dlist; d; d=d->next) {
3111 attempts++;
3112 successes += _write_super_to_disk(ddf, d);
3113 }
3114
3115 return attempts != successes;
3116 }
3117
3118 static int write_init_super_ddf(struct supertype *st)
3119 {
3120 struct ddf_super *ddf = st->sb;
3121 struct vcl *currentconf = ddf->currentconf;
3122
3123 /* We are done with currentconf - reset it so st refers to the container */
3124 ddf->currentconf = NULL;
3125
3126 if (st->update_tail) {
3127 /* queue the virtual_disk and vd_config as metadata updates */
3128 struct virtual_disk *vd;
3129 struct vd_config *vc;
3130 int len, tlen;
3131 unsigned int i;
3132
3133 if (!currentconf) {
3134 /* Must be adding a physical disk to the container */
3135 int len = (sizeof(struct phys_disk) +
3136 sizeof(struct phys_disk_entry));
3137
3138 /* adding a disk to the container. */
3139 if (!ddf->add_list)
3140 return 0;
3141
3142 append_metadata_update(st, ddf->add_list->mdupdate, len);
3143 ddf->add_list->mdupdate = NULL;
3144 return 0;
3145 }
3146
3147 /* Newly created VD */
3148
3149 /* First the virtual disk. We have a slightly fake header */
3150 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3151 vd = xmalloc(len);
3152 *vd = *ddf->virt;
3153 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3154 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3155 append_metadata_update(st, vd, len);
3156
3157 /* Then the vd_config */
3158 len = ddf->conf_rec_len * 512;
3159 tlen = len * currentconf->conf.sec_elmnt_count;
3160 vc = xmalloc(tlen);
3161 memcpy(vc, &currentconf->conf, len);
3162 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3163 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3164 len);
3165 append_metadata_update(st, vc, tlen);
3166
3167 return 0;
3168 } else {
3169 struct dl *d;
3170 if (!currentconf)
3171 for (d = ddf->dlist; d; d=d->next)
3172 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3173 /* Note: we don't close the fd's now, but a subsequent
3174 * ->free_super() will
3175 */
3176 return __write_init_super_ddf(st);
3177 }
3178 }
3179
3180 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3181 unsigned long long data_offset)
3182 {
3183 /* We must reserve the last 32Meg */
3184 if (devsize <= 32*1024*2)
3185 return 0;
3186 return devsize - 32*1024*2;
3187 }
3188
3189 static int reserve_space(struct supertype *st, int raiddisks,
3190 unsigned long long size, int chunk,
3191 unsigned long long data_offset,
3192 unsigned long long *freesize)
3193 {
3194 /* Find 'raiddisks' spare extents at least 'size' big (but
3195 * only caring about multiples of 'chunk') and remember
3196 * them. If size==0, find the largest size possible.
3197 * Report available size in *freesize
3198 * If space cannot be found, fail.
3199 */
3200 struct dl *dl;
3201 struct ddf_super *ddf = st->sb;
3202 int cnt = 0;
3203
3204 for (dl = ddf->dlist; dl ; dl=dl->next) {
3205 dl->raiddisk = -1;
3206 dl->esize = 0;
3207 }
3208 /* Now find largest extent on each device */
3209 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3210 unsigned long long minsize = ULLONG_MAX;
3211
3212 find_space(ddf, dl, data_offset, &minsize);
3213 if (minsize >= size && minsize >= (unsigned)chunk) {
3214 cnt++;
3215 dl->esize = minsize;
3216 }
3217 }
3218 if (cnt < raiddisks) {
3219 pr_err("not enough devices with space to create array.\n");
3220 return 0; /* Not enough devices with large enough free space */
3221 }
3222 if (size == 0) {
3223 /* choose the largest size for which there are at least 'raiddisks' extents */
3224 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3225 struct dl *dl2;
3226 if (dl->esize <= size)
3227 continue;
3228 /* This is bigger than 'size', see if there are enough */
3229 cnt = 0;
3230 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3231 if (dl2->esize >= dl->esize)
3232 cnt++;
3233 if (cnt >= raiddisks)
3234 size = dl->esize;
3235 }
3236 if (chunk) {
3237 size = size / chunk;
3238 size *= chunk;
3239 }
3240 *freesize = size;
3241 if (size < 32) {
3242 pr_err("not enough spare devices to create array.\n");
3243 return 0;
3244 }
3245 }
3246 /* We have a 'size' of which there are enough spaces.
3247 * We simply do a first-fit */
3248 cnt = 0;
3249 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3250 if (dl->esize < size)
3251 continue;
3252
3253 dl->raiddisk = cnt;
3254 cnt++;
3255 }
3256 return 1;
3257 }
3258
3259 static int
3260 validate_geometry_ddf_container(struct supertype *st,
3261 int level, int raiddisks,
3262 unsigned long long data_offset,
3263 char *dev, unsigned long long *freesize,
3264 int verbose)
3265 {
3266 int fd;
3267 unsigned long long ldsize;
3268
3269 if (!is_container(level))
3270 return 0;
3271 if (!dev)
3272 return 1;
3273
3274 fd = dev_open(dev, O_RDONLY|O_EXCL);
3275 if (fd < 0) {
3276 if (verbose)
3277 pr_err("ddf: Cannot open %s: %s\n",
3278 dev, strerror(errno));
3279 return 0;
3280 }
3281 if (!get_dev_size(fd, dev, &ldsize)) {
3282 close(fd);
3283 return 0;
3284 }
3285 close(fd);
3286 if (freesize) {
3287 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3288 if (*freesize == 0)
3289 return 0;
3290 }
3291
3292 return 1;
3293 }
3294
3295 static int validate_geometry_ddf(struct supertype *st,
3296 int level, int layout, int raiddisks,
3297 int *chunk, unsigned long long size,
3298 unsigned long long data_offset,
3299 char *dev, unsigned long long *freesize,
3300 int consistency_policy, int verbose)
3301 {
3302 int fd;
3303 struct mdinfo *sra;
3304 int cfd;
3305
3306 /* ddf potentially supports lots of things, but it depends on
3307 * what devices are offered (and maybe kernel version?)
3308 * If given unused devices, we will make a container.
3309 * If given devices in a container, we will make a BVD.
3310 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3311 */
3312
3313 if (level == LEVEL_NONE)
3314 level = LEVEL_CONTAINER;
3315 if (is_container(level)) {
3316 /* Must be a fresh device to add to a container */
3317 return validate_geometry_ddf_container(st, level, raiddisks,
3318 data_offset, dev,
3319 freesize, verbose);
3320 }
3321
3322 if (*chunk == UnSet)
3323 *chunk = DEFAULT_CHUNK;
3324
3325 if (!dev) {
3326 mdu_array_info_t array = {
3327 .level = level,
3328 .layout = layout,
3329 .raid_disks = raiddisks
3330 };
3331 struct vd_config conf;
3332 if (layout_md2ddf(&array, &conf) == -1) {
3333 if (verbose)
3334 pr_err("DDF does not support level %d/layout %d arrays with %d disks\n",
3335 level, layout, raiddisks);
3336 return 0;
3337 }
3338 /* Should check layout? etc */
3339
3340 if (st->sb && freesize) {
3341 /* --create was given a container to create in.
3342 * So we need to check that there are enough
3343 * free spaces and return the amount of space.
3344 * We may as well remember which drives were
3345 * chosen so that add_to_super/getinfo_super
3346 * can return them.
3347 */
3348 return reserve_space(st, raiddisks, size, *chunk,
3349 data_offset, freesize);
3350 }
3351 return 1;
3352 }
3353
3354 if (st->sb) {
3355 /* A container has already been opened, so we are
3356 * creating in there. Maybe a BVD, maybe an SVD.
3357 * Should make a distinction one day.
3358 */
3359 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3360 chunk, size, data_offset, dev,
3361 freesize,
3362 verbose);
3363 }
3364 /* This is the first device for the array.
3365 * If it is a container, we read it in and do automagic allocations,
3366 * no other devices should be given.
3367 * Otherwise it must be a member device of a container, and we
3368 * do manual allocation.
3369 * Later we should check for a BVD and make an SVD.
3370 */
3371 fd = open(dev, O_RDONLY|O_EXCL, 0);
3372 if (fd >= 0) {
3373 close(fd);
3374 /* Just a bare device, no good to us */
3375 if (verbose)
3376 pr_err("ddf: Cannot create this array on device %s - a container is required.\n",
3377 dev);
3378 return 0;
3379 }
3380 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3381 if (verbose)
3382 pr_err("ddf: Cannot open %s: %s\n",
3383 dev, strerror(errno));
3384 return 0;
3385 }
3386 /* Well, it is in use by someone, maybe a 'ddf' container. */
3387 cfd = open_container(fd);
3388 if (cfd < 0) {
3389 close(fd);
3390 if (verbose)
3391 pr_err("ddf: Cannot use %s: %s\n",
3392 dev, strerror(EBUSY));
3393 return 0;
3394 }
3395 sra = sysfs_read(cfd, NULL, GET_VERSION);
3396 close(fd);
3397 if (sra && sra->array.major_version == -1 &&
3398 strcmp(sra->text_version, "ddf") == 0) {
3399 /* This is a member of a ddf container. Load the container
3400 * and try to create a bvd
3401 */
3402 struct ddf_super *ddf;
3403 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3404 st->sb = ddf;
3405 strcpy(st->container_devnm, fd2devnm(cfd));
3406 close(cfd);
3407 return validate_geometry_ddf_bvd(st, level, layout,
3408 raiddisks, chunk, size,
3409 data_offset,
3410 dev, freesize,
3411 verbose);
3412 }
3413 close(cfd);
3414 } else /* device may belong to a different container */
3415 return 0;
3416
3417 return 1;
3418 }
3419
3420 static int validate_geometry_ddf_bvd(struct supertype *st,
3421 int level, int layout, int raiddisks,
3422 int *chunk, unsigned long long size,
3423 unsigned long long data_offset,
3424 char *dev, unsigned long long *freesize,
3425 int verbose)
3426 {
3427 dev_t rdev;
3428 struct ddf_super *ddf = st->sb;
3429 struct dl *dl;
3430 unsigned long long maxsize;
3431 /* ddf/bvd supports lots of things, but not containers */
3432 if (is_container(level)) {
3433 if (verbose)
3434 pr_err("DDF cannot create a container within a container\n");
3435 return 0;
3436 }
3437 /* We must have the container info already read in. */
3438 if (!ddf)
3439 return 0;
3440
3441 if (!dev) {
3442 /* General test: make sure there is space for
3443 * 'raiddisks' device extents of size 'size'.
3444 */
3445 unsigned long long minsize = size;
3446 int dcnt = 0;
3447 if (minsize == 0)
3448 minsize = 8;
3449 for (dl = ddf->dlist; dl ; dl = dl->next) {
3450 if (find_space(ddf, dl, data_offset, &minsize) !=
3451 INVALID_SECTORS)
3452 dcnt++;
3453 }
3454 if (dcnt < raiddisks) {
3455 if (verbose)
3456 pr_err("ddf: Not enough devices with space for this array (%d < %d)\n",
3457 dcnt, raiddisks);
3458 return 0;
3459 }
3460 return 1;
3461 }
3462 /* This device must be a member of the set */
3463 if (!stat_is_blkdev(dev, &rdev))
3464 return 0;
3465 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3466 if (dl->major == (int)major(rdev) &&
3467 dl->minor == (int)minor(rdev))
3468 break;
3469 }
3470 if (!dl) {
3471 if (verbose)
3472 pr_err("ddf: %s is not in the same DDF set\n",
3473 dev);
3474 return 0;
3475 }
3476 maxsize = ULLONG_MAX;
3477 find_space(ddf, dl, data_offset, &maxsize);
3478 *freesize = maxsize;
3479
3480 return 1;
3481 }
3482
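/* Load DDF metadata for an assembled container: 'fd' refers to the
 * container device. Each member is read and the copy with the highest
 * sequence number (an 'open' copy counts as one older) is chosen as
 * the reference; its global sections are loaded, then the per-disk
 * local sections are loaded from every member.
 */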
3483 static int load_super_ddf_all(struct supertype *st, int fd,
3484 void **sbp, char *devname)
3485 {
3486 struct mdinfo *sra;
3487 struct ddf_super *super;
3488 struct mdinfo *sd, *best = NULL;
3489 int bestseq = 0;
3490 int seq;
3491 char nm[20];
3492 int dfd;
3493
3494 sra = sysfs_read(fd, NULL, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3495 if (!sra)
3496 return 1;
3497 if (sra->array.major_version != -1 ||
3498 sra->array.minor_version != -2 ||
3499 strcmp(sra->text_version, "ddf") != 0)
3500 return 1;
3501
3502 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3503 return 1;
3504 memset(super, 0, sizeof(*super));
3505
3506 /* first, try each device, and choose the best ddf */
3507 for (sd = sra->devs ; sd ; sd = sd->next) {
3508 int rv;
3509 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3510 dfd = dev_open(nm, O_RDONLY);
3511 if (dfd < 0)
3512 return 2;
3513 rv = load_ddf_headers(dfd, super, NULL);
3514 close(dfd);
3515 if (rv == 0) {
3516 seq = be32_to_cpu(super->active->seq);
3517 if (super->active->openflag)
3518 seq--;
3519 if (!best || seq > bestseq) {
3520 bestseq = seq;
3521 best = sd;
3522 }
3523 }
3524 }
3525 if (!best)
3526 return 1;
3527 /* OK, load this ddf */
3528 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3529 dfd = dev_open(nm, O_RDONLY);
3530 if (dfd < 0)
3531 return 1;
3532 load_ddf_headers(dfd, super, NULL);
3533 load_ddf_global(dfd, super, NULL);
3534 close(dfd);
3535 /* Now we need the device-local bits */
3536 for (sd = sra->devs ; sd ; sd = sd->next) {
3537 int rv;
3538
3539 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3540 dfd = dev_open(nm, O_RDWR);
3541 if (dfd < 0)
3542 return 2;
3543 rv = load_ddf_headers(dfd, super, NULL);
3544 if (rv == 0)
3545 rv = load_ddf_local(dfd, super, NULL, 1);
3546 if (rv)
3547 return 1;
3548 }
3549
3550 *sbp = super;
3551 if (st->ss == NULL) {
3552 st->ss = &super_ddf;
3553 st->minor_version = 0;
3554 st->max_devs = 512;
3555 }
3556 strcpy(st->container_devnm, fd2devnm(fd));
3557 return 0;
3558 }
3559
3560 static int load_container_ddf(struct supertype *st, int fd,
3561 char *devname)
3562 {
3563 return load_super_ddf_all(st, fd, &st->sb, devname);
3564 }
3565
3566 static int check_secondary(const struct vcl *vc)
3567 {
3568 const struct vd_config *conf = &vc->conf;
3569 int i;
3570
3571 /* The only DDF secondary RAID level md can support is
3572 * RAID 10, if the stripe sizes and Basic volume sizes
3573 * are all equal.
3574 * Other configurations could in theory be supported by exposing
3575 * the BVDs to user space and using device mapper for the secondary
3576 * mapping. So far we don't support that.
3577 */
3578
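/* sec_elements is a 256-bit bitmap recording which sec_elmnt_seq
 * values have been seen, so missing BVDs can be detected below.
 */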
3579 __u64 sec_elements[4] = {0, 0, 0, 0};
3580 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1ULL<<((n)&63)))
3581 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1ULL<<((n)&63))) != 0)
3582
3583 if (vc->other_bvds == NULL) {
3584 pr_err("No BVDs for secondary RAID found\n");
3585 return -1;
3586 }
3587 if (conf->prl != DDF_RAID1) {
3588 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3589 return -1;
3590 }
3591 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3592 pr_err("Secondary RAID level %d is unsupported\n",
3593 conf->srl);
3594 return -1;
3595 }
3596 __set_sec_seen(conf->sec_elmnt_seq);
3597 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3598 const struct vd_config *bvd = vc->other_bvds[i];
3599 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3600 continue;
3601 if (bvd->srl != conf->srl) {
3602 pr_err("Inconsistent secondary RAID level across BVDs\n");
3603 return -1;
3604 }
3605 if (bvd->prl != conf->prl) {
3606 pr_err("Different RAID levels for BVDs are unsupported\n");
3607 return -1;
3608 }
3609 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3610 pr_err("All BVDs must have the same number of primary elements\n");
3611 return -1;
3612 }
3613 if (bvd->chunk_shift != conf->chunk_shift) {
3614 pr_err("Different strip sizes for BVDs are unsupported\n");
3615 return -1;
3616 }
3617 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3618 pr_err("Different BVD sizes are unsupported\n");
3619 return -1;
3620 }
3621 __set_sec_seen(bvd->sec_elmnt_seq);
3622 }
3623 for (i = 0; i < conf->sec_elmnt_count; i++) {
3624 if (!__was_sec_seen(i)) {
3625 /* pr_err("BVD %d is missing\n", i); */
3626 return -1;
3627 }
3628 }
3629 return 0;
3630 }
3631
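/* Map a physical disk reference number to the md raid_disk number
 * within the virtual disk described by 'vc'. On success *bvd points to
 * the BVD configuration containing the disk, *idx is its slot in that
 * BVD's phys_refnum table, and the return value is
 * sec_elmnt_seq * prim_elmnt_count + (used slots before it).
 * Returns DDF_NOTFOUND with *bvd == NULL if the refnum is not used.
 */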
3632 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3633 be32 refnum, unsigned int nmax,
3634 const struct vd_config **bvd,
3635 unsigned int *idx)
3636 {
3637 unsigned int i, j, n, sec, cnt;
3638
3639 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3640 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3641
3642 for (i = 0, j = 0 ; i < nmax ; i++) {
3643 /* j counts valid entries for this BVD */
3644 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3645 *bvd = &vc->conf;
3646 *idx = i;
3647 return sec * cnt + j;
3648 }
3649 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3650 j++;
3651 }
3652 if (vc->other_bvds == NULL)
3653 goto bad;
3654
3655 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3656 struct vd_config *vd = vc->other_bvds[n-1];
3657 sec = vd->sec_elmnt_seq;
3658 if (sec == DDF_UNUSED_BVD)
3659 continue;
3660 for (i = 0, j = 0 ; i < nmax ; i++) {
3661 if (be32_eq(vd->phys_refnum[i], refnum)) {
3662 *bvd = vd;
3663 *idx = i;
3664 return sec * cnt + j;
3665 }
3666 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3667 j++;
3668 }
3669 }
3670 bad:
3671 *bvd = NULL;
3672 return DDF_NOTFOUND;
3673 }
3674
3675 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3676 {
3677 /* Given a container loaded by load_super_ddf_all,
3678 * extract information about all the arrays into
3679 * an mdinfo tree.
3680 *
3681 * For each vcl in conflist: create an mdinfo, fill it in,
3682 * then look for matching devices (phys_refnum) in dlist
3683 * and create appropriate device mdinfo.
3684 */
3685 struct ddf_super *ddf = st->sb;
3686 struct mdinfo *rest = NULL;
3687 struct vcl *vc;
3688
3689 for (vc = ddf->conflist ; vc ; vc=vc->next) {
3690 unsigned int i;
3691 struct mdinfo *this;
3692 char *ep;
3693 __u32 *cptr;
3694 unsigned int pd;
3695
3696 if (subarray &&
3697 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3698 *ep != '\0'))
3699 continue;
3700
3701 if (vc->conf.sec_elmnt_count > 1) {
3702 if (check_secondary(vc) != 0)
3703 continue;
3704 }
3705
3706 this = xcalloc(1, sizeof(*this));
3707 this->next = rest;
3708 rest = this;
3709
3710 if (layout_ddf2md(&vc->conf, &this->array))
3711 continue;
3712 this->array.md_minor = -1;
3713 this->array.major_version = -1;
3714 this->array.minor_version = -2;
3715 this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
3716 cptr = (__u32 *)(vc->conf.guid + 16);
3717 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3718 this->array.utime = DECADE +
3719 be32_to_cpu(vc->conf.timestamp);
3720 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3721
3722 i = vc->vcnum;
3723 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3724 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3725 DDF_init_full) {
3726 this->array.state = 0;
3727 this->resync_start = 0;
3728 } else {
3729 this->array.state = 1;
3730 this->resync_start = MaxSector;
3731 }
3732 _ddf_array_name(this->name, ddf, i);
3733 memset(this->uuid, 0, sizeof(this->uuid));
3734 this->component_size = be64_to_cpu(vc->conf.blocks);
3735 this->array.size = this->component_size / 2;
3736 this->container_member = i;
3737
3738 ddf->currentconf = vc;
3739 uuid_from_super_ddf(st, this->uuid);
3740 if (!subarray)
3741 ddf->currentconf = NULL;
3742
3743 sprintf(this->text_version, "/%s/%d",
3744 st->container_devnm, this->container_member);
3745
3746 for (pd = 0; pd < be16_to_cpu(ddf->phys->max_pdes); pd++) {
3747 struct mdinfo *dev;
3748 struct dl *d;
3749 const struct vd_config *bvd;
3750 unsigned int iphys;
3751 int stt;
3752
3753 if (be32_to_cpu(ddf->phys->entries[pd].refnum) ==
3754 0xffffffff)
3755 continue;
3756
3757 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3758 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding)) !=
3759 DDF_Online)
3760 continue;
3761
3762 i = get_pd_index_from_refnum(
3763 vc, ddf->phys->entries[pd].refnum,
3764 ddf->mppe, &bvd, &iphys);
3765 if (i == DDF_NOTFOUND)
3766 continue;
3767
3768 this->array.working_disks++;
3769
3770 for (d = ddf->dlist; d ; d=d->next)
3771 if (be32_eq(d->disk.refnum,
3772 ddf->phys->entries[pd].refnum))
3773 break;
3774 if (d == NULL)
3775 /* Haven't found that one yet, maybe there are others */
3776 continue;
3777
3778 dev = xcalloc(1, sizeof(*dev));
3779 dev->next = this->devs;
3780 this->devs = dev;
3781
3782 dev->disk.number = be32_to_cpu(d->disk.refnum);
3783 dev->disk.major = d->major;
3784 dev->disk.minor = d->minor;
3785 dev->disk.raid_disk = i;
3786 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3787 dev->recovery_start = MaxSector;
3788
3789 dev->events = be32_to_cpu(ddf->active->seq);
3790 dev->data_offset =
3791 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3792 dev->component_size = be64_to_cpu(bvd->blocks);
3793 if (d->devname)
3794 strcpy(dev->name, d->devname);
3795 }
3796 }
3797 return rest;
3798 }
3799
3800 static int store_super_ddf(struct supertype *st, int fd)
3801 {
3802 struct ddf_super *ddf = st->sb;
3803 unsigned long long dsize;
3804 void *buf;
3805 int rc;
3806
3807 if (!ddf)
3808 return 1;
3809
3810 if (!get_dev_size(fd, NULL, &dsize))
3811 return 1;
3812
3813 if (ddf->dlist || ddf->conflist) {
3814 struct stat sta;
3815 struct dl *dl;
3816 int ofd, ret;
3817
3818 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3819 pr_err("file descriptor for invalid device\n");
3820 return 1;
3821 }
3822 for (dl = ddf->dlist; dl; dl = dl->next)
3823 if (dl->major == (int)major(sta.st_rdev) &&
3824 dl->minor == (int)minor(sta.st_rdev))
3825 break;
3826 if (!dl) {
3827 pr_err("couldn't find disk %d/%d\n",
3828 (int)major(sta.st_rdev),
3829 (int)minor(sta.st_rdev));
3830 return 1;
3831 }
3832 ofd = dl->fd;
3833 dl->fd = fd;
3834 ret = (_write_super_to_disk(ddf, dl) != 1);
3835 dl->fd = ofd;
3836 return ret;
3837 }
3838
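/* No metadata loaded for this container: just clear the 512-byte
 * anchor block at the end of the device. */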
3839 if (posix_memalign(&buf, 512, 512) != 0)
3840 return 1;
3841 memset(buf, 0, 512);
3842
3843 lseek64(fd, dsize-512, 0);
3844 rc = write(fd, buf, 512);
3845 free(buf);
3846 if (rc < 0)
3847 return 1;
3848 return 0;
3849 }
3850
3851 static int compare_super_ddf(struct supertype *st, struct supertype *tst,
3852 int verbose)
3853 {
3854 /*
3855 * return:
3856 * 0 same, or first was empty, and second was copied
3857 * 1 second had wrong magic number - but that isn't possible
3858 * 2 wrong uuid
3859 * 3 wrong other info
3860 */
3861 struct ddf_super *first = st->sb;
3862 struct ddf_super *second = tst->sb;
3863 struct dl *dl1, *dl2;
3864 struct vcl *vl1, *vl2;
3865 unsigned int max_vds, max_pds, pd, vd;
3866
3867 if (!first) {
3868 st->sb = tst->sb;
3869 tst->sb = NULL;
3870 return 0;
3871 }
3872
3873 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3874 return 2;
3875
3876 /* It is only OK to compare info in the anchor. Anything else
3877 * could be changing due to a reconfig so must be ignored.
3878 * guid really should be enough anyway.
3879 */
3880
3881 if (!be32_eq(first->active->seq, second->active->seq)) {
3882 dprintf("sequence number mismatch %u<->%u\n",
3883 be32_to_cpu(first->active->seq),
3884 be32_to_cpu(second->active->seq));
3885 return 0;
3886 }
3887
3888 /*
3889 * At this point we are fairly sure that the metadata matches.
3890 * But the new disk may contain additional local data.
3891 * Add it to the superblock.
3892 */
3893 max_vds = be16_to_cpu(first->active->max_vd_entries);
3894 max_pds = be16_to_cpu(first->phys->max_pdes);
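/* Merge any virtual-disk configuration records that only the second
 * superblock knows about, keyed by GUID; for SVDs also adopt any
 * additional BVDs. */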
3895 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3896 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3897 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3898 DDF_GUID_LEN))
3899 break;
3900 if (vl1) {
3901 if (vl1->other_bvds != NULL &&
3902 vl1->conf.sec_elmnt_seq !=
3903 vl2->conf.sec_elmnt_seq) {
3904 dprintf("adding BVD %u\n",
3905 vl2->conf.sec_elmnt_seq);
3906 add_other_bvd(vl1, &vl2->conf,
3907 first->conf_rec_len*512);
3908 }
3909 continue;
3910 }
3911
3912 if (posix_memalign((void **)&vl1, 512,
3913 (first->conf_rec_len*512 +
3914 offsetof(struct vcl, conf))) != 0) {
3915 pr_err("could not allocate vcl buf\n");
3916 return 3;
3917 }
3918
3919 vl1->next = first->conflist;
3920 vl1->block_sizes = NULL;
3921 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3922 if (alloc_other_bvds(first, vl1) != 0) {
3923 pr_err("could not allocate other bvds\n");
3924 free(vl1);
3925 return 3;
3926 }
3927 for (vd = 0; vd < max_vds; vd++)
3928 if (!memcmp(first->virt->entries[vd].guid,
3929 vl1->conf.guid, DDF_GUID_LEN))
3930 break;
3931 vl1->vcnum = vd;
3932 dprintf("added config for VD %u\n", vl1->vcnum);
3933 first->conflist = vl1;
3934 }
3935
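/* Likewise merge per-disk (dl) entries known only to the second
 * superblock, keyed by refnum, recomputing pdnum and the vlist
 * pointers against the first superblock's lists. */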
3936 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3937 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3938 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
3939 break;
3940 if (dl1)
3941 continue;
3942
3943 if (posix_memalign((void **)&dl1, 512,
3944 sizeof(*dl1) + (first->max_part) *
3945 sizeof(dl1->vlist[0])) != 0) {
3946 pr_err("could not allocate disk info buffer\n");
3947 return 3;
3948 }
3949 memcpy(dl1, dl2, sizeof(*dl1));
3950 dl1->mdupdate = NULL;
3951 dl1->next = first->dlist;
3952 dl1->fd = -1;
3953 for (pd = 0; pd < max_pds; pd++)
3954 if (be32_eq(first->phys->entries[pd].refnum,
3955 dl1->disk.refnum))
3956 break;
3957 dl1->pdnum = pd < max_pds ? (int)pd : -1;
3958 if (dl2->spare) {
3959 if (posix_memalign((void **)&dl1->spare, 512,
3960 first->conf_rec_len*512) != 0) {
3961 pr_err("could not allocate spare info buf\n");
3962 return 3;
3963 }
3964 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
3965 }
3966 for (vd = 0 ; vd < first->max_part ; vd++) {
3967 if (!dl2->vlist[vd]) {
3968 dl1->vlist[vd] = NULL;
3969 continue;
3970 }
3971 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
3972 if (!memcmp(vl1->conf.guid,
3973 dl2->vlist[vd]->conf.guid,
3974 DDF_GUID_LEN))
3975 break;
3976 dl1->vlist[vd] = vl1;
3977 }
3978 }
3979 first->dlist = dl1;
3980 dprintf("added disk %d: %08x\n", dl1->pdnum,
3981 be32_to_cpu(dl1->disk.refnum));
3982 }
3983
3984 return 0;
3985 }
3986
3987 /*
3988 * A new array 'a' has been started which claims to be instance 'inst'
3989 * within container 'c'.
3990 * We need to confirm that the array matches the metadata in 'c' so
3991 * that we don't corrupt any metadata.
3992 */
3993 static int ddf_open_new(struct supertype *c, struct active_array *a, int inst)
3994 {
3995 struct ddf_super *ddf = c->sb;
3996 struct mdinfo *dev;
3997 struct dl *dl;
3998 static const char faulty[] = "faulty";
3999
4000 if (all_ff(ddf->virt->entries[inst].guid)) {
4001 pr_err("subarray %d doesn't exist\n", inst);
4002 return -ENODEV;
4003 }
4004 dprintf("new subarray %d, GUID: %s\n", inst,
4005 guid_str(ddf->virt->entries[inst].guid));
4006 for (dev = a->info.devs; dev; dev = dev->next) {
4007 for (dl = ddf->dlist; dl; dl = dl->next)
4008 if (dl->major == dev->disk.major &&
4009 dl->minor == dev->disk.minor)
4010 break;
4011 if (!dl || dl->pdnum < 0) {
4012 pr_err("device %d/%d of subarray %d not found in meta data\n",
4013 dev->disk.major, dev->disk.minor, inst);
4014 return -1;
4015 }
4016 if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
4017 (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
4018 pr_err("new subarray %d contains broken device %d/%d (%02x)\n",
4019 inst, dl->major, dl->minor,
4020 be16_to_cpu(ddf->phys->entries[dl->pdnum].state));
4021 if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
4022 sizeof(faulty) - 1)
4023 pr_err("Write to state_fd failed\n");
4024 dev->curr_state = DS_FAULTY;
4025 }
4026 }
4027 a->info.container_member = inst;
4028 return 0;
4029 }
4030
4031 static void handle_missing(struct ddf_super *ddf, struct active_array *a, int inst)
4032 {
4033 /* This member array is being activated. If any devices
4034 * are missing they must now be marked as failed.
4035 */
4036 struct vd_config *vc;
4037 unsigned int n_bvd;
4038 struct vcl *vcl;
4039 struct dl *dl;
4040 int pd;
4041 int n;
4042 int state;
4043
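/* Walk each raid-disk slot of this member array; find_vdcr() returns
 * the BVD configuration and index for slot n, or NULL when there are
 * no more slots. */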
4044 for (n = 0; ; n++) {
4045 vc = find_vdcr(ddf, inst, n, &n_bvd, &vcl);
4046 if (!vc)
4047 break;
4048 for (dl = ddf->dlist; dl; dl = dl->next)
4049 if (be32_eq(dl->disk.refnum, vc->phys_refnum[n_bvd]))
4050 break;
4051 if (dl)
4052 /* Found this disk, so not missing */
4053 continue;
4054
4055 /* Mark the device as failed/missing. */
4056 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4057 if (pd >= 0 && be16_and(ddf->phys->entries[pd].state,
4058 cpu_to_be16(DDF_Online))) {
4059 be16_clear(ddf->phys->entries[pd].state,
4060 cpu_to_be16(DDF_Online));
4061 be16_set(ddf->phys->entries[pd].state,
4062 cpu_to_be16(DDF_Failed|DDF_Missing));
4063 vc->phys_refnum[n_bvd] = cpu_to_be32(0);
4064 ddf_set_updates_pending(ddf, vc);
4065 }
4066
4067 /* Mark the array as Degraded */
4068 state = get_svd_state(ddf, vcl);
4069 if (ddf->virt->entries[inst].state !=
4070 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4071 | state)) {
4072 ddf->virt->entries[inst].state =
4073 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4074 | state;
4075 a->check_degraded = 1;
4076 ddf_set_updates_pending(ddf, vc);
4077 }
4078 }
4079 }
4080
4081 /*
4082 * The array 'a' is to be marked clean in the metadata.
4083 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4084 * clean up to that point (in sectors). If that cannot be recorded in the
4085 * metadata, then leave it as dirty.
4086 *
4087 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4088 * !global! virtual_disk.virtual_entry structure.
4089 */
4090 static int ddf_set_array_state(struct active_array *a, int consistent)
4091 {
4092 struct ddf_super *ddf = a->container->sb;
4093 int inst = a->info.container_member;
4094 int old = ddf->virt->entries[inst].state;
4095 if (consistent == 2) {
4096 handle_missing(ddf, a, inst);
4097 consistent = 1;
4098 if (!is_resync_complete(&a->info))
4099 consistent = 0;
4100 }
4101 if (consistent)
4102 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4103 else
4104 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4105 if (old != ddf->virt->entries[inst].state)
4106 ddf_set_updates_pending(ddf, NULL);
4107
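/* Record how far initialisation has got: full if resync is complete,
 * 'not' if it never started, otherwise 'quick'. */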
4108 old = ddf->virt->entries[inst].init_state;
4109 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4110 if (is_resync_complete(&a->info))
4111 ddf->virt->entries[inst].init_state |= DDF_init_full;
4112 else if (a->info.resync_start == 0)
4113 ddf->virt->entries[inst].init_state |= DDF_init_not;
4114 else
4115 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4116 if (old != ddf->virt->entries[inst].init_state)
4117 ddf_set_updates_pending(ddf, NULL);
4118
4119 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4120 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4121 consistent?"clean":"dirty",
4122 a->info.resync_start);
4123 return consistent;
4124 }
4125
4126 static int get_bvd_state(const struct ddf_super *ddf,
4127 const struct vd_config *vc)
4128 {
4129 unsigned int i, n_bvd, working = 0;
4130 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4131 int pd, st, state;
4132 char *avail = xcalloc(1, n_prim);
4133 mdu_array_info_t array;
4134
4135 layout_ddf2md(vc, &array);
4136
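/* Count working members of this BVD: a slot only counts if its physical
 * disk is Online and neither Failed nor Rebuilding. avail[] feeds
 * enough() for the RAID1E case below. */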
4137 for (i = 0; i < n_prim; i++) {
4138 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4139 continue;
4140 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4141 if (pd < 0)
4142 continue;
4143 st = be16_to_cpu(ddf->phys->entries[pd].state);
4144 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding)) ==
4145 DDF_Online) {
4146 working++;
4147 avail[i] = 1;
4148 }
4149 }
4150
4151 state = DDF_state_degraded;
4152 if (working == n_prim)
4153 state = DDF_state_optimal;
4154 else
4155 switch (vc->prl) {
4156 case DDF_RAID0:
4157 case DDF_CONCAT:
4158 case DDF_JBOD:
4159 state = DDF_state_failed;
4160 break;
4161 case DDF_RAID1:
4162 if (working == 0)
4163 state = DDF_state_failed;
4164 else if (working >= 2)
4165 state = DDF_state_part_optimal;
4166 break;
4167 case DDF_RAID1E:
4168 if (!enough(10, n_prim, array.layout, 1, avail))
4169 state = DDF_state_failed;
4170 break;
4171 case DDF_RAID4:
4172 case DDF_RAID5:
4173 if (working < n_prim - 1)
4174 state = DDF_state_failed;
4175 break;
4176 case DDF_RAID6:
4177 if (working < n_prim - 2)
4178 state = DDF_state_failed;
4179 else if (working == n_prim - 1)
4180 state = DDF_state_part_optimal;
4181 break;
4182 }
free(avail); /* avail is only needed for the checks above */
4183 return state;
4184 }
4185
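/* Combine the states of two BVDs of an SVD: for a mirrored secondary
 * level one optimal BVD keeps the SVD usable, while for striped or
 * concatenated secondary levels any failed BVD fails the whole SVD. */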
4186 static int secondary_state(int state, int other, int seclevel)
4187 {
4188 if (state == DDF_state_optimal && other == DDF_state_optimal)
4189 return DDF_state_optimal;
4190 if (seclevel == DDF_2MIRRORED) {
4191 if (state == DDF_state_optimal || other == DDF_state_optimal)
4192 return DDF_state_part_optimal;
4193 if (state == DDF_state_failed && other == DDF_state_failed)
4194 return DDF_state_failed;
4195 return DDF_state_degraded;
4196 } else {
4197 if (state == DDF_state_failed || other == DDF_state_failed)
4198 return DDF_state_failed;
4199 if (state == DDF_state_degraded || other == DDF_state_degraded)
4200 return DDF_state_degraded;
4201 return DDF_state_part_optimal;
4202 }
4203 }
4204
4205 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4206 {
4207 int state = get_bvd_state(ddf, &vcl->conf);
4208 unsigned int i;
4209 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4210 state = secondary_state(
4211 state,
4212 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4213 vcl->conf.srl);
4214 }
4215 return state;
4216 }
4217
4218 /*
4219 * The state of each disk is stored in the global phys_disk structure
4220 * in phys_disk.entries[n].state.
4221 * This makes various combinations awkward.
4222 * - When a device fails in any array, it must be failed in all arrays
4223 * that include a part of this device.
4224 * - When a component is rebuilding, we cannot include it officially in the
4225 * array unless this is the only array that uses the device.
4226 *
4227 * So: when transitioning:
4228 * Online -> failed, just set failed flag. monitor will propagate
4229 * spare -> online, the device might need to be added to the array.
4230 * spare -> failed, just set failed. Don't worry if in array or not.
4231 */
4232 static void ddf_set_disk(struct active_array *a, int n, int state)
4233 {
4234 struct ddf_super *ddf = a->container->sb;
4235 unsigned int inst = a->info.container_member, n_bvd;
4236 struct vcl *vcl;
4237 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4238 &n_bvd, &vcl);
4239 int pd;
4240 struct mdinfo *mdi;
4241 struct dl *dl;
4242 int update = 0;
4243
4244 dprintf("%d to %x\n", n, state);
4245 if (vc == NULL) {
4246 dprintf("ddf: cannot find instance %d!!\n", inst);
4247 return;
4248 }
4249 /* Find the matching slot in 'info'. */
4250 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4251 if (mdi->disk.raid_disk == n)
4252 break;
4253 if (!mdi) {
4254 pr_err("cannot find raid disk %d\n", n);
4255 return;
4256 }
4257
4258 /* and find the 'dl' entry corresponding to that. */
4259 for (dl = ddf->dlist; dl; dl = dl->next)
4260 if (mdi->state_fd >= 0 &&
4261 mdi->disk.major == dl->major &&
4262 mdi->disk.minor == dl->minor)
4263 break;
4264 if (!dl) {
4265 pr_err("cannot find raid disk %d (%d/%d)\n",
4266 n, mdi->disk.major, mdi->disk.minor);
4267 return;
4268 }
4269
4270 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4271 if (pd < 0 || pd != dl->pdnum) {
4272 /* disk doesn't currently exist or has changed.
4273 * If it is now in_sync, insert it. */
4274 dprintf("phys disk not found for %d: %d/%d ref %08x\n",
4275 dl->pdnum, dl->major, dl->minor,
4276 be32_to_cpu(dl->disk.refnum));
4277 dprintf("array %u disk %u ref %08x pd %d\n",
4278 inst, n_bvd,
4279 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4280 if ((state & DS_INSYNC) && ! (state & DS_FAULTY) &&
4281 dl->pdnum >= 0) {
4282 pd = dl->pdnum;
4283 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4284 LBA_OFFSET(ddf, vc)[n_bvd] =
4285 cpu_to_be64(mdi->data_offset);
4286 be16_clear(ddf->phys->entries[pd].type,
4287 cpu_to_be16(DDF_Global_Spare));
4288 be16_set(ddf->phys->entries[pd].type,
4289 cpu_to_be16(DDF_Active_in_VD));
4290 update = 1;
4291 }
4292 } else {
4293 be16 old = ddf->phys->entries[pd].state;
4294 if (state & DS_FAULTY)
4295 be16_set(ddf->phys->entries[pd].state,
4296 cpu_to_be16(DDF_Failed));
4297 if (state & DS_INSYNC) {
4298 be16_set(ddf->phys->entries[pd].state,
4299 cpu_to_be16(DDF_Online));
4300 be16_clear(ddf->phys->entries[pd].state,
4301 cpu_to_be16(DDF_Rebuilding));
4302 }
4303 if (!be16_eq(old, ddf->phys->entries[pd].state))
4304 update = 1;
4305 }
4306
4307 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4308 be32_to_cpu(dl->disk.refnum), state,
4309 be16_to_cpu(ddf->phys->entries[pd].state));
4310
4311 /* Now we need to check the state of the array and update
4312 * virtual_disk.entries[n].state.
4313 * It needs to be one of "optimal", "degraded", "failed".
4314 * I don't understand 'deleted' or 'missing'.
4315 */
4316 state = get_svd_state(ddf, vcl);
4317
4318 if (ddf->virt->entries[inst].state !=
4319 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4320 | state)) {
4321 ddf->virt->entries[inst].state =
4322 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4323 | state;
4324 update = 1;
4325 }
4326 if (update)
4327 ddf_set_updates_pending(ddf, vc);
4328 }
4329
4330 static void ddf_sync_metadata(struct supertype *st)
4331 {
4332 /*
4333 * Write all data to all devices.
4334 * Later, we might be able to track whether only local changes
4335 * have been made, or whether any global data has been changed,
4336 * but ddf is sufficiently weird that it probably always
4337 * changes global data ....
4338 */
4339 struct ddf_super *ddf = st->sb;
4340 if (!ddf->updates_pending)
4341 return;
4342 ddf->updates_pending = 0;
4343 __write_init_super_ddf(st);
4344 dprintf("ddf: sync_metadata\n");
4345 }
4346
4347 static int del_from_conflist(struct vcl **list, const char *guid)
4348 {
4349 struct vcl **p;
4350 int found = 0;
4351 for (p = list; p && *p; p = &((*p)->next))
4352 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4353 found = 1;
4354 *p = (*p)->next;
4355 }
4356 return found;
4357 }
4358
4359 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4360 {
4361 struct dl *dl;
4362 unsigned int vdnum, i;
4363 vdnum = find_vde_by_guid(ddf, guid);
4364 if (vdnum == DDF_NOTFOUND) {
4365 pr_err("could not find VD %s\n", guid_str(guid));
4366 return -1;
4367 }
4368 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4369 pr_err("could not find conf %s\n", guid_str(guid));
4370 return -1;
4371 }
4372 for (dl = ddf->dlist; dl; dl = dl->next)
4373 for (i = 0; i < ddf->max_part; i++)
4374 if (dl->vlist[i] != NULL &&
4375 !memcmp(dl->vlist[i]->conf.guid, guid,
4376 DDF_GUID_LEN))
4377 dl->vlist[i] = NULL;
4378 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4379 dprintf("deleted %s\n", guid_str(guid));
4380 return 0;
4381 }
4382
4383 static int kill_subarray_ddf(struct supertype *st, char *subarray_id)
4384 {
4385 struct ddf_super *ddf = st->sb;
4386 /*
4387 * currentconf is set in container_content_ddf,
4388 * called with subarray arg
4389 */
4390 struct vcl *victim = ddf->currentconf;
4391 struct vd_config *conf;
4392 unsigned int vdnum;
4393
4394 ddf->currentconf = NULL;
4395 if (!victim) {
4396 pr_err("nothing to kill\n");
4397 return -1;
4398 }
4399 conf = &victim->conf;
4400 vdnum = find_vde_by_guid(ddf, conf->guid);
4401 if (vdnum == DDF_NOTFOUND) {
4402 pr_err("could not find VD %s\n", guid_str(conf->guid));
4403 return -1;
4404 }
4405 if (st->update_tail) {
4406 struct virtual_disk *vd;
4407 int len = sizeof(struct virtual_disk)
4408 + sizeof(struct virtual_entry);
4409 vd = xmalloc(len);
4410 if (vd == NULL) {
4411 pr_err("failed to allocate %d bytes\n", len);
4412 return -1;
4413 }
4414 memset(vd, 0, len);
4415 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4416 vd->populated_vdes = cpu_to_be16(0);
4417 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4418 /* we use DDF_state_deleted as marker */
4419 vd->entries[0].state = DDF_state_deleted;
4420 append_metadata_update(st, vd, len);
4421 } else {
4422 _kill_subarray_ddf(ddf, conf->guid);
4423 ddf_set_updates_pending(ddf, NULL);
4424 ddf_sync_metadata(st);
4425 }
4426 return 0;
4427 }
4428
4429 static void copy_matching_bvd(struct ddf_super *ddf,
4430 struct vd_config *conf,
4431 const struct metadata_update *update)
4432 {
4433 unsigned int mppe =
4434 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4435 unsigned int len = ddf->conf_rec_len * 512;
4436 char *p;
4437 struct vd_config *vc;
4438 for (p = update->buf; p < update->buf + update->len; p += len) {
4439 vc = (struct vd_config *) p;
4440 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4441 memcpy(conf->phys_refnum, vc->phys_refnum,
4442 mppe * (sizeof(__u32) + sizeof(__u64)));
4443 return;
4444 }
4445 }
4446 pr_err("no match for BVD %d of %s in update\n",
4447 conf->sec_elmnt_seq, guid_str(conf->guid));
4448 }
4449
4450 static void ddf_process_phys_update(struct supertype *st,
4451 struct metadata_update *update)
4452 {
4453 struct ddf_super *ddf = st->sb;
4454 struct phys_disk *pd;
4455 unsigned int ent;
4456
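/* The update carries a phys_disk header plus exactly one entry;
 * used_pdes holds the index of the slot being added or removed. */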
4457 pd = (struct phys_disk*)update->buf;
4458 ent = be16_to_cpu(pd->used_pdes);
4459 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4460 return;
4461 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4462 struct dl **dlp;
4463 /* removing this disk. */
4464 be16_set(ddf->phys->entries[ent].state,
4465 cpu_to_be16(DDF_Missing));
4466 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4467 struct dl *dl = *dlp;
4468 if (dl->pdnum == (signed)ent) {
4469 close(dl->fd);
4470 dl->fd = -1;
4471 *dlp = dl->next;
4472 update->space = dl->devname;
4473 *(void**)dl = update->space_list;
4474 update->space_list = (void**)dl;
4475 break;
4476 }
4477 }
4478 ddf_set_updates_pending(ddf, NULL);
4479 return;
4480 }
4481 if (!all_ff(ddf->phys->entries[ent].guid))
4482 return;
4483 ddf->phys->entries[ent] = pd->entries[0];
4484 ddf->phys->used_pdes = cpu_to_be16
4485 (1 + be16_to_cpu(ddf->phys->used_pdes));
4486 ddf_set_updates_pending(ddf, NULL);
4487 if (ddf->add_list) {
4488 struct active_array *a;
4489 struct dl *al = ddf->add_list;
4490 ddf->add_list = al->next;
4491
4492 al->next = ddf->dlist;
4493 ddf->dlist = al;
4494
4495 /* As a device has been added, we should check
4496 * for any degraded devices that might make
4497 * use of this spare */
4498 for (a = st->arrays ; a; a=a->next)
4499 a->check_degraded = 1;
4500 }
4501 }
4502
4503 static void ddf_process_virt_update(struct supertype *st,
4504 struct metadata_update *update)
4505 {
4506 struct ddf_super *ddf = st->sb;
4507 struct virtual_disk *vd;
4508 unsigned int ent;
4509
4510 vd = (struct virtual_disk*)update->buf;
4511
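/* DDF_state_deleted is the marker written by kill_subarray_ddf();
 * anything else is a request to add a new virtual-disk entry. */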
4512 if (vd->entries[0].state == DDF_state_deleted) {
4513 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4514 return;
4515 } else {
4516 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4517 if (ent != DDF_NOTFOUND) {
4518 dprintf("VD %s exists already in slot %d\n",
4519 guid_str(vd->entries[0].guid),
4520 ent);
4521 return;
4522 }
4523 ent = find_unused_vde(ddf);
4524 if (ent == DDF_NOTFOUND)
4525 return;
4526 ddf->virt->entries[ent] = vd->entries[0];
4527 ddf->virt->populated_vdes =
4528 cpu_to_be16(
4529 1 + be16_to_cpu(
4530 ddf->virt->populated_vdes));
4531 dprintf("added VD %s in slot %d(s=%02x i=%02x)\n",
4532 guid_str(vd->entries[0].guid), ent,
4533 ddf->virt->entries[ent].state,
4534 ddf->virt->entries[ent].init_state);
4535 }
4536 ddf_set_updates_pending(ddf, NULL);
4537 }
4538
4539 static void ddf_remove_failed(struct ddf_super *ddf)
4540 {
4541 /* Now remove any 'Failed' devices that are not part
4542 * of any VD. They will have the Transition flag set.
4543 * Once done, we need to update all dl->pdnum numbers.
4544 */
4545 unsigned int pdnum;
4546 unsigned int pd2 = 0;
4547 struct dl *dl;
4548
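/* Compact the physical-disk table in place: pd2 is the write index.
 * Entries that are Failed+Transition and no longer in dlist are dropped,
 * dl->pdnum values are remapped, and the freed tail slots are
 * invalidated below. */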
4549 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
4550 pdnum++) {
4551 if (be32_to_cpu(ddf->phys->entries[pdnum].refnum) ==
4552 0xFFFFFFFF)
4553 continue;
4554 if (be16_and(ddf->phys->entries[pdnum].state,
4555 cpu_to_be16(DDF_Failed)) &&
4556 be16_and(ddf->phys->entries[pdnum].state,
4557 cpu_to_be16(DDF_Transition))) {
4558 /* skip (i.e. remove) this one unless it is still in dlist */
4559 for (dl = ddf->dlist; dl; dl = dl->next)
4560 if (dl->pdnum == (int)pdnum)
4561 break;
4562 if (!dl)
4563 continue;
4564 }
4565 if (pdnum == pd2)
4566 pd2++;
4567 else {
4568 ddf->phys->entries[pd2] =
4569 ddf->phys->entries[pdnum];
4570 for (dl = ddf->dlist; dl; dl = dl->next)
4571 if (dl->pdnum == (int)pdnum)
4572 dl->pdnum = pd2;
4573 pd2++;
4574 }
4575 }
4576 ddf->phys->used_pdes = cpu_to_be16(pd2);
4577 while (pd2 < pdnum) {
4578 memset(ddf->phys->entries[pd2].guid, 0xff,
4579 DDF_GUID_LEN);
4580 pd2++;
4581 }
4582 }
4583
4584 static void ddf_update_vlist(struct ddf_super *ddf, struct dl *dl)
4585 {
4586 struct vcl *vcl;
4587 unsigned int vn = 0;
4588 int in_degraded = 0;
4589
4590 if (dl->pdnum < 0)
4591 return;
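/* Rebuild dl->vlist[] by scanning every VD configuration for a reference
 * to this disk, then refresh the Global_Spare/Spare/Active_in_VD type
 * bits to match. */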
4592 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4593 unsigned int dn, ibvd;
4594 const struct vd_config *conf;
4595 int vstate;
4596 dn = get_pd_index_from_refnum(vcl,
4597 dl->disk.refnum,
4598 ddf->mppe,
4599 &conf, &ibvd);
4600 if (dn == DDF_NOTFOUND)
4601 continue;
4602 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4603 dl->pdnum,
4604 be32_to_cpu(dl->disk.refnum),
4605 guid_str(conf->guid),
4606 conf->sec_elmnt_seq, vn);
4607 /* Clear the Transition flag */
4608 if (be16_and
4609 (ddf->phys->entries[dl->pdnum].state,
4610 cpu_to_be16(DDF_Failed)))
4611 be16_clear(ddf->phys
4612 ->entries[dl->pdnum].state,
4613 cpu_to_be16(DDF_Transition));
4614 dl->vlist[vn++] = vcl;
4615 vstate = ddf->virt->entries[vcl->vcnum].state
4616 & DDF_state_mask;
4617 if (vstate == DDF_state_degraded ||
4618 vstate == DDF_state_part_optimal)
4619 in_degraded = 1;
4620 }
4621 while (vn < ddf->max_part)
4622 dl->vlist[vn++] = NULL;
4623 if (dl->vlist[0]) {
4624 be16_clear(ddf->phys->entries[dl->pdnum].type,
4625 cpu_to_be16(DDF_Global_Spare));
4626 if (!be16_and(ddf->phys
4627 ->entries[dl->pdnum].type,
4628 cpu_to_be16(DDF_Active_in_VD))) {
4629 be16_set(ddf->phys
4630 ->entries[dl->pdnum].type,
4631 cpu_to_be16(DDF_Active_in_VD));
4632 if (in_degraded)
4633 be16_set(ddf->phys
4634 ->entries[dl->pdnum]
4635 .state,
4636 cpu_to_be16
4637 (DDF_Rebuilding));
4638 }
4639 }
4640 if (dl->spare) {
4641 be16_clear(ddf->phys->entries[dl->pdnum].type,
4642 cpu_to_be16(DDF_Global_Spare));
4643 be16_set(ddf->phys->entries[dl->pdnum].type,
4644 cpu_to_be16(DDF_Spare));
4645 }
4646 if (!dl->vlist[0] && !dl->spare) {
4647 be16_set(ddf->phys->entries[dl->pdnum].type,
4648 cpu_to_be16(DDF_Global_Spare));
4649 be16_clear(ddf->phys->entries[dl->pdnum].type,
4650 cpu_to_be16(DDF_Spare));
4651 be16_clear(ddf->phys->entries[dl->pdnum].type,
4652 cpu_to_be16(DDF_Active_in_VD));
4653 }
4654 }
4655
4656 static void ddf_process_conf_update(struct supertype *st,
4657 struct metadata_update *update)
4658 {
4659 struct ddf_super *ddf = st->sb;
4660 struct vd_config *vc;
4661 struct vcl *vcl;
4662 struct dl *dl;
4663 unsigned int ent;
4664 unsigned int pdnum, len;
4665
4666 vc = (struct vd_config*)update->buf;
4667 len = ddf->conf_rec_len * 512;
4668 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4669 pr_err("%s: insufficient data (%d) for %u BVDs\n",
4670 guid_str(vc->guid), update->len,
4671 vc->sec_elmnt_count);
4672 return;
4673 }
4674 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4675 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4676 break;
4677 dprintf("conf update for %s (%s)\n",
4678 guid_str(vc->guid), (vcl ? "old" : "new"));
4679 if (vcl) {
4680 /* An update, just copy the phys_refnum and lba_offset
4681 * fields
4682 */
4683 unsigned int i;
4684 unsigned int k;
4685 copy_matching_bvd(ddf, &vcl->conf, update);
4686 for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
4687 dprintf("BVD %u has %08x at %llu\n", 0,
4688 be32_to_cpu(vcl->conf.phys_refnum[k]),
4689 be64_to_cpu(LBA_OFFSET(ddf,
4690 &vcl->conf)[k]));
4691 for (i = 1; i < vc->sec_elmnt_count; i++) {
4692 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4693 update);
4694 for (k = 0; k < be16_to_cpu(
4695 vc->prim_elmnt_count); k++)
4696 dprintf("BVD %u has %08x at %llu\n", i,
4697 be32_to_cpu
4698 (vcl->other_bvds[i-1]->
4699 phys_refnum[k]),
4700 be64_to_cpu
4701 (LBA_OFFSET
4702 (ddf,
4703 vcl->other_bvds[i-1])[k]));
4704 }
4705 } else {
4706 /* A new VD_CONF */
4707 unsigned int i;
4708 if (!update->space)
4709 return;
4710 vcl = update->space;
4711 update->space = NULL;
4712 vcl->next = ddf->conflist;
4713 memcpy(&vcl->conf, vc, len);
4714 ent = find_vde_by_guid(ddf, vc->guid);
4715 if (ent == DDF_NOTFOUND)
4716 return;
4717 vcl->vcnum = ent;
4718 ddf->conflist = vcl;
4719 for (i = 1; i < vc->sec_elmnt_count; i++)
4720 memcpy(vcl->other_bvds[i-1],
4721 update->buf + len * i, len);
4722 }
4723 /* Set DDF_Transition on all Failed devices - to help
4724 * us detect those that are no longer in use
4725 */
4726 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
4727 pdnum++)
4728 if (be16_and(ddf->phys->entries[pdnum].state,
4729 cpu_to_be16(DDF_Failed)))
4730 be16_set(ddf->phys->entries[pdnum].state,
4731 cpu_to_be16(DDF_Transition));
4732
4733 /* Now make sure vlist is correct for each dl. */
4734 for (dl = ddf->dlist; dl; dl = dl->next)
4735 ddf_update_vlist(ddf, dl);
4736 ddf_remove_failed(ddf);
4737
4738 ddf_set_updates_pending(ddf, vc);
4739 }
4740
4741 static void ddf_process_update(struct supertype *st,
4742 struct metadata_update *update)
4743 {
4744 /* Apply this update to the metadata.
4745 * The first 4 bytes are a DDF_*_MAGIC which guides
4746 * our actions.
4747 * Possible updates are:
4748 * DDF_PHYS_RECORDS_MAGIC
4749 * Add a new physical device or remove an old one.
4750 * Changes to this record only happen implicitly.
4751 * used_pdes is the device number.
4752 * DDF_VIRT_RECORDS_MAGIC
4753 * Add a new VD. Possibly also change the 'access' bits.
4754 * populated_vdes is the entry number.
4755 * DDF_VD_CONF_MAGIC
4756 * New or updated VD. The VIRT_RECORD must already
4757 * exist. For an update, phys_refnum and lba_offset
4758 * (at least) are updated, and the VD_CONF must
4759 * be written to precisely those devices listed with
4760 * a phys_refnum.
4761 * DDF_SPARE_ASSIGN_MAGIC
4762 * replacement Spare Assignment Record... but for which device?
4763 *
4764 * So, e.g.:
4765 * - to create a new array, we send a VIRT_RECORD and
4766 * a VD_CONF. Then assemble and start the array.
4767 * - to activate a spare we send a VD_CONF to add the phys_refnum
4768 * and offset. This will also mark the spare as active with
4769 * a spare-assignment record.
4770 */
4771 be32 *magic = (be32 *)update->buf;
4772
4773 dprintf("Process update %x\n", be32_to_cpu(*magic));
4774
4775 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4776 if (update->len == (sizeof(struct phys_disk) +
4777 sizeof(struct phys_disk_entry)))
4778 ddf_process_phys_update(st, update);
4779 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4780 if (update->len == (sizeof(struct virtual_disk) +
4781 sizeof(struct virtual_entry)))
4782 ddf_process_virt_update(st, update);
4783 } else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4784 ddf_process_conf_update(st, update);
4785 }
4786 /* case DDF_SPARE_ASSIGN_MAGIC */
4787 }
4788
4789 static int ddf_prepare_update(struct supertype *st,
4790 struct metadata_update *update)
4791 {
4792 /* This update arrived at managemon.
4793 * We are about to pass it to monitor.
4794 * If a malloc is needed, do it here.
4795 */
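/* For a DDF_VD_CONF_MAGIC update, pre-allocate the vcl now (managemon
 * may allocate); ddf_process_conf_update() consumes it via update->space
 * instead of allocating in the monitor. */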
4796 struct ddf_super *ddf = st->sb;
4797 be32 *magic;
4798 if (update->len < 4)
4799 return 0;
4800 magic = (be32 *)update->buf;
4801 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4802 struct vcl *vcl;
4803 struct vd_config *conf;
4804 if (update->len < (int)sizeof(*conf))
4805 return 0;
4806 conf = (struct vd_config *) update->buf;
4807 if (posix_memalign(&update->space, 512,
4808 offsetof(struct vcl, conf)
4809 + ddf->conf_rec_len * 512) != 0) {
4810 update->space = NULL;
4811 return 0;
4812 }
4813 vcl = update->space;
4814 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4815 if (alloc_other_bvds(ddf, vcl) != 0) {
4816 free(update->space);
4817 update->space = NULL;
4818 return 0;
4819 }
4820 }
4821 return 1;
4822 }
4823
4824 /*
4825 * Check degraded state of a RAID10.
4826 * Returns 2 for good, 1 for degraded, 0 for failed, and -1 for error.
4827 */
4828 static int raid10_degraded(struct mdinfo *info)
4829 {
4830 int n_prim, n_bvds;
4831 int i;
4832 struct mdinfo *d;
4833 char *found;
4834 int ret = -1;
4835
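/* DDF RAID10 uses the md 'near' layout, so layout & ~0x100 is the number
 * of copies per mirror set. Disks form n_bvds consecutive sets of n_prim
 * copies: the array has failed if any set has no working member, and is
 * degraded if any set is short. */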
4836 n_prim = info->array.layout & ~0x100;
4837 n_bvds = info->array.raid_disks / n_prim;
4838 found = xmalloc(n_bvds);
4839 if (found == NULL)
4840 return ret;
4841 memset(found, 0, n_bvds);
4842 for (d = info->devs; d; d = d->next) {
4843 i = d->disk.raid_disk / n_prim;
4844 if (i >= n_bvds) {
4845 pr_err("BUG: invalid raid disk\n");
4846 goto out;
4847 }
4848 if (is_fd_valid(d->state_fd))
4849 found[i]++;
4850 }
4851 ret = 2;
4852 for (i = 0; i < n_bvds; i++)
4853 if (!found[i]) {
4854 dprintf("BVD %d/%d failed\n", i, n_bvds);
4855 ret = 0;
4856 goto out;
4857 } else if (found[i] < n_prim) {
4858 dprintf("BVD %d/%d degraded\n", i, n_bvds);
4859 ret = 1;
4860 }
4861 out:
4862 free(found);
4863 return ret;
4864 }
4865
4866 /*
4867 * Check if the array 'a' is degraded but not failed.
4868 * If it is, find as many spares as are available and needed and
4869 * arrange for their inclusion.
4870 * We only choose devices which are not already in the array,
4871 * and prefer those with a spare-assignment to this array.
4872 * Otherwise we choose global spares - assuming always that
4873 * there is enough room.
4874 * For each spare that we assign, we return an 'mdinfo' which
4875 * describes the position for the device in the array.
4876 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4877 * the new phys_refnum and lba_offset values.
4878 *
4879 * Only worry about BVDs at the moment.
4880 */
4881 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4882 struct metadata_update **updates)
4883 {
4884 int working = 0;
4885 struct mdinfo *d;
4886 struct ddf_super *ddf = a->container->sb;
4887 int global_ok = 0;
4888 struct mdinfo *rv = NULL;
4889 struct mdinfo *di;
4890 struct metadata_update *mu;
4891 struct dl *dl;
4892 int i;
4893 unsigned int j;
4894 struct vcl *vcl;
4895 struct vd_config *vc;
4896 unsigned int n_bvd;
4897
4898 for (d = a->info.devs ; d ; d = d->next) {
4899 if ((d->curr_state & DS_FAULTY) &&
4900 d->state_fd >= 0)
4901 /* wait for Removal to happen */
4902 return NULL;
4903 if (d->state_fd >= 0)
4904 working ++;
4905 }
4906
4907 dprintf("working=%d (%d) level=%d\n", working,
4908 a->info.array.raid_disks,
4909 a->info.array.level);
4910 if (working == a->info.array.raid_disks)
4911 return NULL; /* array not degraded */
4912 switch (a->info.array.level) {
4913 case 1:
4914 if (working == 0)
4915 return NULL; /* failed */
4916 break;
4917 case 4:
4918 case 5:
4919 if (working < a->info.array.raid_disks - 1)
4920 return NULL; /* failed */
4921 break;
4922 case 6:
4923 if (working < a->info.array.raid_disks - 2)
4924 return NULL; /* failed */
4925 break;
4926 case 10:
4927 if (raid10_degraded(&a->info) < 1)
4928 return NULL;
4929 break;
4930 default: /* concat or stripe */
4931 return NULL; /* failed */
4932 }
4933
4934 /* For each slot, if it is not working, find a spare */
4935 dl = ddf->dlist;
4936 for (i = 0; i < a->info.array.raid_disks; i++) {
4937 for (d = a->info.devs ; d ; d = d->next)
4938 if (d->disk.raid_disk == i)
4939 break;
4940 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4941 if (d && (d->state_fd >= 0))
4942 continue;
4943
4944 /* OK, this device needs recovery. Find a spare */
4945 again:
4946 for ( ; dl ; dl = dl->next) {
4947 unsigned long long esize;
4948 unsigned long long pos;
4949 struct mdinfo *d2;
4950 int is_global = 0;
4951 int is_dedicated = 0;
4952 be16 state;
4953
4954 if (dl->pdnum < 0)
4955 continue;
4956 state = ddf->phys->entries[dl->pdnum].state;
4957 if (be16_and(state,
4958 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4959 !be16_and(state,
4960 cpu_to_be16(DDF_Online)))
4961 continue;
4962
4963 /* If in this array, skip */
4964 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4965 if (d2->state_fd >= 0 &&
4966 d2->disk.major == dl->major &&
4967 d2->disk.minor == dl->minor) {
4968 dprintf("%x:%x (%08x) already in array\n",
4969 dl->major, dl->minor,
4970 be32_to_cpu(dl->disk.refnum));
4971 break;
4972 }
4973 if (d2)
4974 continue;
4975 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4976 cpu_to_be16(DDF_Spare))) {
4977 /* Check spare assign record */
4978 if (dl->spare) {
4979 if (dl->spare->type & DDF_spare_dedicated) {
4980 /* check spare_ents for guid */
4981 unsigned int j;
4982 for (j = 0 ;
4983 j < be16_to_cpu
4984 (dl->spare
4985 ->populated);
4986 j++) {
4987 if (memcmp(dl->spare->spare_ents[j].guid,
4988 ddf->virt->entries[a->info.container_member].guid,
4989 DDF_GUID_LEN) == 0)
4990 is_dedicated = 1;
4991 }
4992 } else
4993 is_global = 1;
4994 }
4995 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
4996 cpu_to_be16(DDF_Global_Spare))) {
4997 is_global = 1;
4998 } else if (!be16_and(ddf->phys
4999 ->entries[dl->pdnum].state,
5000 cpu_to_be16(DDF_Failed))) {
5001 /* we can possibly use some of this */
5002 is_global = 1;
5003 }
5004 if ( ! (is_dedicated ||
5005 (is_global && global_ok))) {
5006 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
5007 is_dedicated, is_global);
5008 continue;
5009 }
5010
5011 /* We are allowed to use this device - is there space?
5012 * We need a->info.component_size sectors */
5013 esize = a->info.component_size;
5014 pos = find_space(ddf, dl, INVALID_SECTORS, &esize);
5015
5016 if (esize < a->info.component_size) {
5017 dprintf("%x:%x has no room: %llu %llu\n",
5018 dl->major, dl->minor,
5019 esize, a->info.component_size);
5020 /* No room */
5021 continue;
5022 }
5023
5024 /* Cool, we have a device with some space at pos */
5025 di = xcalloc(1, sizeof(*di));
5026 di->disk.number = i;
5027 di->disk.raid_disk = i;
5028 di->disk.major = dl->major;
5029 di->disk.minor = dl->minor;
5030 di->disk.state = 0;
5031 di->recovery_start = 0;
5032 di->data_offset = pos;
5033 di->component_size = a->info.component_size;
5034 di->next = rv;
5035 rv = di;
5036 dprintf("%x:%x (%08x) to be %d at %llu\n",
5037 dl->major, dl->minor,
5038 be32_to_cpu(dl->disk.refnum), i, pos);
5039
5040 break;
5041 }
5042 if (!dl && ! global_ok) {
5043 /* not enough dedicated spares, try global */
5044 global_ok = 1;
5045 dl = ddf->dlist;
5046 goto again;
5047 }
5048 }
5049
5050 if (!rv)
5051 /* No spares found */
5052 return rv;
5053 /* Now 'rv' has a list of devices to return.
5054 * Create a metadata_update record to update the
5055 * phys_refnum and lba_offset values
5056 */
5057 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5058 &n_bvd, &vcl);
5059 if (vc == NULL) {
5060 free(rv);
5061 return NULL;
5062 }
5063
5064 mu = xmalloc(sizeof(*mu));
5065 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5066 free(mu);
5067 free(rv);
5068 return NULL;
5069 }
5070
5071 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5072 mu->buf = xmalloc(mu->len);
5073 mu->space = NULL;
5074 mu->space_list = NULL;
5075 mu->next = *updates;
5076 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5077 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5078 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5079 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5080
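/* For each spare being added, work out which BVD (i_sec) and which slot
 * within it (i_prim) the new disk occupies, then record its refnum and
 * LBA offset in the update buffer. */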
5081 vc = (struct vd_config*)mu->buf;
5082 for (di = rv ; di ; di = di->next) {
5083 unsigned int i_sec, i_prim;
5084 i_sec = di->disk.raid_disk
5085 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5086 i_prim = di->disk.raid_disk
5087 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5088 vc = (struct vd_config *)(mu->buf
5089 + i_sec * ddf->conf_rec_len * 512);
5090 for (dl = ddf->dlist; dl; dl = dl->next)
5091 if (dl->major == di->disk.major &&
5092 dl->minor == di->disk.minor)
5093 break;
5094 if (!dl || dl->pdnum < 0) {
5095 pr_err("BUG: can't find disk %d (%d/%d)\n",
5096 di->disk.raid_disk,
5097 di->disk.major, di->disk.minor);
5098 free(mu);
5099 free(rv);
5100 return NULL;
5101 }
5102 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5103 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5104 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5105 be32_to_cpu(vc->phys_refnum[i_prim]),
5106 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5107 }
5108 *updates = mu;
5109 return rv;
5110 }
5111
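/* Default md layout for each level, used by default_geometry_ddf() below:
 * left-symmetric for RAID5, rotating-N-continue for RAID6 and near-2
 * (0x102) for RAID10. */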
5112 static int ddf_level_to_layout(int level)
5113 {
5114 switch(level) {
5115 case 0:
5116 case 1:
5117 return 0;
5118 case 5:
5119 return ALGORITHM_LEFT_SYMMETRIC;
5120 case 6:
5121 return ALGORITHM_ROTATING_N_CONTINUE;
5122 case 10:
5123 return 0x102;
5124 default:
5125 return UnSet;
5126 }
5127 }
5128
5129 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5130 {
5131 if (level && *level == UnSet)
5132 *level = LEVEL_CONTAINER;
5133
5134 if (level && layout && *layout == UnSet)
5135 *layout = ddf_level_to_layout(*level);
5136 }
5137
5138 struct superswitch super_ddf = {
5139 .examine_super = examine_super_ddf,
5140 .brief_examine_super = brief_examine_super_ddf,
5141 .brief_examine_subarrays = brief_examine_subarrays_ddf,
5142 .export_examine_super = export_examine_super_ddf,
5143 .detail_super = detail_super_ddf,
5144 .brief_detail_super = brief_detail_super_ddf,
5145 .validate_geometry = validate_geometry_ddf,
5146 .write_init_super = write_init_super_ddf,
5147 .add_to_super = add_to_super_ddf,
5148 .remove_from_super = remove_from_super_ddf,
5149 .load_container = load_container_ddf,
5150 .copy_metadata = copy_metadata_ddf,
5151 .kill_subarray = kill_subarray_ddf,
5152 .match_home = match_home_ddf,
5153 .uuid_from_super= uuid_from_super_ddf,
5154 .getinfo_super = getinfo_super_ddf,
5155
5156 .avail_size = avail_size_ddf,
5157
5158 .compare_super = compare_super_ddf,
5159
5160 .load_super = load_super_ddf,
5161 .init_super = init_super_ddf,
5162 .store_super = store_super_ddf,
5163 .free_super = free_super_ddf,
5164 .match_metadata_desc = match_metadata_desc_ddf,
5165 .container_content = container_content_ddf,
5166 .default_geometry = default_geometry_ddf,
5167
5168 .external = 1,
5169 .swapuuid = 0,
5170
5171 /* for mdmon */
5172 .open_new = ddf_open_new,
5173 .set_array_state= ddf_set_array_state,
5174 .set_disk = ddf_set_disk,
5175 .sync_metadata = ddf_sync_metadata,
5176 .process_update = ddf_process_update,
5177 .prepare_update = ddf_prepare_update,
5178 .activate_spare = ddf_activate_spare,
5179 .name = "ddf",
5180 };