super-ddf.c
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2014 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33 #include <stddef.h>
34
35 /* a non-official T10 name for creation GUIDs */
36 static char T10[] = "Linux-MD";
37
38 /* DDF timestamps are 1980 based, so we need to add
39  * seconds-in-decade-of-seventies to convert to linux timestamps.
40 * 10 years with 2 leap years.
41 */
42 #define DECADE (3600*24*(365*10+2))
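/* Illustrative example (not from the original source): converting between
 * the 1980-based DDF timestamps and Unix time is just an offset by DECADE,
 * e.g.
 *
 *	unix_time = (time_t)be32_to_cpu(ddf_timestamp) + DECADE;
 *	ddf_timestamp = cpu_to_be32(time(0) - DECADE);
 *
 * which is the pattern used for vd_config.timestamp and the header
 * timestamps further below.
 */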
43 unsigned long crc32(
44 unsigned long crc,
45 const unsigned char *buf,
46 unsigned len);
47
48 #define DDF_NOTFOUND (~0U)
49 #define DDF_CONTAINER (DDF_NOTFOUND-1)
50
51 /* Default for safe_mode_delay. Same value as for IMSM.
52 */
53 static const int DDF_SAFE_MODE_DELAY = 4000;
54
55 /* The DDF metadata handling.
56 * DDF metadata lives at the end of the device.
57 * The last 512 byte block provides an 'anchor' which is used to locate
58 * the rest of the metadata which usually lives immediately behind the anchor.
59 *
60 * Note:
61 * - all multibyte numeric fields are bigendian.
62 * - all strings are space padded.
63 *
64 */
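/* Illustrative example (not from the original source): because the anchor
 * is always the last 512-byte sector of the device, locating DDF metadata
 * reduces to
 *
 *	get_dev_size(fd, NULL, &dsize);
 *	lseek64(fd, dsize - 512, 0);
 *	read(fd, &super->anchor, 512);
 *
 * which is what load_ddf_headers() does further below; the primary and
 * secondary headers are then found via the primary_lba/secondary_lba
 * fields stored in that anchor.
 */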
65
66 typedef struct __be16 {
67 __u16 _v16;
68 } be16;
69 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
70 #define be16_and(x, y) ((x)._v16 & (y)._v16)
71 #define be16_or(x, y) ((x)._v16 | (y)._v16)
72 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
73 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
74
75 typedef struct __be32 {
76 __u32 _v32;
77 } be32;
78 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
79
80 typedef struct __be64 {
81 __u64 _v64;
82 } be64;
83 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
84
85 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
86 static inline be16 cpu_to_be16(__u16 x)
87 {
88 be16 be = { ._v16 = __cpu_to_be16(x) };
89 return be;
90 }
91
92 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
93 static inline be32 cpu_to_be32(__u32 x)
94 {
95 be32 be = { ._v32 = __cpu_to_be32(x) };
96 return be;
97 }
98
99 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
100 static inline be64 cpu_to_be64(__u64 x)
101 {
102 be64 be = { ._v64 = __cpu_to_be64(x) };
103 return be;
104 }
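/* Illustrative note (not from the original source): wrapping the on-disk
 * big-endian fields in single-member structs means they can only be
 * touched through the helpers above; a plain '==' or '+' applied to a be32
 * field will not compile.  Updating a stored sequence number therefore
 * looks like
 *
 *	hdr->seq = cpu_to_be32(be32_to_cpu(hdr->seq) + 1);
 *
 * as done for ddf->active->seq further below.
 */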
105
106 /* Primary Raid Level (PRL) */
107 #define DDF_RAID0 0x00
108 #define DDF_RAID1 0x01
109 #define DDF_RAID3 0x03
110 #define DDF_RAID4 0x04
111 #define DDF_RAID5 0x05
112 #define DDF_RAID1E 0x11
113 #define DDF_JBOD 0x0f
114 #define DDF_CONCAT 0x1f
115 #define DDF_RAID5E 0x15
116 #define DDF_RAID5EE 0x25
117 #define DDF_RAID6 0x06
118
119 /* Raid Level Qualifier (RLQ) */
120 #define DDF_RAID0_SIMPLE 0x00
121 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
122 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
123 #define DDF_RAID3_0 0x00 /* parity in first extent */
124 #define DDF_RAID3_N 0x01 /* parity in last extent */
125 #define DDF_RAID4_0 0x00 /* parity in first extent */
126 #define DDF_RAID4_N 0x01 /* parity in last extent */
127 /* these apply to raid5e and raid5ee as well */
128 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
129 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
130 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
131 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
132
133 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
134 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
135
136 /* Secondary RAID Level (SRL) */
137 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
138 #define DDF_2MIRRORED 0x01
139 #define DDF_2CONCAT 0x02
140 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
141
142 /* Magic numbers */
143 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
144 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
145 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
146 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
147 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
148 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
149 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
150 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
151 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
152 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
153
154 #define DDF_GUID_LEN 24
155 #define DDF_REVISION_0 "01.00.00"
156 #define DDF_REVISION_2 "01.02.00"
157
158 struct ddf_header {
159 be32 magic; /* DDF_HEADER_MAGIC */
160 be32 crc;
161 char guid[DDF_GUID_LEN];
162 char revision[8]; /* 01.02.00 */
163 be32 seq; /* starts at '1' */
164 be32 timestamp;
165 __u8 openflag;
166 __u8 foreignflag;
167 __u8 enforcegroups;
168 __u8 pad0; /* 0xff */
169 __u8 pad1[12]; /* 12 * 0xff */
170 /* 64 bytes so far */
171 __u8 header_ext[32]; /* reserved: fill with 0xff */
172 be64 primary_lba;
173 be64 secondary_lba;
174 __u8 type;
175 __u8 pad2[3]; /* 0xff */
176 be32 workspace_len; /* sectors for vendor space -
177 * at least 32768(sectors) */
178 be64 workspace_lba;
179 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
180 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
181 be16 max_partitions; /* i.e. max num of configuration
182 record entries per disk */
183 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
184 *12/512) */
185 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
186 __u8 pad3[54]; /* 0xff */
187 /* 192 bytes so far */
188 be32 controller_section_offset;
189 be32 controller_section_length;
190 be32 phys_section_offset;
191 be32 phys_section_length;
192 be32 virt_section_offset;
193 be32 virt_section_length;
194 be32 config_section_offset;
195 be32 config_section_length;
196 be32 data_section_offset;
197 be32 data_section_length;
198 be32 bbm_section_offset;
199 be32 bbm_section_length;
200 be32 diag_space_offset;
201 be32 diag_space_length;
202 be32 vendor_offset;
203 be32 vendor_length;
204 /* 256 bytes so far */
205 __u8 pad4[256]; /* 0xff */
206 };
207
208 /* type field */
209 #define DDF_HEADER_ANCHOR 0x00
210 #define DDF_HEADER_PRIMARY 0x01
211 #define DDF_HEADER_SECONDARY 0x02
212
213 /* The content of the 'controller section' - global scope */
214 struct ddf_controller_data {
215 be32 magic; /* DDF_CONTROLLER_MAGIC */
216 be32 crc;
217 char guid[DDF_GUID_LEN];
218 struct controller_type {
219 be16 vendor_id;
220 be16 device_id;
221 be16 sub_vendor_id;
222 be16 sub_device_id;
223 } type;
224 char product_id[16];
225 __u8 pad[8]; /* 0xff */
226 __u8 vendor_data[448];
227 };
228
229 /* The content of phys_section - global scope */
230 struct phys_disk {
231 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
232 be32 crc;
233 be16 used_pdes; /* This is a counter, not a max - the list
234 * of used entries may not be dense */
235 be16 max_pdes;
236 __u8 pad[52];
237 struct phys_disk_entry {
238 char guid[DDF_GUID_LEN];
239 be32 refnum;
240 be16 type;
241 be16 state;
242 be64 config_size; /* DDF structures must be after here */
243 char path[18]; /* Another horrible structure really
244 * but is "used for information
245 * purposes only" */
246 __u8 pad[6];
247 } entries[0];
248 };
249
250 /* phys_disk_entry.type is a bitmap - bigendian remember */
251 #define DDF_Forced_PD_GUID 1
252 #define DDF_Active_in_VD 2
253 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
254 #define DDF_Spare 8 /* overrides Global_spare */
255 #define DDF_Foreign 16
256 #define DDF_Legacy 32 /* no DDF on this device */
257
258 #define DDF_Interface_mask 0xf00
259 #define DDF_Interface_SCSI 0x100
260 #define DDF_Interface_SAS 0x200
261 #define DDF_Interface_SATA 0x300
262 #define DDF_Interface_FC 0x400
263
264 /* phys_disk_entry.state is a bigendian bitmap */
265 #define DDF_Online 1
266 #define DDF_Failed 2 /* overrides 1,4,8 */
267 #define DDF_Rebuilding 4
268 #define DDF_Transition 8
269 #define DDF_SMART 16
270 #define DDF_ReadErrors 32
271 #define DDF_Missing 64
272
273 /* The content of the virt_section global scope */
274 struct virtual_disk {
275 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
276 be32 crc;
277 be16 populated_vdes;
278 be16 max_vdes;
279 __u8 pad[52];
280 struct virtual_entry {
281 char guid[DDF_GUID_LEN];
282 be16 unit;
283 __u16 pad0; /* 0xffff */
284 be16 guid_crc;
285 be16 type;
286 __u8 state;
287 __u8 init_state;
288 __u8 pad1[14];
289 char name[16];
290 } entries[0];
291 };
292
293 /* virtual_entry.type is a bitmap - bigendian */
294 #define DDF_Shared 1
295 #define DDF_Enforce_Groups 2
296 #define DDF_Unicode 4
297 #define DDF_Owner_Valid 8
298
299 /* virtual_entry.state is a bigendian bitmap */
300 #define DDF_state_mask 0x7
301 #define DDF_state_optimal 0x0
302 #define DDF_state_degraded 0x1
303 #define DDF_state_deleted 0x2
304 #define DDF_state_missing 0x3
305 #define DDF_state_failed 0x4
306 #define DDF_state_part_optimal 0x5
307
308 #define DDF_state_morphing 0x8
309 #define DDF_state_inconsistent 0x10
310
311 /* virtual_entry.init_state is a bigendian bitmap */
312 #define DDF_initstate_mask 0x03
313 #define DDF_init_not 0x00
314 #define DDF_init_quick 0x01 /* initialisation in progress.
315 * i.e. 'state_inconsistent' */
316 #define DDF_init_full 0x02
317
318 #define DDF_access_mask 0xc0
319 #define DDF_access_rw 0x00
320 #define DDF_access_ro 0x80
321 #define DDF_access_blocked 0xc0
322
323 /* The content of the config_section - local scope
324 * It has multiple records each config_record_len sectors
325 * They can be vd_config or spare_assign
326 */
327
328 struct vd_config {
329 be32 magic; /* DDF_VD_CONF_MAGIC */
330 be32 crc;
331 char guid[DDF_GUID_LEN];
332 be32 timestamp;
333 be32 seqnum;
334 __u8 pad0[24];
335 be16 prim_elmnt_count;
336 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
337 __u8 prl;
338 __u8 rlq;
339 __u8 sec_elmnt_count;
340 __u8 sec_elmnt_seq;
341 __u8 srl;
342 be64 blocks; /* blocks per component could be different
343 * on different component devices...(only
344 * for concat I hope) */
345 be64 array_blocks; /* blocks in array */
346 __u8 pad1[8];
347 be32 spare_refs[8]; /* This is used to detect missing spares.
348 * As we don't have an interface for that
349 * the values are ignored.
350 */
351 __u8 cache_pol[8];
352 __u8 bg_rate;
353 __u8 pad2[3];
354 __u8 pad3[52];
355 __u8 pad4[192];
356 __u8 v0[32]; /* reserved- 0xff */
357 __u8 v1[32]; /* reserved- 0xff */
358 __u8 v2[16]; /* reserved- 0xff */
359 __u8 v3[16]; /* reserved- 0xff */
360 __u8 vendor[32];
361 be32 phys_refnum[0]; /* refnum of each disk in sequence */
362 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
363 bvd are always the same size */
364 };
365 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
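/* Illustrative sketch (not from the original source): the per-disk start
 * LBAs are stored directly after the mppe-sized phys_refnum[] array, which
 * is what LBA_OFFSET() computes using ddf->mppe.  A walk over the disks of
 * one BVD might look like the hypothetical, unused helper below.
 */
#if 0	/* example only, not built */
static void example_walk_bvd(const struct vd_config *vc, unsigned int mppe)
{
	/* 'mppe' is max_primary_element_entries, i.e. ddf->mppe */
	const be64 *lba_offset = (const be64 *)&vc->phys_refnum[mppe];
	unsigned int i;

	for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++)
		printf("disk %u: refnum %08x starts at sector %llu\n", i,
		       be32_to_cpu(vc->phys_refnum[i]),
		       (unsigned long long)be64_to_cpu(lba_offset[i]));
}
#endif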
366
367 /* vd_config.cache_pol[7] is a bitmap */
368 #define DDF_cache_writeback 1 /* else writethrough */
369 #define DDF_cache_wadaptive 2 /* only applies if writeback */
370 #define DDF_cache_readahead 4
371 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
372 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
373 #define DDF_cache_wallowed 32 /* enable write caching */
374 #define DDF_cache_rallowed 64 /* enable read caching */
375
376 struct spare_assign {
377 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
378 be32 crc;
379 be32 timestamp;
380 __u8 reserved[7];
381 __u8 type;
382 be16 populated; /* SAEs used */
383 be16 max; /* max SAEs */
384 __u8 pad[8];
385 struct spare_assign_entry {
386 char guid[DDF_GUID_LEN];
387 be16 secondary_element;
388 __u8 pad[6];
389 } spare_ents[0];
390 };
391 /* spare_assign.type is a bitmap */
392 #define DDF_spare_dedicated 0x1 /* else global */
393 #define DDF_spare_revertible 0x2 /* else committable */
394 #define DDF_spare_active 0x4 /* else not active */
395 #define DDF_spare_affinity 0x8 /* enclosure affinity */
396
397 /* The data_section contents - local scope */
398 struct disk_data {
399 be32 magic; /* DDF_PHYS_DATA_MAGIC */
400 be32 crc;
401 char guid[DDF_GUID_LEN];
402 be32 refnum; /* crc of some magic drive data ... */
403 __u8 forced_ref; /* set when above was not result of magic */
404 __u8 forced_guid; /* set if guid was forced rather than magic */
405 __u8 vendor[32];
406 __u8 pad[442];
407 };
408
409 /* bbm_section content */
410 struct bad_block_log {
411 be32 magic;
412 be32 crc;
413 be16 entry_count;
414 be32 spare_count;
415 __u8 pad[10];
416 be64 first_spare;
417 struct mapped_block {
418 be64 defective_start;
419 be32 replacement_start;
420 be16 remap_count;
421 __u8 pad[2];
422 } entries[0];
423 };
424
425 /* Struct for internally holding ddf structures */
426 /* The DDF structure stored on each device is potentially
427 * quite different, as some data is global and some is local.
428 * The global data is:
429 * - ddf header
430 * - controller_data
431 * - Physical disk records
432 * - Virtual disk records
433 * The local data is:
434 * - Configuration records
435 * - Physical Disk data section
436 * ( and Bad block and vendor which I don't care about yet).
437 *
438 * The local data is parsed into separate lists as it is read
439 * and reconstructed for writing. This means that we only need
440 * to make config changes once and they are automatically
441 * propagated to all devices.
442 * The global (config and disk data) records are each in a list
443 * of separate data structures. When writing we find the entry
444 * or entries applicable to the particular device.
445 */
446 struct ddf_super {
447 struct ddf_header anchor, primary, secondary;
448 struct ddf_controller_data controller;
449 struct ddf_header *active;
450 struct phys_disk *phys;
451 struct virtual_disk *virt;
452 char *conf;
453 int pdsize, vdsize;
454 unsigned int max_part, mppe, conf_rec_len;
455 int currentdev;
456 int updates_pending;
457 struct vcl {
458 union {
459 char space[512];
460 struct {
461 struct vcl *next;
462 unsigned int vcnum; /* index into ->virt */
463 /* For an array with a secondary level there are
464 * multiple vd_config structures, all with the same
465 * guid but with different sec_elmnt_seq.
466 * One of these structures is in 'conf' below.
467 * The others are in other_bvds, not in any
468 * particular order.
469 */
470 struct vd_config **other_bvds;
471 __u64 *block_sizes; /* NULL if all the same */
472 };
473 };
474 struct vd_config conf;
475 } *conflist, *currentconf;
476 struct dl {
477 union {
478 char space[512];
479 struct {
480 struct dl *next;
481 int major, minor;
482 char *devname;
483 int fd;
484 unsigned long long size; /* sectors */
485 be64 primary_lba; /* sectors */
486 be64 secondary_lba; /* sectors */
487 be64 workspace_lba; /* sectors */
488 int pdnum; /* index in ->phys */
489 struct spare_assign *spare;
490 void *mdupdate; /* hold metadata update */
491
492 /* These fields used by auto-layout */
493 int raiddisk; /* slot to fill in autolayout */
494 __u64 esize;
495 int displayed;
496 };
497 };
498 struct disk_data disk;
499 struct vcl *vlist[0]; /* max_part in size */
500 } *dlist, *add_list;
501 };
502
503 static int load_super_ddf_all(struct supertype *st, int fd,
504 void **sbp, char *devname);
505 static int get_svd_state(const struct ddf_super *, const struct vcl *);
506
507 static int validate_geometry_ddf_bvd(struct supertype *st,
508 int level, int layout, int raiddisks,
509 int *chunk, unsigned long long size,
510 unsigned long long data_offset,
511 char *dev, unsigned long long *freesize,
512 int verbose);
513
514 static void free_super_ddf(struct supertype *st);
515 static int all_ff(const char *guid);
516 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
517 be32 refnum, unsigned int nmax,
518 const struct vd_config **bvd,
519 unsigned int *idx);
520 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
521 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
522 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
523 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i);
524 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
525 static int init_super_ddf_bvd(struct supertype *st,
526 mdu_array_info_t *info,
527 unsigned long long size,
528 char *name, char *homehost,
529 int *uuid, unsigned long long data_offset);
530
531 #if DEBUG
532 static void pr_state(struct ddf_super *ddf, const char *msg)
533 {
534 unsigned int i;
535 dprintf("%s: ", msg);
536 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
537 if (all_ff(ddf->virt->entries[i].guid))
538 continue;
539 dprintf_cont("%u(s=%02x i=%02x) ", i,
540 ddf->virt->entries[i].state,
541 ddf->virt->entries[i].init_state);
542 }
543 dprintf_cont("\n");
544 }
545 #else
546 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
547 #endif
548
549 static void _ddf_set_updates_pending(struct ddf_super *ddf, struct vd_config *vc,
550 const char *func)
551 {
552 if (vc) {
553 vc->timestamp = cpu_to_be32(time(0)-DECADE);
554 vc->seqnum = cpu_to_be32(be32_to_cpu(vc->seqnum) + 1);
555 }
556 if (ddf->updates_pending)
557 return;
558 ddf->updates_pending = 1;
559 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
560 pr_state(ddf, func);
561 }
562
563 #define ddf_set_updates_pending(x,v) _ddf_set_updates_pending((x), (v), __func__)
564
565 static be32 calc_crc(void *buf, int len)
566 {
567 /* crcs are always at the same place as in the ddf_header */
568 struct ddf_header *ddf = buf;
569 be32 oldcrc = ddf->crc;
570 __u32 newcrc;
571 ddf->crc = cpu_to_be32(0xffffffff);
572
573 newcrc = crc32(0, buf, len);
574 ddf->crc = oldcrc;
575 /* The crc is stored (like everything) bigendian, so convert
576 * here for simplicity
577 */
578 return cpu_to_be32(newcrc);
579 }
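/* Illustrative usage (not from the original source): calc_crc() substitutes
 * 0xffffffff for the stored crc while computing, so verifying a section
 * read from disk is simply
 *
 *	if (!be32_eq(calc_crc(hdr, 512), hdr->crc))
 *		return 0;
 *
 * (see load_ddf_header() below), and before writing, the stored value is
 * refreshed the same way, e.g. hdr->crc = calc_crc(hdr, 512).
 */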
580
581 #define DDF_INVALID_LEVEL 0xff
582 #define DDF_NO_SECONDARY 0xff
583 static int err_bad_md_layout(const mdu_array_info_t *array)
584 {
585 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
586 array->level, array->layout, array->raid_disks);
587 return -1;
588 }
589
590 static int layout_md2ddf(const mdu_array_info_t *array,
591 struct vd_config *conf)
592 {
593 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
594 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
595 __u8 sec_elmnt_count = 1;
596 __u8 srl = DDF_NO_SECONDARY;
597
598 switch (array->level) {
599 case LEVEL_LINEAR:
600 prl = DDF_CONCAT;
601 break;
602 case 0:
603 rlq = DDF_RAID0_SIMPLE;
604 prl = DDF_RAID0;
605 break;
606 case 1:
607 switch (array->raid_disks) {
608 case 2:
609 rlq = DDF_RAID1_SIMPLE;
610 break;
611 case 3:
612 rlq = DDF_RAID1_MULTI;
613 break;
614 default:
615 return err_bad_md_layout(array);
616 }
617 prl = DDF_RAID1;
618 break;
619 case 4:
620 if (array->layout != 0)
621 return err_bad_md_layout(array);
622 rlq = DDF_RAID4_N;
623 prl = DDF_RAID4;
624 break;
625 case 5:
626 switch (array->layout) {
627 case ALGORITHM_LEFT_ASYMMETRIC:
628 rlq = DDF_RAID5_N_RESTART;
629 break;
630 case ALGORITHM_RIGHT_ASYMMETRIC:
631 rlq = DDF_RAID5_0_RESTART;
632 break;
633 case ALGORITHM_LEFT_SYMMETRIC:
634 rlq = DDF_RAID5_N_CONTINUE;
635 break;
636 case ALGORITHM_RIGHT_SYMMETRIC:
637 /* not mentioned in standard */
638 default:
639 return err_bad_md_layout(array);
640 }
641 prl = DDF_RAID5;
642 break;
643 case 6:
644 switch (array->layout) {
645 case ALGORITHM_ROTATING_N_RESTART:
646 rlq = DDF_RAID5_N_RESTART;
647 break;
648 case ALGORITHM_ROTATING_ZERO_RESTART:
649 rlq = DDF_RAID6_0_RESTART;
650 break;
651 case ALGORITHM_ROTATING_N_CONTINUE:
652 rlq = DDF_RAID5_N_CONTINUE;
653 break;
654 default:
655 return err_bad_md_layout(array);
656 }
657 prl = DDF_RAID6;
658 break;
659 case 10:
660 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
661 rlq = DDF_RAID1_SIMPLE;
662 prim_elmnt_count = cpu_to_be16(2);
663 sec_elmnt_count = array->raid_disks / 2;
664 srl = DDF_2SPANNED;
665 prl = DDF_RAID1;
666 } else if (array->raid_disks % 3 == 0 &&
667 array->layout == 0x103) {
668 rlq = DDF_RAID1_MULTI;
669 prim_elmnt_count = cpu_to_be16(3);
670 sec_elmnt_count = array->raid_disks / 3;
671 srl = DDF_2SPANNED;
672 prl = DDF_RAID1;
673 } else if (array->layout == 0x201) {
674 prl = DDF_RAID1E;
675 rlq = DDF_RAID1E_OFFSET;
676 } else if (array->layout == 0x102) {
677 prl = DDF_RAID1E;
678 rlq = DDF_RAID1E_ADJACENT;
679 } else
680 return err_bad_md_layout(array);
681 break;
682 default:
683 return err_bad_md_layout(array);
684 }
685 conf->prl = prl;
686 conf->prim_elmnt_count = prim_elmnt_count;
687 conf->rlq = rlq;
688 conf->srl = srl;
689 conf->sec_elmnt_count = sec_elmnt_count;
690 return 0;
691 }
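/* Worked example (not from the original source): for a 4-disk md RAID10
 * with layout 0x102 (near=2), layout_md2ddf() above yields prl=DDF_RAID1,
 * rlq=DDF_RAID1_SIMPLE, prim_elmnt_count=2, sec_elmnt_count=2 and
 * srl=DDF_2SPANNED, i.e. a spanned set of two-way mirrors;
 * layout_ddf2md() below maps that back to level 10, layout 0x102,
 * raid_disks 4.
 */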
692
693 static int err_bad_ddf_layout(const struct vd_config *conf)
694 {
695 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
696 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
697 return -1;
698 }
699
700 static int layout_ddf2md(const struct vd_config *conf,
701 mdu_array_info_t *array)
702 {
703 int level = LEVEL_UNSUPPORTED;
704 int layout = 0;
705 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
706
707 if (conf->sec_elmnt_count > 1) {
708 /* see also check_secondary() */
709 if (conf->prl != DDF_RAID1 ||
710 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
711 pr_err("Unsupported secondary RAID level %u/%u\n",
712 conf->prl, conf->srl);
713 return -1;
714 }
715 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
716 layout = 0x102;
717 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
718 layout = 0x103;
719 else
720 return err_bad_ddf_layout(conf);
721 raiddisks *= conf->sec_elmnt_count;
722 level = 10;
723 goto good;
724 }
725
726 switch (conf->prl) {
727 case DDF_CONCAT:
728 level = LEVEL_LINEAR;
729 break;
730 case DDF_RAID0:
731 if (conf->rlq != DDF_RAID0_SIMPLE)
732 return err_bad_ddf_layout(conf);
733 level = 0;
734 break;
735 case DDF_RAID1:
736 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
737 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
738 return err_bad_ddf_layout(conf);
739 level = 1;
740 break;
741 case DDF_RAID1E:
742 if (conf->rlq == DDF_RAID1E_ADJACENT)
743 layout = 0x102;
744 else if (conf->rlq == DDF_RAID1E_OFFSET)
745 layout = 0x201;
746 else
747 return err_bad_ddf_layout(conf);
748 level = 10;
749 break;
750 case DDF_RAID4:
751 if (conf->rlq != DDF_RAID4_N)
752 return err_bad_ddf_layout(conf);
753 level = 4;
754 break;
755 case DDF_RAID5:
756 switch (conf->rlq) {
757 case DDF_RAID5_N_RESTART:
758 layout = ALGORITHM_LEFT_ASYMMETRIC;
759 break;
760 case DDF_RAID5_0_RESTART:
761 layout = ALGORITHM_RIGHT_ASYMMETRIC;
762 break;
763 case DDF_RAID5_N_CONTINUE:
764 layout = ALGORITHM_LEFT_SYMMETRIC;
765 break;
766 default:
767 return err_bad_ddf_layout(conf);
768 }
769 level = 5;
770 break;
771 case DDF_RAID6:
772 switch (conf->rlq) {
773 case DDF_RAID5_N_RESTART:
774 layout = ALGORITHM_ROTATING_N_RESTART;
775 break;
776 case DDF_RAID6_0_RESTART:
777 layout = ALGORITHM_ROTATING_ZERO_RESTART;
778 break;
779 case DDF_RAID5_N_CONTINUE:
780 layout = ALGORITHM_ROTATING_N_CONTINUE;
781 break;
782 default:
783 return err_bad_ddf_layout(conf);
784 }
785 level = 6;
786 break;
787 default:
788 return err_bad_ddf_layout(conf);
789 };
790
791 good:
792 array->level = level;
793 array->layout = layout;
794 array->raid_disks = raiddisks;
795 return 0;
796 }
797
798 static int load_ddf_header(int fd, unsigned long long lba,
799 unsigned long long size,
800 int type,
801 struct ddf_header *hdr, struct ddf_header *anchor)
802 {
803 /* read a ddf header (primary or secondary) from fd/lba
804 * and check that it is consistent with anchor
805 * Need to check:
806 * magic, crc, guid, rev, and LBA's header_type, and
807 * everything after header_type must be the same
808 */
809 if (lba >= size-1)
810 return 0;
811
812 if (lseek64(fd, lba<<9, 0) < 0)
813 return 0;
814
815 if (read(fd, hdr, 512) != 512)
816 return 0;
817
818 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
819 pr_err("bad header magic\n");
820 return 0;
821 }
822 if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
823 pr_err("bad CRC\n");
824 return 0;
825 }
826 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
827 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
828 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
829 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
830 hdr->type != type ||
831 memcmp(anchor->pad2, hdr->pad2, 512 -
832 offsetof(struct ddf_header, pad2)) != 0) {
833 pr_err("header mismatch\n");
834 return 0;
835 }
836
837 /* Looks good enough to me... */
838 return 1;
839 }
840
841 static void *load_section(int fd, struct ddf_super *super, void *buf,
842 be32 offset_be, be32 len_be, int check)
843 {
844 unsigned long long offset = be32_to_cpu(offset_be);
845 unsigned long long len = be32_to_cpu(len_be);
846 int dofree = (buf == NULL);
847
848 if (check)
849 if (len != 2 && len != 8 && len != 32 &&
850 len != 128 && len != 512)
851 return NULL;
852
853 if (len > 1024)
854 return NULL;
855 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
856 buf = NULL;
857
858 if (!buf)
859 return NULL;
860
861 if (super->active->type == 1)
862 offset += be64_to_cpu(super->active->primary_lba);
863 else
864 offset += be64_to_cpu(super->active->secondary_lba);
865
866 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
867 if (dofree)
868 free(buf);
869 return NULL;
870 }
871 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
872 if (dofree)
873 free(buf);
874 return NULL;
875 }
876 return buf;
877 }
878
879 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
880 {
881 unsigned long long dsize;
882
883 get_dev_size(fd, NULL, &dsize);
884
885 if (lseek64(fd, dsize-512, 0) < 0) {
886 if (devname)
887 pr_err("Cannot seek to anchor block on %s: %s\n",
888 devname, strerror(errno));
889 return 1;
890 }
891 if (read(fd, &super->anchor, 512) != 512) {
892 if (devname)
893 pr_err("Cannot read anchor block on %s: %s\n",
894 devname, strerror(errno));
895 return 1;
896 }
897 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
898 if (devname)
899 pr_err("no DDF anchor found on %s\n",
900 devname);
901 return 2;
902 }
903 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
904 if (devname)
905 pr_err("bad CRC on anchor on %s\n",
906 devname);
907 return 2;
908 }
909 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
910 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
911 if (devname)
912 pr_err("can only support super revision %.8s and earlier, not %.8s on %s\n",
913 DDF_REVISION_2, super->anchor.revision,devname);
914 return 2;
915 }
916 super->active = NULL;
917 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
918 dsize >> 9, 1,
919 &super->primary, &super->anchor) == 0) {
920 if (devname)
921 pr_err("Failed to load primary DDF header on %s\n", devname);
922 } else
923 super->active = &super->primary;
924
925 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
926 dsize >> 9, 2,
927 &super->secondary, &super->anchor)) {
928 if (super->active == NULL ||
929 (be32_to_cpu(super->primary.seq)
930 < be32_to_cpu(super->secondary.seq) &&
931 !super->secondary.openflag) ||
932 (be32_to_cpu(super->primary.seq) ==
933 be32_to_cpu(super->secondary.seq) &&
934 super->primary.openflag && !super->secondary.openflag))
935 super->active = &super->secondary;
936 } else if (devname &&
937 be64_to_cpu(super->anchor.secondary_lba) != ~(__u64)0)
938 pr_err("Failed to load secondary DDF header on %s\n",
939 devname);
940 if (super->active == NULL)
941 return 2;
942 return 0;
943 }
944
945 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
946 {
947 void *ok;
948 ok = load_section(fd, super, &super->controller,
949 super->active->controller_section_offset,
950 super->active->controller_section_length,
951 0);
952 super->phys = load_section(fd, super, NULL,
953 super->active->phys_section_offset,
954 super->active->phys_section_length,
955 1);
956 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
957
958 super->virt = load_section(fd, super, NULL,
959 super->active->virt_section_offset,
960 super->active->virt_section_length,
961 1);
962 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
963 if (!ok ||
964 !super->phys ||
965 !super->virt) {
966 free(super->phys);
967 free(super->virt);
968 super->phys = NULL;
969 super->virt = NULL;
970 return 2;
971 }
972 super->conflist = NULL;
973 super->dlist = NULL;
974
975 super->max_part = be16_to_cpu(super->active->max_partitions);
976 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
977 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
978 return 0;
979 }
980
981 #define DDF_UNUSED_BVD 0xff
982 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
983 {
984 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
985 unsigned int i, vdsize;
986 void *p;
987 if (n_vds == 0) {
988 vcl->other_bvds = NULL;
989 return 0;
990 }
991 vdsize = ddf->conf_rec_len * 512;
992 if (posix_memalign(&p, 512, n_vds *
993 (vdsize + sizeof(struct vd_config *))) != 0)
994 return -1;
995 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
996 for (i = 0; i < n_vds; i++) {
997 vcl->other_bvds[i] = p + i * vdsize;
998 memset(vcl->other_bvds[i], 0, vdsize);
999 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
1000 }
1001 return 0;
1002 }
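/* Layout note (not from the original source): alloc_other_bvds() packs all
 * n_vds secondary vd_config buffers and the pointer array into a single
 * allocation:
 *
 *	p -> [bvd 0][bvd 1]...[bvd n_vds-1][other_bvds pointer array]
 *	      ^ other_bvds[0] == p
 *
 * which is why free_super_ddf() can release the lot with a single
 * free(v->other_bvds[0]).
 */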
1003
1004 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
1005 unsigned int len)
1006 {
1007 int i;
1008 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
1009 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
1010 break;
1011
1012 if (i < vcl->conf.sec_elmnt_count-1) {
1013 if (be32_to_cpu(vd->seqnum) <=
1014 be32_to_cpu(vcl->other_bvds[i]->seqnum))
1015 return;
1016 } else {
1017 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
1018 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
1019 break;
1020 if (i == vcl->conf.sec_elmnt_count-1) {
1021 pr_err("no space for sec level config %u, count is %u\n",
1022 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
1023 return;
1024 }
1025 }
1026 memcpy(vcl->other_bvds[i], vd, len);
1027 }
1028
1029 static int load_ddf_local(int fd, struct ddf_super *super,
1030 char *devname, int keep)
1031 {
1032 struct dl *dl;
1033 struct stat stb;
1034 char *conf;
1035 unsigned int i;
1036 unsigned int confsec;
1037 int vnum;
1038 unsigned int max_virt_disks =
1039 be16_to_cpu(super->active->max_vd_entries);
1040 unsigned long long dsize;
1041
1042 /* First the local disk info */
1043 if (posix_memalign((void**)&dl, 512,
1044 sizeof(*dl) +
1045 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
1046 pr_err("could not allocate disk info buffer\n");
1047 return 1;
1048 }
1049
1050 load_section(fd, super, &dl->disk,
1051 super->active->data_section_offset,
1052 super->active->data_section_length,
1053 0);
1054 dl->devname = devname ? xstrdup(devname) : NULL;
1055
1056 fstat(fd, &stb);
1057 dl->major = major(stb.st_rdev);
1058 dl->minor = minor(stb.st_rdev);
1059 dl->next = super->dlist;
1060 dl->fd = keep ? fd : -1;
1061
1062 dl->size = 0;
1063 if (get_dev_size(fd, devname, &dsize))
1064 dl->size = dsize >> 9;
1065 /* If the disks have different sizes, the LBAs will differ
1066 * between phys disks.
1067  * At this point, the values in super->active must be valid
1068 * for this phys disk. */
1069 dl->primary_lba = super->active->primary_lba;
1070 dl->secondary_lba = super->active->secondary_lba;
1071 dl->workspace_lba = super->active->workspace_lba;
1072 dl->spare = NULL;
1073 for (i = 0 ; i < super->max_part ; i++)
1074 dl->vlist[i] = NULL;
1075 super->dlist = dl;
1076 dl->pdnum = -1;
1077 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1078 if (memcmp(super->phys->entries[i].guid,
1079 dl->disk.guid, DDF_GUID_LEN) == 0)
1080 dl->pdnum = i;
1081
1082 /* Now the config list. */
1083 /* 'conf' is an array of config entries, some of which are
1084 * probably invalid. Those which are good need to be copied into
1085 * the conflist
1086 */
1087
1088 conf = load_section(fd, super, super->conf,
1089 super->active->config_section_offset,
1090 super->active->config_section_length,
1091 0);
1092 super->conf = conf;
1093 vnum = 0;
1094 for (confsec = 0;
1095 confsec < be32_to_cpu(super->active->config_section_length);
1096 confsec += super->conf_rec_len) {
1097 struct vd_config *vd =
1098 (struct vd_config *)((char*)conf + confsec*512);
1099 struct vcl *vcl;
1100
1101 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1102 if (dl->spare)
1103 continue;
1104 if (posix_memalign((void**)&dl->spare, 512,
1105 super->conf_rec_len*512) != 0) {
1106 pr_err("could not allocate spare info buf\n");
1107 return 1;
1108 }
1109
1110 memcpy(dl->spare, vd, super->conf_rec_len*512);
1111 continue;
1112 }
1113 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1114 /* Must be vendor-unique - I cannot handle those */
1115 continue;
1116
1117 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1118 if (memcmp(vcl->conf.guid,
1119 vd->guid, DDF_GUID_LEN) == 0)
1120 break;
1121 }
1122
1123 if (vcl) {
1124 dl->vlist[vnum++] = vcl;
1125 if (vcl->other_bvds != NULL &&
1126 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1127 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1128 continue;
1129 }
1130 if (be32_to_cpu(vd->seqnum) <=
1131 be32_to_cpu(vcl->conf.seqnum))
1132 continue;
1133 } else {
1134 if (posix_memalign((void**)&vcl, 512,
1135 (super->conf_rec_len*512 +
1136 offsetof(struct vcl, conf))) != 0) {
1137 pr_err("could not allocate vcl buf\n");
1138 return 1;
1139 }
1140 vcl->next = super->conflist;
1141 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1142 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1143 if (alloc_other_bvds(super, vcl) != 0) {
1144 pr_err("could not allocate other bvds\n");
1145 free(vcl);
1146 return 1;
1147 };
1148 super->conflist = vcl;
1149 dl->vlist[vnum++] = vcl;
1150 }
1151 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1152 for (i=0; i < max_virt_disks ; i++)
1153 if (memcmp(super->virt->entries[i].guid,
1154 vcl->conf.guid, DDF_GUID_LEN)==0)
1155 break;
1156 if (i < max_virt_disks)
1157 vcl->vcnum = i;
1158 }
1159
1160 return 0;
1161 }
1162
1163 static int load_super_ddf(struct supertype *st, int fd,
1164 char *devname)
1165 {
1166 unsigned long long dsize;
1167 struct ddf_super *super;
1168 int rv;
1169
1170 if (get_dev_size(fd, devname, &dsize) == 0)
1171 return 1;
1172
1173 if (test_partition(fd))
1174 /* DDF is not allowed on partitions */
1175 return 1;
1176
1177 /* 32M is a lower bound */
1178 if (dsize <= 32*1024*1024) {
1179 if (devname)
1180 pr_err("%s is too small for ddf: size is %llu sectors.\n",
1181 devname, dsize>>9);
1182 return 1;
1183 }
1184 if (dsize & 511) {
1185 if (devname)
1186 pr_err("%s is an odd size for ddf: size is %llu bytes.\n",
1187 devname, dsize);
1188 return 1;
1189 }
1190
1191 free_super_ddf(st);
1192
1193 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1194 pr_err("malloc of %zu failed.\n",
1195 sizeof(*super));
1196 return 1;
1197 }
1198 memset(super, 0, sizeof(*super));
1199
1200 rv = load_ddf_headers(fd, super, devname);
1201 if (rv) {
1202 free(super);
1203 return rv;
1204 }
1205
1206 /* Have valid headers and have chosen the best. Let's read in the rest */
1207
1208 rv = load_ddf_global(fd, super, devname);
1209
1210 if (rv) {
1211 if (devname)
1212 pr_err("Failed to load all information sections on %s\n", devname);
1213 free(super);
1214 return rv;
1215 }
1216
1217 rv = load_ddf_local(fd, super, devname, 0);
1218
1219 if (rv) {
1220 if (devname)
1221 pr_err("Failed to load all information sections on %s\n", devname);
1222 free(super);
1223 return rv;
1224 }
1225
1226 /* Should possibly check the sections .... */
1227
1228 st->sb = super;
1229 if (st->ss == NULL) {
1230 st->ss = &super_ddf;
1231 st->minor_version = 0;
1232 st->max_devs = 512;
1233 }
1234 return 0;
1235
1236 }
1237
1238 static void free_super_ddf(struct supertype *st)
1239 {
1240 struct ddf_super *ddf = st->sb;
1241 if (ddf == NULL)
1242 return;
1243 free(ddf->phys);
1244 free(ddf->virt);
1245 free(ddf->conf);
1246 while (ddf->conflist) {
1247 struct vcl *v = ddf->conflist;
1248 ddf->conflist = v->next;
1249 if (v->block_sizes)
1250 free(v->block_sizes);
1251 if (v->other_bvds)
1252 /*
1253 v->other_bvds[0] points to beginning of buffer,
1254 see alloc_other_bvds()
1255 */
1256 free(v->other_bvds[0]);
1257 free(v);
1258 }
1259 while (ddf->dlist) {
1260 struct dl *d = ddf->dlist;
1261 ddf->dlist = d->next;
1262 if (d->fd >= 0)
1263 close(d->fd);
1264 if (d->spare)
1265 free(d->spare);
1266 free(d);
1267 }
1268 while (ddf->add_list) {
1269 struct dl *d = ddf->add_list;
1270 ddf->add_list = d->next;
1271 if (d->fd >= 0)
1272 close(d->fd);
1273 if (d->spare)
1274 free(d->spare);
1275 free(d);
1276 }
1277 free(ddf);
1278 st->sb = NULL;
1279 }
1280
1281 static struct supertype *match_metadata_desc_ddf(char *arg)
1282 {
1283 /* 'ddf' only supports containers */
1284 struct supertype *st;
1285 if (strcmp(arg, "ddf") != 0 &&
1286 strcmp(arg, "default") != 0
1287 )
1288 return NULL;
1289
1290 st = xcalloc(1, sizeof(*st));
1291 st->ss = &super_ddf;
1292 st->max_devs = 512;
1293 st->minor_version = 0;
1294 st->sb = NULL;
1295 return st;
1296 }
1297
1298 static mapping_t ddf_state[] = {
1299 { "Optimal", 0},
1300 { "Degraded", 1},
1301 { "Deleted", 2},
1302 { "Missing", 3},
1303 { "Failed", 4},
1304 { "Partially Optimal", 5},
1305 { "-reserved-", 6},
1306 { "-reserved-", 7},
1307 { NULL, 0}
1308 };
1309
1310 static mapping_t ddf_init_state[] = {
1311 { "Not Initialised", 0},
1312 { "QuickInit in Progress", 1},
1313 { "Fully Initialised", 2},
1314 { "*UNKNOWN*", 3},
1315 { NULL, 0}
1316 };
1317 static mapping_t ddf_access[] = {
1318 { "Read/Write", 0},
1319 { "Reserved", 1},
1320 { "Read Only", 2},
1321 { "Blocked (no access)", 3},
1322 { NULL ,0}
1323 };
1324
1325 static mapping_t ddf_level[] = {
1326 { "RAID0", DDF_RAID0},
1327 { "RAID1", DDF_RAID1},
1328 { "RAID3", DDF_RAID3},
1329 { "RAID4", DDF_RAID4},
1330 { "RAID5", DDF_RAID5},
1331 { "RAID1E",DDF_RAID1E},
1332 { "JBOD", DDF_JBOD},
1333 { "CONCAT",DDF_CONCAT},
1334 { "RAID5E",DDF_RAID5E},
1335 { "RAID5EE",DDF_RAID5EE},
1336 { "RAID6", DDF_RAID6},
1337 { NULL, 0}
1338 };
1339 static mapping_t ddf_sec_level[] = {
1340 { "Striped", DDF_2STRIPED},
1341 { "Mirrored", DDF_2MIRRORED},
1342 { "Concat", DDF_2CONCAT},
1343 { "Spanned", DDF_2SPANNED},
1344 { NULL, 0}
1345 };
1346
1347 static int all_ff(const char *guid)
1348 {
1349 int i;
1350 for (i = 0; i < DDF_GUID_LEN; i++)
1351 if (guid[i] != (char)0xff)
1352 return 0;
1353 return 1;
1354 }
1355
1356 static const char *guid_str(const char *guid)
1357 {
1358 static char buf[DDF_GUID_LEN*2+1];
1359 int i;
1360 char *p = buf;
1361 for (i = 0; i < DDF_GUID_LEN; i++) {
1362 unsigned char c = guid[i];
1363 if (c >= 32 && c < 127)
1364 p += sprintf(p, "%c", c);
1365 else
1366 p += sprintf(p, "%02x", c);
1367 }
1368 *p = '\0';
1369 return (const char *) buf;
1370 }
1371
1372 static void print_guid(char *guid, int tstamp)
1373 {
1374 /* GUIDs are part (or all) ASCII and part binary.
1375 * They tend to be space padded.
1376 * We print the GUID in HEX, then in parentheses add
1377 * any initial ASCII sequence, and a possible
1378 * time stamp from bytes 16-19
1379 */
1380 int l = DDF_GUID_LEN;
1381 int i;
1382
1383 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1384 if ((i&3)==0 && i != 0) printf(":");
1385 printf("%02X", guid[i]&255);
1386 }
1387
1388 printf("\n (");
1389 while (l && guid[l-1] == ' ')
1390 l--;
1391 for (i=0 ; i<l ; i++) {
1392 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1393 fputc(guid[i], stdout);
1394 else
1395 break;
1396 }
1397 if (tstamp) {
1398 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1399 char tbuf[100];
1400 struct tm *tm;
1401 tm = localtime(&then);
1402 strftime(tbuf, 100, " %D %T",tm);
1403 fputs(tbuf, stdout);
1404 }
1405 printf(")");
1406 }
1407
1408 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1409 {
1410 int crl = sb->conf_rec_len;
1411 struct vcl *vcl;
1412
1413 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1414 unsigned int i;
1415 struct vd_config *vc = &vcl->conf;
1416
1417 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1418 continue;
1419 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1420 continue;
1421
1422 /* Ok, we know about this VD, let's give more details */
1423 printf(" Raid Devices[%d] : %d (", n,
1424 be16_to_cpu(vc->prim_elmnt_count));
1425 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1426 int j;
1427 int cnt = be16_to_cpu(sb->phys->max_pdes);
1428 for (j=0; j<cnt; j++)
1429 if (be32_eq(vc->phys_refnum[i],
1430 sb->phys->entries[j].refnum))
1431 break;
1432 if (i) printf(" ");
1433 if (j < cnt)
1434 printf("%d", j);
1435 else
1436 printf("--");
1437 printf("@%lluK", (unsigned long long) be64_to_cpu(LBA_OFFSET(sb, vc)[i])/2);
1438 }
1439 printf(")\n");
1440 if (vc->chunk_shift != 255)
1441 printf(" Chunk Size[%d] : %d sectors\n", n,
1442 1 << vc->chunk_shift);
1443 printf(" Raid Level[%d] : %s\n", n,
1444 map_num(ddf_level, vc->prl)?:"-unknown-");
1445 if (vc->sec_elmnt_count != 1) {
1446 printf(" Secondary Position[%d] : %d of %d\n", n,
1447 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1448 printf(" Secondary Level[%d] : %s\n", n,
1449 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1450 }
1451 printf(" Device Size[%d] : %llu\n", n,
1452 be64_to_cpu(vc->blocks)/2);
1453 printf(" Array Size[%d] : %llu\n", n,
1454 be64_to_cpu(vc->array_blocks)/2);
1455 }
1456 }
1457
1458 static void examine_vds(struct ddf_super *sb)
1459 {
1460 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1461 unsigned int i;
1462 printf(" Virtual Disks : %d\n", cnt);
1463
1464 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1465 struct virtual_entry *ve = &sb->virt->entries[i];
1466 if (all_ff(ve->guid))
1467 continue;
1468 printf("\n");
1469 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1470 printf("\n");
1471 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1472 printf(" state[%d] : %s, %s%s\n", i,
1473 map_num_s(ddf_state, ve->state & 7),
1474 (ve->state & DDF_state_morphing) ? "Morphing, ": "",
1475 (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
1476 printf(" init state[%d] : %s\n", i,
1477 map_num_s(ddf_init_state, ve->init_state & DDF_initstate_mask));
1478 printf(" access[%d] : %s\n", i,
1479 map_num_s(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
1480 printf(" Name[%d] : %.16s\n", i, ve->name);
1481 examine_vd(i, sb, ve->guid);
1482 }
1483 if (cnt) printf("\n");
1484 }
1485
1486 static void examine_pds(struct ddf_super *sb)
1487 {
1488 int cnt = be16_to_cpu(sb->phys->max_pdes);
1489 int i;
1490 struct dl *dl;
1491 int unlisted = 0;
1492 printf(" Physical Disks : %d\n", cnt);
1493 printf(" Number RefNo Size Device Type/State\n");
1494
1495 for (dl = sb->dlist; dl; dl = dl->next)
1496 dl->displayed = 0;
1497
1498 for (i=0 ; i<cnt ; i++) {
1499 struct phys_disk_entry *pd = &sb->phys->entries[i];
1500 int type = be16_to_cpu(pd->type);
1501 int state = be16_to_cpu(pd->state);
1502
1503 if (be32_to_cpu(pd->refnum) == 0xffffffff)
1504 /* Not in use */
1505 continue;
1506 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1507 //printf("\n");
1508 printf(" %3d %08x ", i,
1509 be32_to_cpu(pd->refnum));
1510 printf("%8lluK ",
1511 be64_to_cpu(pd->config_size)>>1);
1512 for (dl = sb->dlist; dl ; dl = dl->next) {
1513 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1514 char *dv = map_dev(dl->major, dl->minor, 0);
1515 if (dv) {
1516 printf("%-15s", dv);
1517 break;
1518 }
1519 }
1520 }
1521 if (!dl)
1522 printf("%15s","");
1523 else
1524 dl->displayed = 1;
1525 printf(" %s%s%s%s%s",
1526 (type&2) ? "active":"",
1527 (type&4) ? "Global-Spare":"",
1528 (type&8) ? "spare" : "",
1529 (type&16)? ", foreign" : "",
1530 (type&32)? "pass-through" : "");
1531 if (state & DDF_Failed)
1532 /* This over-rides these three */
1533 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1534 printf("/%s%s%s%s%s%s%s",
1535 (state&1)? "Online": "Offline",
1536 (state&2)? ", Failed": "",
1537 (state&4)? ", Rebuilding": "",
1538 (state&8)? ", in-transition": "",
1539 (state&16)? ", SMART-errors": "",
1540 (state&32)? ", Unrecovered-Read-Errors": "",
1541 (state&64)? ", Missing" : "");
1542 printf("\n");
1543 }
1544 for (dl = sb->dlist; dl; dl = dl->next) {
1545 char *dv;
1546 if (dl->displayed)
1547 continue;
1548 if (!unlisted)
1549 printf(" Physical disks not in metadata!:\n");
1550 unlisted = 1;
1551 dv = map_dev(dl->major, dl->minor, 0);
1552 printf(" %08x %s\n", be32_to_cpu(dl->disk.refnum),
1553 dv ? dv : "-unknown-");
1554 }
1555 if (unlisted)
1556 printf("\n");
1557 }
1558
1559 static void examine_super_ddf(struct supertype *st, char *homehost)
1560 {
1561 struct ddf_super *sb = st->sb;
1562
1563 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1564 printf(" Version : %.8s\n", sb->anchor.revision);
1565 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1566 printf("\n");
1567 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1568 printf("\n");
1569 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1570 printf(" Redundant hdr : %s\n", (be32_eq(sb->secondary.magic,
1571 DDF_HEADER_MAGIC)
1572 ?"yes" : "no"));
1573 examine_vds(sb);
1574 examine_pds(sb);
1575 }
1576
1577 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1578 {
1579 /*
1580 * Figure out the VD number for this supertype.
1581 * Returns DDF_CONTAINER for the container itself,
1582 * and DDF_NOTFOUND on error.
1583 */
1584 struct ddf_super *ddf = st->sb;
1585 struct mdinfo *sra;
1586 char *sub, *end;
1587 unsigned int vcnum;
1588
1589 if (*st->container_devnm == '\0')
1590 return DDF_CONTAINER;
1591
1592 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1593 if (!sra || sra->array.major_version != -1 ||
1594 sra->array.minor_version != -2 ||
1595 !is_subarray(sra->text_version)) {
1596 if (sra)
1597 sysfs_free(sra);
1598 return DDF_NOTFOUND;
1599 }
1600
1601 sub = strchr(sra->text_version + 1, '/');
1602 if (sub != NULL)
1603 vcnum = strtoul(sub + 1, &end, 10);
1604 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1605 vcnum >= be16_to_cpu(ddf->active->max_vd_entries)) {
1606 sysfs_free(sra);
1607 return DDF_NOTFOUND;
1608 }
1609
1610 return vcnum;
1611 }
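/* Example (assumed typical values, not from the original source): a member
 * array whose sysfs text_version reads "/md127/2" is a subarray of
 * container md127, and get_vd_num_of_subarray() returns vcnum 2; for the
 * container itself (empty st->container_devnm) it returns DDF_CONTAINER.
 */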
1612
1613 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1614 {
1615 /* We just write a generic DDF ARRAY entry
1616 */
1617 struct mdinfo info;
1618 char nbuf[64];
1619 getinfo_super_ddf(st, &info, NULL);
1620 fname_from_uuid(&info, nbuf);
1621
1622 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1623 }
1624
1625 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1626 {
1627 /* We write a DDF ARRAY member entry for each vd, identifying container
1628 * by uuid and member by unit number and uuid.
1629 */
1630 struct ddf_super *ddf = st->sb;
1631 struct mdinfo info;
1632 unsigned int i;
1633 char nbuf[64];
1634 getinfo_super_ddf(st, &info, NULL);
1635 fname_from_uuid(&info, nbuf);
1636
1637 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1638 struct virtual_entry *ve = &ddf->virt->entries[i];
1639 struct vcl vcl;
1640 char nbuf1[64];
1641 char namebuf[17];
1642 if (all_ff(ve->guid))
1643 continue;
1644 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1645 ddf->currentconf =&vcl;
1646 vcl.vcnum = i;
1647 uuid_from_super_ddf(st, info.uuid);
1648 fname_from_uuid(&info, nbuf1);
1649 _ddf_array_name(namebuf, ddf, i);
1650 printf("ARRAY%s%s container=%s member=%d UUID=%s\n",
1651 namebuf[0] == '\0' ? "" : " " DEV_MD_DIR, namebuf,
1652 nbuf+5, i, nbuf1+5);
1653 }
1654 }
1655
1656 static void export_examine_super_ddf(struct supertype *st)
1657 {
1658 struct mdinfo info;
1659 char nbuf[64];
1660 getinfo_super_ddf(st, &info, NULL);
1661 fname_from_uuid(&info, nbuf);
1662 printf("MD_METADATA=ddf\n");
1663 printf("MD_LEVEL=container\n");
1664 printf("MD_UUID=%s\n", nbuf+5);
1665 printf("MD_DEVICES=%u\n",
1666 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1667 }
1668
1669 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1670 {
1671 void *buf;
1672 unsigned long long dsize, offset;
1673 int bytes;
1674 struct ddf_header *ddf;
1675 int written = 0;
1676
1677 /* The meta consists of an anchor, a primary, and a secondary.
1678 * This all lives at the end of the device.
1679 * So it is easiest to find the earliest of primary and
1680 * secondary, and copy everything from there.
1681 *
1682 * Anchor is 512 from end. It contains primary_lba and secondary_lba
1683 * we choose one of those
1684 */
1685
1686 if (posix_memalign(&buf, 4096, 4096) != 0)
1687 return 1;
1688
1689 if (!get_dev_size(from, NULL, &dsize))
1690 goto err;
1691
1692 if (lseek64(from, dsize-512, 0) < 0)
1693 goto err;
1694 if (read(from, buf, 512) != 512)
1695 goto err;
1696 ddf = buf;
1697 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1698 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1699 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1700 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1701 goto err;
1702
1703 offset = dsize - 512;
1704 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1705 offset = be64_to_cpu(ddf->primary_lba) << 9;
1706 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1707 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1708
1709 bytes = dsize - offset;
1710
1711 if (lseek64(from, offset, 0) < 0 ||
1712 lseek64(to, offset, 0) < 0)
1713 goto err;
1714 while (written < bytes) {
1715 int n = bytes - written;
1716 if (n > 4096)
1717 n = 4096;
1718 if (read(from, buf, n) != n)
1719 goto err;
1720 if (write(to, buf, n) != n)
1721 goto err;
1722 written += n;
1723 }
1724 free(buf);
1725 return 0;
1726 err:
1727 free(buf);
1728 return 1;
1729 }
1730
1731 static void detail_super_ddf(struct supertype *st, char *homehost,
1732 char *subarray)
1733 {
1734 struct ddf_super *sb = st->sb;
1735 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1736
1737 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1738 printf("\n");
1739 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1740 printf(" Virtual Disks : %d\n", cnt);
1741 printf("\n");
1742 }
1743
1744 static const char *vendors_with_variable_volume_UUID[] = {
1745 "LSI ",
1746 };
1747
1748 static int volume_id_is_reliable(const struct ddf_super *ddf)
1749 {
1750 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1751 int i;
1752 for (i = 0; i < n; i++)
1753 if (!memcmp(ddf->controller.guid,
1754 vendors_with_variable_volume_UUID[i], 8))
1755 return 0;
1756 return 1;
1757 }
1758
1759 static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
1760 unsigned int vcnum, int uuid[4])
1761 {
1762 char buf[DDF_GUID_LEN+18], sha[20], *p;
1763 struct sha1_ctx ctx;
1764 if (volume_id_is_reliable(ddf)) {
1765 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
1766 return;
1767 }
1768 /*
1769 * Some fake RAID BIOSes (in particular, LSI ones) change the
1770 * VD GUID at every boot. These GUIDs are not suitable for
1771 * identifying an array. Luckily the header GUID appears to
1772 * remain constant.
1773 * We construct a pseudo-UUID from the header GUID and those
1774 * properties of the subarray that we expect to remain constant.
1775 */
1776 memset(buf, 0, sizeof(buf));
1777 p = buf;
1778 memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
1779 p += DDF_GUID_LEN;
1780 memcpy(p, ddf->virt->entries[vcnum].name, 16);
1781 p += 16;
1782 *((__u16 *) p) = vcnum;
1783 sha1_init_ctx(&ctx);
1784 sha1_process_bytes(buf, sizeof(buf), &ctx);
1785 sha1_finish_ctx(&ctx, sha);
1786 memcpy(uuid, sha, 4*4);
1787 }
1788
1789 static void brief_detail_super_ddf(struct supertype *st, char *subarray)
1790 {
1791 struct mdinfo info;
1792 char nbuf[64];
1793 struct ddf_super *ddf = st->sb;
1794 unsigned int vcnum = get_vd_num_of_subarray(st);
1795 if (vcnum == DDF_CONTAINER)
1796 uuid_from_super_ddf(st, info.uuid);
1797 else if (vcnum == DDF_NOTFOUND)
1798 return;
1799 else
1800 uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
1801 fname_from_uuid(&info, nbuf);
1802 printf(" UUID=%s", nbuf + 5);
1803 }
1804
1805 static int match_home_ddf(struct supertype *st, char *homehost)
1806 {
1807 /* It matches 'this' host if the controller is a
1808 * Linux-MD controller with vendor_data matching
1809 * the hostname. It would be nice if we could
1810 * test against controller found in /sys or somewhere...
1811 */
1812 struct ddf_super *ddf = st->sb;
1813 unsigned int len;
1814
1815 if (!homehost)
1816 return 0;
1817 len = strlen(homehost);
1818
1819 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1820 len < sizeof(ddf->controller.vendor_data) &&
1821 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1822 ddf->controller.vendor_data[len] == 0);
1823 }
1824
1825 static int find_index_in_bvd(const struct ddf_super *ddf,
1826 const struct vd_config *conf, unsigned int n,
1827 unsigned int *n_bvd)
1828 {
1829 /*
1830 * Find the index of the n-th valid physical disk in this BVD.
1831 * Unused entries can be sprinkled in with the used entries,
1832 * but don't count.
1833 */
1834 unsigned int i, j;
1835 for (i = 0, j = 0;
1836 i < ddf->mppe && j < be16_to_cpu(conf->prim_elmnt_count);
1837 i++) {
1838 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1839 if (n == j) {
1840 *n_bvd = i;
1841 return 1;
1842 }
1843 j++;
1844 }
1845 }
1846 dprintf("couldn't find BVD member %u (total %u)\n",
1847 n, be16_to_cpu(conf->prim_elmnt_count));
1848 return 0;
1849 }
1850
1851 /* Given a member array instance number, and a raid disk within that instance,
1852 * find the vd_config structure. The offset of the given disk in the phys_refnum
1853 * table is returned in n_bvd.
1854 * For two-level members with a secondary raid level the vd_config for
1855 * the appropriate BVD is returned.
1856 * The return value is always &vlc->conf, where vlc is returned in last pointer.
1857 */
1858 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1859 unsigned int n,
1860 unsigned int *n_bvd, struct vcl **vcl)
1861 {
1862 struct vcl *v;
1863
1864 for (v = ddf->conflist; v; v = v->next) {
1865 unsigned int nsec, ibvd = 0;
1866 struct vd_config *conf;
1867 if (inst != v->vcnum)
1868 continue;
1869 conf = &v->conf;
1870 if (conf->sec_elmnt_count == 1) {
1871 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1872 *vcl = v;
1873 return conf;
1874 } else
1875 goto bad;
1876 }
1877 if (v->other_bvds == NULL) {
1878 pr_err("BUG: other_bvds is NULL, nsec=%u\n",
1879 conf->sec_elmnt_count);
1880 goto bad;
1881 }
1882 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1883 if (conf->sec_elmnt_seq != nsec) {
1884 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1885 if (v->other_bvds[ibvd-1]->sec_elmnt_seq ==
1886 nsec)
1887 break;
1888 }
1889 if (ibvd == conf->sec_elmnt_count)
1890 goto bad;
1891 conf = v->other_bvds[ibvd-1];
1892 }
1893 if (!find_index_in_bvd(ddf, conf,
1894 n - nsec*conf->sec_elmnt_count, n_bvd))
1895 goto bad;
1896 dprintf("found disk %u as member %u in bvd %d of array %u\n",
1897 n, *n_bvd, ibvd, inst);
1898 *vcl = v;
1899 return conf;
1900 }
1901 bad:
1902 pr_err("Couldn't find disk %d in array %u\n", n, inst);
1903 return NULL;
1904 }
1905
1906 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1907 {
1908 /* Find the entry in phys_disk which has the given refnum
1909  * and return its index
1910 */
1911 unsigned int i;
1912 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1913 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1914 return i;
1915 return -1;
1916 }
1917
1918 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1919 {
1920 char buf[20];
1921 struct sha1_ctx ctx;
1922 sha1_init_ctx(&ctx);
1923 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1924 sha1_finish_ctx(&ctx, buf);
1925 memcpy(uuid, buf, 4*4);
1926 }
1927
1928 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1929 {
1930 /* The uuid returned here is used for:
1931 * uuid to put into bitmap file (Create, Grow)
1932 * uuid for backup header when saving critical section (Grow)
1933 * comparing uuids when re-adding a device into an array
1934 * In these cases the uuid required is that of the data-array,
1935 * not the device-set.
1936 * uuid to recognise same set when adding a missing device back
1937 * to an array. This is a uuid for the device-set.
1938 *
1939 * For each of these we can make do with a truncated
1940 * or hashed uuid rather than the original, as long as
1941 * everyone agrees.
1942 * In the case of SVD we assume the BVD is of interest,
1943 * though that might not be the case if a bitmap were made for
1944 * a mirrored SVD - worry about that later.
1945 * So we need to find the VD configuration record for the
1946 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1947 * The first 16 bytes of the sha1 of these is used.
1948 */
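 /* In short: with a subarray selected (currentconf != NULL) the uuid is
  * derived from that virtual disk's GUID; otherwise it is derived from
  * the container (anchor) GUID, so the container and its member arrays
  * get distinct but stable uuids.
  */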
1949 struct ddf_super *ddf = st->sb;
1950 struct vcl *vcl = ddf->currentconf;
1951
1952 if (vcl)
1953 uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
1954 else
1955 uuid_from_ddf_guid(ddf->anchor.guid, uuid);
1956 }
1957
1958 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1959 {
1960 struct ddf_super *ddf = st->sb;
1961 int map_disks = info->array.raid_disks;
1962 __u32 *cptr;
1963
1964 if (ddf->currentconf) {
1965 getinfo_super_ddf_bvd(st, info, map);
1966 return;
1967 }
1968 memset(info, 0, sizeof(*info));
1969
1970 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1971 info->array.level = LEVEL_CONTAINER;
1972 info->array.layout = 0;
1973 info->array.md_minor = -1;
1974 cptr = (__u32 *)(ddf->anchor.guid + 16);
1975 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1976
1977 info->array.chunk_size = 0;
1978 info->container_enough = 1;
1979
1980 info->disk.major = 0;
1981 info->disk.minor = 0;
1982 if (ddf->dlist) {
1983 struct phys_disk_entry *pde = NULL;
1984 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1985 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1986
1987 if (info->disk.raid_disk < 0)
1988 return;
1989
1990 info->data_offset = be64_to_cpu(ddf->phys->
1991 entries[info->disk.raid_disk].
1992 config_size);
1993 info->component_size = ddf->dlist->size - info->data_offset;
1994 pde = ddf->phys->entries + info->disk.raid_disk;
1995 if (pde &&
1996 !(be16_to_cpu(pde->state) & DDF_Failed) &&
1997 !(be16_to_cpu(pde->state) & DDF_Missing))
1998 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1999 else
2000 info->disk.state = 1 << MD_DISK_FAULTY;
2001
2002 } else {
2003 /* There should always be a dlist, but just in case...*/
2004 info->disk.number = -1;
2005 info->disk.raid_disk = -1;
2006 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
2007 }
2008 info->events = be32_to_cpu(ddf->active->seq);
2009 info->array.utime = DECADE + be32_to_cpu(ddf->active->timestamp);
2010
2011 info->recovery_start = MaxSector;
2012 info->reshape_active = 0;
2013 info->recovery_blocked = 0;
2014 info->name[0] = 0;
2015
2016 info->array.major_version = -1;
2017 info->array.minor_version = -2;
2018 strcpy(info->text_version, "ddf");
2019 info->safe_mode_delay = 0;
2020
2021 uuid_from_super_ddf(st, info->uuid);
2022
2023 if (map) {
2024 int i, e = 0;
2025 int max = be16_to_cpu(ddf->phys->max_pdes);
2026 for (i = e = 0 ; i < map_disks ; i++, e++) {
2027 while (e < max &&
2028 be32_to_cpu(ddf->phys->entries[e].refnum) == 0xffffffff)
2029 e++;
2030 if (i < info->array.raid_disks && e < max &&
2031 !(be16_to_cpu(ddf->phys->entries[e].state) &
2032 DDF_Failed))
2033 map[i] = 1;
2034 else
2035 map[i] = 0;
2036 }
2037 }
2038 }
2039
2040 /* size of name must be at least 17 bytes! */
2041 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i)
2042 {
2043 int j;
2044 memcpy(name, ddf->virt->entries[i].name, 16);
2045 name[16] = 0;
2046 for(j = 0; j < 16; j++)
2047 if (name[j] == ' ')
2048 name[j] = 0;
2049 }
2050
2051 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
2052 {
2053 struct ddf_super *ddf = st->sb;
2054 struct vcl *vc = ddf->currentconf;
2055 int cd = ddf->currentdev;
2056 int n_prim;
2057 int j;
2058 struct dl *dl = NULL;
2059 int map_disks = info->array.raid_disks;
2060 __u32 *cptr;
2061 struct vd_config *conf;
2062
2063 memset(info, 0, sizeof(*info));
2064 if (layout_ddf2md(&vc->conf, &info->array) == -1)
2065 return;
2066 info->array.md_minor = -1;
2067 cptr = (__u32 *)(vc->conf.guid + 16);
2068 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
2069 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
2070 info->array.chunk_size = 512 << vc->conf.chunk_shift;
2071 info->custom_array_size = be64_to_cpu(vc->conf.array_blocks);
2072
2073 conf = &vc->conf;
2074 n_prim = be16_to_cpu(conf->prim_elmnt_count);
2075 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
2076 int ibvd = cd / n_prim - 1;
2077 cd %= n_prim;
2078 conf = vc->other_bvds[ibvd];
2079 }
2080
2081 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
2082 info->data_offset =
2083 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
2084 if (vc->block_sizes)
2085 info->component_size = vc->block_sizes[cd];
2086 else
2087 info->component_size = be64_to_cpu(conf->blocks);
2088
2089 for (dl = ddf->dlist; dl ; dl = dl->next)
2090 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
2091 break;
2092 }
2093
2094 info->disk.major = 0;
2095 info->disk.minor = 0;
2096 info->disk.state = 0;
2097 if (dl && dl->pdnum >= 0) {
2098 info->disk.major = dl->major;
2099 info->disk.minor = dl->minor;
2100 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2101 * be16_to_cpu(conf->prim_elmnt_count);
2102 info->disk.number = dl->pdnum;
2103 info->disk.state = 0;
2104 if (info->disk.number >= 0 &&
2105 (be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Online) &&
2106 !(be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Failed))
2107 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2108 info->events = be32_to_cpu(ddf->active->seq);
2109 }
2110
2111 info->container_member = ddf->currentconf->vcnum;
2112
2113 info->recovery_start = MaxSector;
2114 info->resync_start = 0;
2115 info->reshape_active = 0;
2116 info->recovery_blocked = 0;
2117 if (!(ddf->virt->entries[info->container_member].state &
2118 DDF_state_inconsistent) &&
2119 (ddf->virt->entries[info->container_member].init_state &
2120 DDF_initstate_mask) == DDF_init_full)
2121 info->resync_start = MaxSector;
2122
2123 uuid_from_super_ddf(st, info->uuid);
2124
2125 info->array.major_version = -1;
2126 info->array.minor_version = -2;
2127 sprintf(info->text_version, "/%s/%d",
2128 st->container_devnm,
2129 info->container_member);
2130 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2131
2132 _ddf_array_name(info->name, ddf, info->container_member);
2133
2134 if (map)
2135 for (j = 0; j < map_disks; j++) {
2136 map[j] = 0;
2137 if (j < info->array.raid_disks) {
2138 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2139 if (i >= 0 &&
2140 (be16_to_cpu(ddf->phys->entries[i].state)
2141 & DDF_Online) &&
2142 !(be16_to_cpu(ddf->phys->entries[i].state)
2143 & DDF_Failed))
2144 map[j] = 1;
2145 }
2146 }
2147 }
2148
2149 static void make_header_guid(char *guid)
2150 {
2151 be32 stamp;
2152 /* Create a DDF Header or Virtual Disk GUID */
2153
2154 /* 24 bytes of fiction required.
2155 * first 8 are a 'vendor-id' - "Linux-MD"
2156 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2157 * Remaining 8 are a timestamp plus a random number
2158 */
2159 memcpy(guid, T10, sizeof(T10));
2160 stamp = cpu_to_be32(0xdeadbeef);
2161 memcpy(guid+8, &stamp, 4);
2162 stamp = cpu_to_be32(0);
2163 memcpy(guid+12, &stamp, 4);
2164 stamp = cpu_to_be32(time(0) - DECADE);
2165 memcpy(guid+16, &stamp, 4);
2166 stamp._v32 = random32();
2167 memcpy(guid+20, &stamp, 4);
2168 }
2169
2170 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2171 {
2172 unsigned int i;
2173 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2174 if (all_ff(ddf->virt->entries[i].guid))
2175 return i;
2176 }
2177 return DDF_NOTFOUND;
2178 }
2179
2180 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2181 const char *name)
2182 {
2183 unsigned int i;
2184 if (name == NULL)
2185 return DDF_NOTFOUND;
2186 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2187 if (all_ff(ddf->virt->entries[i].guid))
2188 continue;
2189 if (!strncmp(name, ddf->virt->entries[i].name,
2190 sizeof(ddf->virt->entries[i].name)))
2191 return i;
2192 }
2193 return DDF_NOTFOUND;
2194 }
2195
2196 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2197 const char *guid)
2198 {
2199 unsigned int i;
2200 if (guid == NULL || all_ff(guid))
2201 return DDF_NOTFOUND;
2202 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2203 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2204 return i;
2205 return DDF_NOTFOUND;
2206 }
2207
2208 static int init_super_ddf(struct supertype *st,
2209 mdu_array_info_t *info,
2210 struct shape *s, char *name, char *homehost,
2211 int *uuid, unsigned long long data_offset)
2212 {
2213 /* This is primarily called by Create when creating a new array.
2214 * We will then get add_to_super called for each component, and then
2215 * write_init_super called to write it out to each device.
2216 * For DDF, Create can create on fresh devices or on a pre-existing
2217 * array.
2218 * To create on a pre-existing array a different method will be called.
2219 * This one is just for fresh drives.
2220 *
2221 * We need to create the entire 'ddf' structure which includes:
2222 * DDF headers - these are easy.
2223 * Controller data - a Sector describing this controller .. not that
2224 * this is a controller exactly.
2225 * Physical Disk Record - one entry per device, so
2226 * leave plenty of space.
2227 * Virtual Disk Records - again, just leave plenty of space.
2228 * This just lists VDs, doesn't give details.
2229 * Config records - describe the VDs that use this disk
2230 * DiskData - describes 'this' device.
2231 * BadBlockManagement - empty
2232 * Diag Space - empty
2233 * Vendor Logs - Could we put bitmaps here?
2234 *
2235 */
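 /* The layout built below is, roughly (sector offsets relative to the
  * DDF header, as set up by the 'sector' counter):
  *   0    DDF header
  *   1    controller data          (1 sector)
  *   2    physical disk records    (pdsize/512 sectors)
  *   ...  virtual disk records     (vdsize/512 sectors)
  *   ...  config records           (conf_rec_len * (max_part+1) sectors)
  *   ...  disk data                (1 sector)
  * BBM, diag space and vendor logs are left empty.
  */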
2236 struct ddf_super *ddf;
2237 char hostname[17];
2238 int hostlen;
2239 int max_phys_disks, max_virt_disks;
2240 unsigned long long sector;
2241 int clen;
2242 int i;
2243 int pdsize, vdsize;
2244 struct phys_disk *pd;
2245 struct virtual_disk *vd;
2246
2247 if (st->sb)
2248 return init_super_ddf_bvd(st, info, s->size, name, homehost, uuid,
2249 data_offset);
2250
2251 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2252 pr_err("could not allocate superblock\n");
2253 return 0;
2254 }
2255 memset(ddf, 0, sizeof(*ddf));
2256 st->sb = ddf;
2257
2258 if (info == NULL) {
2259 /* zeroing superblock */
2260 return 0;
2261 }
2262
2263 /* At least 32MB *must* be reserved for the ddf. So let's just
2264 * start 32MB from the end, and put the primary header there.
2265 * Don't do secondary for now.
2266 * We don't know exactly where that will be yet as it could be
2267 * different on each device. So just set up the lengths.
2268 */
2269
2270 ddf->anchor.magic = DDF_HEADER_MAGIC;
2271 make_header_guid(ddf->anchor.guid);
2272
2273 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2274 ddf->anchor.seq = cpu_to_be32(1);
2275 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2276 ddf->anchor.openflag = 0xFF;
2277 ddf->anchor.foreignflag = 0;
2278 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2279 ddf->anchor.pad0 = 0xff;
2280 memset(ddf->anchor.pad1, 0xff, 12);
2281 memset(ddf->anchor.header_ext, 0xff, 32);
2282 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2283 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2284 ddf->anchor.type = DDF_HEADER_ANCHOR;
2285 memset(ddf->anchor.pad2, 0xff, 3);
2286 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2287 /* Put this at bottom of 32M reserved.. */
2288 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2289 max_phys_disks = 1023; /* Should be enough, 4095 is also allowed */
2290 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2291 max_virt_disks = 255; /* 15, 63, 255, 1024, 4095 are all allowed */
2292 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks);
2293 ddf->max_part = 64;
2294 ddf->anchor.max_partitions = cpu_to_be16(ddf->max_part);
2295 ddf->mppe = 256; /* 16, 64, 256, 1024, 4096 are all allowed */
2296 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
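 /* e.g. with mppe == 256: 256 * (4-byte phys_refnum + 8-byte LBA offset)
  * = 3072 bytes = 6 sectors of tables, plus 1 sector of vd_config header,
  * giving conf_rec_len == 7.
  */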
2297 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2298 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2299 memset(ddf->anchor.pad3, 0xff, 54);
2300 /* Controller section is one sector long immediately
2301 * after the ddf header */
2302 sector = 1;
2303 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2304 ddf->anchor.controller_section_length = cpu_to_be32(1);
2305 sector += 1;
2306
2307 /* phys is 8 sectors after that */
2308 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2309 sizeof(struct phys_disk_entry)*max_phys_disks,
2310 512);
2311 switch(pdsize/512) {
2312 case 2: case 8: case 32: case 128: case 512: break;
2313 default: abort();
2314 }
2315 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2316 ddf->anchor.phys_section_length =
2317 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2318 sector += pdsize/512;
2319
2320 /* virt is another 32 sectors */
2321 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2322 sizeof(struct virtual_entry) * max_virt_disks,
2323 512);
2324 switch(vdsize/512) {
2325 case 2: case 8: case 32: case 128: case 512: break;
2326 default: abort();
2327 }
2328 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2329 ddf->anchor.virt_section_length =
2330 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2331 sector += vdsize/512;
2332
2333 clen = ddf->conf_rec_len * (ddf->max_part+1);
2334 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2335 ddf->anchor.config_section_length = cpu_to_be32(clen);
2336 sector += clen;
2337
2338 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2339 ddf->anchor.data_section_length = cpu_to_be32(1);
2340 sector += 1;
2341
2342 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2343 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2344 ddf->anchor.diag_space_length = cpu_to_be32(0);
2345 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2346 ddf->anchor.vendor_length = cpu_to_be32(0);
2347 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2348
2349 memset(ddf->anchor.pad4, 0xff, 256);
2350
2351 memcpy(&ddf->primary, &ddf->anchor, 512);
2352 memcpy(&ddf->secondary, &ddf->anchor, 512);
2353
2354 ddf->primary.openflag = 1; /* I guess.. */
2355 ddf->primary.type = DDF_HEADER_PRIMARY;
2356
2357 ddf->secondary.openflag = 1; /* I guess.. */
2358 ddf->secondary.type = DDF_HEADER_SECONDARY;
2359
2360 ddf->active = &ddf->primary;
2361
2362 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2363
2364 /* 24 more bytes of fiction required.
2365 * first 8 are a 'vendor-id' - "Linux-MD"
2366 * Remaining 16 are serial number.... maybe a hostname would do?
2367 */
2368 memcpy(ddf->controller.guid, T10, sizeof(T10));
2369 s_gethostname(hostname, sizeof(hostname));
2370 hostlen = strlen(hostname);
2371 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2372 for (i = strlen(T10) ; i+hostlen < 24; i++)
2373 ddf->controller.guid[i] = ' ';
2374
2375 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2376 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2377 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2378 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2379 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2380 memset(ddf->controller.pad, 0xff, 8);
2381 memset(ddf->controller.vendor_data, 0xff, 448);
2382 if (homehost && strlen(homehost) < 440)
2383 strcpy((char*)ddf->controller.vendor_data, homehost);
2384
2385 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2386 pr_err("could not allocate pd\n");
2387 return 0;
2388 }
2389 ddf->phys = pd;
2390 ddf->pdsize = pdsize;
2391
2392 memset(pd, 0xff, pdsize);
2393 memset(pd, 0, sizeof(*pd));
2394 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2395 pd->used_pdes = cpu_to_be16(0);
2396 pd->max_pdes = cpu_to_be16(max_phys_disks);
2397 memset(pd->pad, 0xff, 52);
2398 for (i = 0; i < max_phys_disks; i++)
2399 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2400
2401 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2402 pr_err("could not allocate vd\n");
2403 return 0;
2404 }
2405 ddf->virt = vd;
2406 ddf->vdsize = vdsize;
2407 memset(vd, 0, vdsize);
2408 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2409 vd->populated_vdes = cpu_to_be16(0);
2410 vd->max_vdes = cpu_to_be16(max_virt_disks);
2411 memset(vd->pad, 0xff, 52);
2412
2413 for (i=0; i<max_virt_disks; i++)
2414 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2415
2416 st->sb = ddf;
2417 ddf_set_updates_pending(ddf, NULL);
2418 return 1;
2419 }
2420
2421 static int chunk_to_shift(int chunksize)
2422 {
2423 return ffs(chunksize/512)-1;
2424 }
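/* e.g. chunk_to_shift(65536) == 7; the reverse mapping used when reading
 * the metadata is 512 << chunk_shift == 65536 (cf. getinfo_super_ddf_bvd).
 */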
2425
2426 struct extent {
2427 unsigned long long start, size;
2428 };
2429 static int cmp_extent(const void *av, const void *bv)
2430 {
2431 const struct extent *a = av;
2432 const struct extent *b = bv;
2433 if (a->start < b->start)
2434 return -1;
2435 if (a->start > b->start)
2436 return 1;
2437 return 0;
2438 }
2439
2440 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2441 {
2442 /* Find a list of used extents on the given physical device
2443 * (dl) of the given ddf.
2444 * Return a malloced array of 'struct extent', terminated by a zero-size entry at config_size.
2445 */
2446 struct extent *rv;
2447 int n = 0;
2448 unsigned int i;
2449 __u16 state;
2450
2451 if (dl->pdnum < 0)
2452 return NULL;
2453 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2454
2455 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2456 return NULL;
2457
2458 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2459
2460 for (i = 0; i < ddf->max_part; i++) {
2461 const struct vd_config *bvd;
2462 unsigned int ibvd;
2463 struct vcl *v = dl->vlist[i];
2464 if (v == NULL ||
2465 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2466 &bvd, &ibvd) == DDF_NOTFOUND)
2467 continue;
2468 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2469 rv[n].size = be64_to_cpu(bvd->blocks);
2470 n++;
2471 }
2472 qsort(rv, n, sizeof(*rv), cmp_extent);
2473
2474 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2475 rv[n].size = 0;
2476 return rv;
2477 }
2478
2479 static unsigned long long find_space(
2480 struct ddf_super *ddf, struct dl *dl,
2481 unsigned long long data_offset,
2482 unsigned long long *size)
2483 {
2484 /* Find if the requested amount of space is available.
2485 * If it is, return start.
2486 * If not, set *size to largest space.
2487 * If data_offset != INVALID_SECTORS, then the space must start
2488 * at this location.
2489 */
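 /* Illustrative: if the sorted extents are { start 2048, size 1024 } plus
  * the terminating { start config_size, size 0 } entry, the candidate gaps
  * are [0, 2048) and [3072, config_size). A request for *size <= 2048 with
  * data_offset == INVALID_SECTORS returns 0; otherwise the first gap that
  * is large enough (or, when data_offset is given, the gap containing it)
  * is chosen.
  */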
2490 struct extent *e = get_extents(ddf, dl);
2491 int i = 0;
2492 unsigned long long pos = 0;
2493 unsigned long long max_size = 0;
2494
2495 if (!e) {
2496 *size = 0;
2497 return INVALID_SECTORS;
2498 }
2499 do {
2500 unsigned long long esize = e[i].start - pos;
2501 if (data_offset != INVALID_SECTORS &&
2502 pos <= data_offset &&
2503 e[i].start > data_offset) {
2504 pos = data_offset;
2505 esize = e[i].start - pos;
2506 }
2507 if (data_offset != INVALID_SECTORS &&
2508 pos != data_offset) {
2509 i++;
2510 continue;
2511 }
2512 if (esize >= *size) {
2513 /* Found! */
2514 free(e);
2515 return pos;
2516 }
2517 if (esize > max_size)
2518 max_size = esize;
2519 pos = e[i].start + e[i].size;
2520 i++;
2521 } while (e[i-1].size);
2522 *size = max_size;
2523 free(e);
2524 return INVALID_SECTORS;
2525 }
2526
2527 static int init_super_ddf_bvd(struct supertype *st,
2528 mdu_array_info_t *info,
2529 unsigned long long size,
2530 char *name, char *homehost,
2531 int *uuid, unsigned long long data_offset)
2532 {
2533 /* We are creating a BVD inside a pre-existing container.
2534 * so st->sb is already set.
2535 * We need to create a new vd_config and a new virtual_entry
2536 */
2537 struct ddf_super *ddf = st->sb;
2538 unsigned int venum, i;
2539 struct virtual_entry *ve;
2540 struct vcl *vcl;
2541 struct vd_config *vc;
2542
2543 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2544 pr_err("This ddf already has an array called %s\n", name);
2545 return 0;
2546 }
2547 venum = find_unused_vde(ddf);
2548 if (venum == DDF_NOTFOUND) {
2549 pr_err("Cannot find spare slot for virtual disk\n");
2550 return 0;
2551 }
2552 ve = &ddf->virt->entries[venum];
2553
2554 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2555 * timestamp, random number
2556 */
2557 make_header_guid(ve->guid);
2558 ve->unit = cpu_to_be16(info->md_minor);
2559 ve->pad0 = 0xFFFF;
2560 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2561 DDF_GUID_LEN);
2562 ve->type = cpu_to_be16(0);
2563 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2564 if (info->state & 1) /* clean */
2565 ve->init_state = DDF_init_full;
2566 else
2567 ve->init_state = DDF_init_not;
2568
2569 memset(ve->pad1, 0xff, 14);
2570 memset(ve->name, '\0', sizeof(ve->name));
2571 if (name) {
2572 int l = strnlen(name, sizeof(ve->name));
2573 memcpy(ve->name, name, l);
2574 }
2575 ddf->virt->populated_vdes =
2576 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2577
2578 /* Now create a new vd_config */
2579 if (posix_memalign((void**)&vcl, 512,
2580 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2581 pr_err("could not allocate vd_config\n");
2582 return 0;
2583 }
2584 vcl->vcnum = venum;
2585 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2586 vc = &vcl->conf;
2587
2588 vc->magic = DDF_VD_CONF_MAGIC;
2589 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2590 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2591 vc->seqnum = cpu_to_be32(1);
2592 memset(vc->pad0, 0xff, 24);
2593 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2594 if (layout_md2ddf(info, vc) == -1 ||
2595 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2596 pr_err("unsupported RAID level/layout %d/%d with %d disks\n",
2597 info->level, info->layout, info->raid_disks);
2598 free(vcl);
2599 return 0;
2600 }
2601 vc->sec_elmnt_seq = 0;
2602 if (alloc_other_bvds(ddf, vcl) != 0) {
2603 pr_err("could not allocate other bvds\n");
2604 free(vcl);
2605 return 0;
2606 }
2607 vc->blocks = cpu_to_be64(size * 2);
2608 vc->array_blocks = cpu_to_be64(
2609 calc_array_size(info->level, info->raid_disks, info->layout,
2610 info->chunk_size, size * 2));
2611 memset(vc->pad1, 0xff, 8);
2612 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2613 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2614 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2615 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2616 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2617 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2618 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2619 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2620 memset(vc->cache_pol, 0, 8);
2621 vc->bg_rate = 0x80;
2622 memset(vc->pad2, 0xff, 3);
2623 memset(vc->pad3, 0xff, 52);
2624 memset(vc->pad4, 0xff, 192);
2625 memset(vc->v0, 0xff, 32);
2626 memset(vc->v1, 0xff, 32);
2627 memset(vc->v2, 0xff, 16);
2628 memset(vc->v3, 0xff, 16);
2629 memset(vc->vendor, 0xff, 32);
2630
2631 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2632 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2633
2634 for (i = 1; i < vc->sec_elmnt_count; i++) {
2635 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2636 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2637 }
2638
2639 vcl->next = ddf->conflist;
2640 ddf->conflist = vcl;
2641 ddf->currentconf = vcl;
2642 ddf_set_updates_pending(ddf, NULL);
2643 return 1;
2644 }
2645
2646 static void add_to_super_ddf_bvd(struct supertype *st,
2647 mdu_disk_info_t *dk, int fd, char *devname,
2648 unsigned long long data_offset)
2649 {
2650 /* fd and devname identify a device within the ddf container (st).
2651 * dk identifies a location in the new BVD.
2652 * We need to find suitable free space in that device and update
2653 * the phys_refnum and lba_offset for the newly created vd_config.
2654 * We might also want to update the type in the phys_disk
2655 * section.
2656 *
2657 * Alternately: fd == -1 and we have already chosen which device to
2658 * use, recording it in dl->raiddisk.
2659 */
2660 struct dl *dl;
2661 struct ddf_super *ddf = st->sb;
2662 struct vd_config *vc;
2663 unsigned int i;
2664 unsigned long long blocks, pos;
2665 unsigned int raid_disk = dk->raid_disk;
2666
2667 if (fd == -1) {
2668 for (dl = ddf->dlist; dl ; dl = dl->next)
2669 if (dl->raiddisk == dk->raid_disk)
2670 break;
2671 } else {
2672 for (dl = ddf->dlist; dl ; dl = dl->next)
2673 if (dl->major == dk->major &&
2674 dl->minor == dk->minor)
2675 break;
2676 }
2677 if (!dl || dl->pdnum < 0 || ! (dk->state & (1<<MD_DISK_SYNC)))
2678 return;
2679
2680 vc = &ddf->currentconf->conf;
2681 if (vc->sec_elmnt_count > 1) {
2682 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2683 if (raid_disk >= n)
2684 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2685 raid_disk %= n;
2686 }
2687
2688 blocks = be64_to_cpu(vc->blocks);
2689 if (ddf->currentconf->block_sizes)
2690 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2691
2692 pos = find_space(ddf, dl, data_offset, &blocks);
2693 if (pos == INVALID_SECTORS)
2694 return;
2695
2696 ddf->currentdev = dk->raid_disk;
2697 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2698 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2699
2700 for (i = 0; i < ddf->max_part ; i++)
2701 if (dl->vlist[i] == NULL)
2702 break;
2703 if (i == ddf->max_part)
2704 return;
2705 dl->vlist[i] = ddf->currentconf;
2706
2707 if (fd >= 0)
2708 dl->fd = fd;
2709 if (devname)
2710 dl->devname = devname;
2711
2712 /* Check if we can mark array as optimal yet */
2713 i = ddf->currentconf->vcnum;
2714 ddf->virt->entries[i].state =
2715 (ddf->virt->entries[i].state & ~DDF_state_mask)
2716 | get_svd_state(ddf, ddf->currentconf);
2717 be16_clear(ddf->phys->entries[dl->pdnum].type,
2718 cpu_to_be16(DDF_Global_Spare));
2719 be16_set(ddf->phys->entries[dl->pdnum].type,
2720 cpu_to_be16(DDF_Active_in_VD));
2721 dprintf("added disk %d/%08x to VD %d/%s as disk %d\n",
2722 dl->pdnum, be32_to_cpu(dl->disk.refnum),
2723 ddf->currentconf->vcnum, guid_str(vc->guid),
2724 dk->raid_disk);
2725 ddf_set_updates_pending(ddf, vc);
2726 }
2727
2728 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2729 {
2730 unsigned int i;
2731 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2732 if (all_ff(ddf->phys->entries[i].guid))
2733 return i;
2734 }
2735 return DDF_NOTFOUND;
2736 }
2737
2738 static void _set_config_size(struct phys_disk_entry *pde, const struct dl *dl)
2739 {
2740 __u64 cfs, t;
2741 cfs = min(dl->size - 32*1024*2ULL, be64_to_cpu(dl->primary_lba));
2742 t = be64_to_cpu(dl->secondary_lba);
2743 if (t != ~(__u64)0)
2744 cfs = min(cfs, t);
2745 /*
2746 * Some vendor DDF structures interpret workspace_lba
2747 * very differently than we do: Make a sanity check on the value.
2748 */
2749 t = be64_to_cpu(dl->workspace_lba);
2750 if (t < cfs) {
2751 __u64 wsp = cfs - t;
2752 if (wsp > 1024*1024*2ULL && wsp > dl->size / 16) {
2753 pr_err("%x:%x: workspace size 0x%llx too big, ignoring\n",
2754 dl->major, dl->minor, (unsigned long long)wsp);
2755 } else
2756 cfs = t;
2757 }
2758 pde->config_size = cpu_to_be64(cfs);
2759 dprintf("%x:%x config_size %llx, DDF structure is %llx blocks\n",
2760 dl->major, dl->minor,
2761 (unsigned long long)cfs, (unsigned long long)(dl->size-cfs));
2762 }
2763
2764 /* Add a device to a container, either while creating it or while
2765 * expanding a pre-existing container
2766 */
2767 static int add_to_super_ddf(struct supertype *st,
2768 mdu_disk_info_t *dk, int fd, char *devname,
2769 unsigned long long data_offset)
2770 {
2771 struct ddf_super *ddf = st->sb;
2772 struct dl *dd;
2773 time_t now;
2774 struct tm *tm;
2775 unsigned long long size;
2776 struct phys_disk_entry *pde;
2777 unsigned int n, i;
2778 struct stat stb;
2779 __u32 *tptr;
2780
2781 if (ddf->currentconf) {
2782 add_to_super_ddf_bvd(st, dk, fd, devname, data_offset);
2783 return 0;
2784 }
2785
2786 /* This is device numbered dk->number. We need to create
2787 * a phys_disk entry and a more detailed disk_data entry.
2788 */
2789 fstat(fd, &stb);
2790 n = find_unused_pde(ddf);
2791 if (n == DDF_NOTFOUND) {
2792 pr_err("No free slot in array, cannot add disk\n");
2793 return 1;
2794 }
2795 pde = &ddf->phys->entries[n];
2796 get_dev_size(fd, NULL, &size);
2797 if (size <= 32*1024*1024) {
2798 pr_err("device size must be at least 32MB\n");
2799 return 1;
2800 }
2801 size >>= 9;
2802
2803 if (posix_memalign((void**)&dd, 512,
2804 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2805 pr_err("could allocate buffer for new disk, aborting\n");
2806 return 1;
2807 }
2808 dd->major = major(stb.st_rdev);
2809 dd->minor = minor(stb.st_rdev);
2810 dd->devname = devname;
2811 dd->fd = fd;
2812 dd->spare = NULL;
2813
2814 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2815 now = time(0);
2816 tm = localtime(&now);
2817 sprintf(dd->disk.guid, "%8s%04d%02d%02d", T10,
2818 (__u16)tm->tm_year+1900,
2819 (__u8)tm->tm_mon+1, (__u8)tm->tm_mday);
2820 tptr = (__u32 *)(dd->disk.guid + 16);
2821 *tptr++ = random32();
2822 *tptr = random32();
2823
2824 do {
2825 /* Cannot be bothered finding a CRC of some irrelevant details*/
2826 dd->disk.refnum._v32 = random32();
2827 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2828 i > 0; i--)
2829 if (be32_eq(ddf->phys->entries[i-1].refnum,
2830 dd->disk.refnum))
2831 break;
2832 } while (i > 0);
2833
2834 dd->disk.forced_ref = 1;
2835 dd->disk.forced_guid = 1;
2836 memset(dd->disk.vendor, ' ', 32);
2837 memcpy(dd->disk.vendor, "Linux", 5);
2838 memset(dd->disk.pad, 0xff, 442);
2839 for (i = 0; i < ddf->max_part ; i++)
2840 dd->vlist[i] = NULL;
2841
2842 dd->pdnum = n;
2843
2844 if (st->update_tail) {
2845 int len = (sizeof(struct phys_disk) +
2846 sizeof(struct phys_disk_entry));
2847 struct phys_disk *pd;
2848
2849 pd = xmalloc(len);
2850 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2851 pd->used_pdes = cpu_to_be16(n);
2852 pde = &pd->entries[0];
2853 dd->mdupdate = pd;
2854 } else
2855 ddf->phys->used_pdes = cpu_to_be16(
2856 1 + be16_to_cpu(ddf->phys->used_pdes));
2857
2858 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2859 pde->refnum = dd->disk.refnum;
2860 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2861 pde->state = cpu_to_be16(DDF_Online);
2862 dd->size = size;
2863 /*
2864 * If there is already a device in dlist, try to reserve the same
2865 * amount of workspace. Otherwise, use 32MB.
2866 * We checked disk size above already.
2867 */
2868 #define __calc_lba(new, old, lba, mb) do { \
2869 unsigned long long dif; \
2870 if ((old) != NULL) \
2871 dif = (old)->size - be64_to_cpu((old)->lba); \
2872 else \
2873 dif = (new)->size; \
2874 if ((new)->size > dif) \
2875 (new)->lba = cpu_to_be64((new)->size - dif); \
2876 else \
2877 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2878 } while (0)
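 /* A minimal worked example of the macro (assumed sizes, in 512-byte
  * sectors): with no existing disk in dlist and dd->size == 2000000,
  *   __calc_lba(dd, NULL, workspace_lba, 32) -> 2000000 - 32*1024*2 = 1934464
  *   __calc_lba(dd, NULL, primary_lba, 16)   -> 2000000 - 16*1024*2 = 1967232
  * i.e. the workspace ends up 32MB from the end and the primary header
  * 16MB from the end. With an existing disk, the same distance-from-end
  * as that disk is reused, provided the new disk is large enough.
  */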
2879 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2880 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2881 if (ddf->dlist == NULL ||
2882 be64_to_cpu(ddf->dlist->secondary_lba) != ~(__u64)0)
2883 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2884 _set_config_size(pde, dd);
2885
2886 sprintf(pde->path, "%17.17s", "Information: nil");
2887 memset(pde->pad, 0xff, 6);
2888
2889 if (st->update_tail) {
2890 dd->next = ddf->add_list;
2891 ddf->add_list = dd;
2892 } else {
2893 dd->next = ddf->dlist;
2894 ddf->dlist = dd;
2895 ddf_set_updates_pending(ddf, NULL);
2896 }
2897
2898 return 0;
2899 }
2900
2901 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2902 {
2903 struct ddf_super *ddf = st->sb;
2904 struct dl *dl;
2905
2906 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2907 * disappeared from the container.
2908 * We need to arrange that it disappears from the metadata and
2909 * internal data structures too.
2910 * Most of the work is done by ddf_process_update which edits
2911 * the metadata and closes the file handle and attaches the memory
2912 * where free_updates will free it.
2913 */
2914 for (dl = ddf->dlist; dl ; dl = dl->next)
2915 if (dl->major == dk->major &&
2916 dl->minor == dk->minor)
2917 break;
2918 if (!dl || dl->pdnum < 0)
2919 return -1;
2920
2921 if (st->update_tail) {
2922 int len = (sizeof(struct phys_disk) +
2923 sizeof(struct phys_disk_entry));
2924 struct phys_disk *pd;
2925
2926 pd = xmalloc(len);
2927 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2928 pd->used_pdes = cpu_to_be16(dl->pdnum);
2929 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2930 append_metadata_update(st, pd, len);
2931 }
2932 return 0;
2933 }
2934
2935 /*
2936 * This is the write_init_super method for a ddf container. It is
2937 * called when creating a container or adding another device to a
2938 * container.
2939 */
2940
2941 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
2942 {
2943 unsigned long long sector;
2944 struct ddf_header *header;
2945 int fd, i, n_config, conf_size, buf_size;
2946 int ret = 0;
2947 char *conf;
2948
2949 fd = d->fd;
2950
2951 switch (type) {
2952 case DDF_HEADER_PRIMARY:
2953 header = &ddf->primary;
2954 sector = be64_to_cpu(header->primary_lba);
2955 break;
2956 case DDF_HEADER_SECONDARY:
2957 header = &ddf->secondary;
2958 sector = be64_to_cpu(header->secondary_lba);
2959 break;
2960 default:
2961 return 0;
2962 }
2963 if (sector == ~(__u64)0)
2964 return 0;
2965
2966 header->type = type;
2967 header->openflag = 1;
2968 header->crc = calc_crc(header, 512);
2969
2970 lseek64(fd, sector<<9, 0);
2971 if (write(fd, header, 512) < 0)
2972 goto out;
2973
2974 ddf->controller.crc = calc_crc(&ddf->controller, 512);
2975 if (write(fd, &ddf->controller, 512) < 0)
2976 goto out;
2977
2978 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
2979 if (write(fd, ddf->phys, ddf->pdsize) < 0)
2980 goto out;
2981 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
2982 if (write(fd, ddf->virt, ddf->vdsize) < 0)
2983 goto out;
2984
2985 /* Now write lots of config records. */
2986 n_config = ddf->max_part;
2987 conf_size = ddf->conf_rec_len * 512;
2988 conf = ddf->conf;
2989 buf_size = conf_size * (n_config + 1);
2990 if (!conf) {
2991 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
2992 goto out;
2993 ddf->conf = conf;
2994 }
2995 for (i = 0 ; i <= n_config ; i++) {
2996 struct vcl *c;
2997 struct vd_config *vdc = NULL;
2998 if (i == n_config) {
2999 c = (struct vcl *)d->spare;
3000 if (c)
3001 vdc = &c->conf;
3002 } else {
3003 unsigned int dummy;
3004 c = d->vlist[i];
3005 if (c)
3006 get_pd_index_from_refnum(
3007 c, d->disk.refnum,
3008 ddf->mppe,
3009 (const struct vd_config **)&vdc,
3010 &dummy);
3011 }
3012 if (vdc) {
3013 dprintf("writing conf record %i on disk %08x for %s/%u\n",
3014 i, be32_to_cpu(d->disk.refnum),
3015 guid_str(vdc->guid),
3016 vdc->sec_elmnt_seq);
3017 vdc->crc = calc_crc(vdc, conf_size);
3018 memcpy(conf + i*conf_size, vdc, conf_size);
3019 } else
3020 memset(conf + i*conf_size, 0xff, conf_size);
3021 }
3022 if (write(fd, conf, buf_size) != buf_size)
3023 goto out;
3024
3025 d->disk.crc = calc_crc(&d->disk, 512);
3026 if (write(fd, &d->disk, 512) < 0)
3027 goto out;
3028
3029 ret = 1;
3030 out:
3031 header->openflag = 0;
3032 header->crc = calc_crc(header, 512);
3033
3034 lseek64(fd, sector<<9, 0);
3035 if (write(fd, header, 512) < 0)
3036 ret = 0;
3037
3038 return ret;
3039 }
3040
3041 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
3042 {
3043 unsigned long long size;
3044 int fd = d->fd;
3045 if (fd < 0)
3046 return 0;
3047
3048 /* We need to fill in the primary, (secondary) and workspace
3049 * LBAs in the headers and set their checksums;
3050 * also checksum phys, virt, etc.
3051 *
3052 * Then write everything out; the anchor is written last.
3053 */
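 /* Default placement when the dl carries no explicit LBAs (device of
  * `size` sectors, illustrative): secondary header and workspace at
  * size - 32*1024*2, primary header at size - 16*1024*2, and the anchor
  * in the very last sector (size - 1), matching the reserved 32MB tail.
  */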
3054 get_dev_size(fd, NULL, &size);
3055 size /= 512;
3056 memcpy(&ddf->anchor, ddf->active, 512);
3057 if (be64_to_cpu(d->workspace_lba) != 0ULL)
3058 ddf->anchor.workspace_lba = d->workspace_lba;
3059 else
3060 ddf->anchor.workspace_lba =
3061 cpu_to_be64(size - 32*1024*2);
3062 if (be64_to_cpu(d->primary_lba) != 0ULL)
3063 ddf->anchor.primary_lba = d->primary_lba;
3064 else
3065 ddf->anchor.primary_lba =
3066 cpu_to_be64(size - 16*1024*2);
3067 if (be64_to_cpu(d->secondary_lba) != 0ULL)
3068 ddf->anchor.secondary_lba = d->secondary_lba;
3069 else
3070 ddf->anchor.secondary_lba =
3071 cpu_to_be64(size - 32*1024*2);
3072 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
3073 memcpy(&ddf->primary, &ddf->anchor, 512);
3074 memcpy(&ddf->secondary, &ddf->anchor, 512);
3075
3076 ddf->anchor.type = DDF_HEADER_ANCHOR;
3077 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
3078 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
3079 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
3080
3081 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
3082 return 0;
3083
3084 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
3085 return 0;
3086
3087 lseek64(fd, (size-1)*512, SEEK_SET);
3088 if (write(fd, &ddf->anchor, 512) < 0)
3089 return 0;
3090
3091 return 1;
3092 }
3093
3094 static int __write_init_super_ddf(struct supertype *st)
3095 {
3096 struct ddf_super *ddf = st->sb;
3097 struct dl *d;
3098 int attempts = 0;
3099 int successes = 0;
3100
3101 pr_state(ddf, __func__);
3102
3103 /* try to write updated metadata,
3104 * if we catch a failure move on to the next disk
3105 */
3106 for (d = ddf->dlist; d; d=d->next) {
3107 attempts++;
3108 successes += _write_super_to_disk(ddf, d);
3109 }
3110
3111 return attempts != successes;
3112 }
3113
3114 static int write_init_super_ddf(struct supertype *st)
3115 {
3116 struct ddf_super *ddf = st->sb;
3117 struct vcl *currentconf = ddf->currentconf;
3118
3119 /* We are done with currentconf - reset it so st refers to the container */
3120 ddf->currentconf = NULL;
3121
3122 if (st->update_tail) {
3123 /* queue the virtual_disk and vd_config as metadata updates */
3124 struct virtual_disk *vd;
3125 struct vd_config *vc;
3126 int len, tlen;
3127 unsigned int i;
3128
3129 if (!currentconf) {
3130 /* Must be adding a physical disk to the container */
3131 int len = (sizeof(struct phys_disk) +
3132 sizeof(struct phys_disk_entry));
3133
3134 /* adding a disk to the container. */
3135 if (!ddf->add_list)
3136 return 0;
3137
3138 append_metadata_update(st, ddf->add_list->mdupdate, len);
3139 ddf->add_list->mdupdate = NULL;
3140 return 0;
3141 }
3142
3143 /* Newly created VD */
3144
3145 /* First the virtual disk. We have a slightly fake header */
3146 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3147 vd = xmalloc(len);
3148 *vd = *ddf->virt;
3149 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3150 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3151 append_metadata_update(st, vd, len);
3152
3153 /* Then the vd_config */
3154 len = ddf->conf_rec_len * 512;
3155 tlen = len * currentconf->conf.sec_elmnt_count;
3156 vc = xmalloc(tlen);
3157 memcpy(vc, &currentconf->conf, len);
3158 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3159 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3160 len);
3161 append_metadata_update(st, vc, tlen);
3162
3163 return 0;
3164 } else {
3165 struct dl *d;
3166 if (!currentconf)
3167 for (d = ddf->dlist; d; d=d->next)
3168 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3169 /* Note: we don't close the fd's now, but a subsequent
3170 * ->free_super() will
3171 */
3172 return __write_init_super_ddf(st);
3173 }
3174 }
3175
3176 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3177 unsigned long long data_offset)
3178 {
3179 /* We must reserve the last 32Meg */
3180 if (devsize <= 32*1024*2)
3181 return 0;
3182 return devsize - 32*1024*2;
3183 }
3184
3185 static int reserve_space(struct supertype *st, int raiddisks,
3186 unsigned long long size, int chunk,
3187 unsigned long long data_offset,
3188 unsigned long long *freesize)
3189 {
3190 /* Find 'raiddisks' spare extents at least 'size' big (but
3191 * only caring about multiples of 'chunk') and remember
3192 * them. If size==0, find the largest size possible.
3193 * Report available size in *freesize.
3194 * If space cannot be found, fail.
3195 */
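 /* Example of the selection below (illustrative sizes): raiddisks == 2,
  * size == 0, and largest free extents of 100G/80G/80G on three disks
  * -> 80G is the biggest size available on at least two disks, rounded
  * down to a multiple of 'chunk'; the first two disks that fit are then
  * given raiddisk slots 0 and 1.
  */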
3196 struct dl *dl;
3197 struct ddf_super *ddf = st->sb;
3198 int cnt = 0;
3199
3200 for (dl = ddf->dlist; dl ; dl=dl->next) {
3201 dl->raiddisk = -1;
3202 dl->esize = 0;
3203 }
3204 /* Now find largest extent on each device */
3205 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3206 unsigned long long minsize = ULLONG_MAX;
3207
3208 find_space(ddf, dl, data_offset, &minsize);
3209 if (minsize >= size && minsize >= (unsigned)chunk) {
3210 cnt++;
3211 dl->esize = minsize;
3212 }
3213 }
3214 if (cnt < raiddisks) {
3215 pr_err("not enough devices with space to create array.\n");
3216 return 0; /* Not enough free extents large enough */
3217 }
3218 if (size == 0) {
3219 /* choose the largest size available on at least 'raiddisks' devices */
3220 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3221 struct dl *dl2;
3222 if (dl->esize <= size)
3223 continue;
3224 /* This is bigger than 'size', see if there are enough */
3225 cnt = 0;
3226 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3227 if (dl2->esize >= dl->esize)
3228 cnt++;
3229 if (cnt >= raiddisks)
3230 size = dl->esize;
3231 }
3232 if (chunk) {
3233 size = size / chunk;
3234 size *= chunk;
3235 }
3236 *freesize = size;
3237 if (size < 32) {
3238 pr_err("not enough spare devices to create array.\n");
3239 return 0;
3240 }
3241 }
3242 /* We have a 'size' of which there are enough spaces.
3243 * We simply do a first-fit */
3244 cnt = 0;
3245 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3246 if (dl->esize < size)
3247 continue;
3248
3249 dl->raiddisk = cnt;
3250 cnt++;
3251 }
3252 return 1;
3253 }
3254
3255 static int
3256 validate_geometry_ddf_container(struct supertype *st,
3257 int level, int raiddisks,
3258 unsigned long long data_offset,
3259 char *dev, unsigned long long *freesize,
3260 int verbose)
3261 {
3262 int fd;
3263 unsigned long long ldsize;
3264
3265 if (!is_container(level))
3266 return 0;
3267 if (!dev)
3268 return 1;
3269
3270 fd = dev_open(dev, O_RDONLY|O_EXCL);
3271 if (fd < 0) {
3272 if (verbose)
3273 pr_err("ddf: Cannot open %s: %s\n",
3274 dev, strerror(errno));
3275 return 0;
3276 }
3277 if (!get_dev_size(fd, dev, &ldsize)) {
3278 close(fd);
3279 return 0;
3280 }
3281 close(fd);
3282 if (freesize) {
3283 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3284 if (*freesize == 0)
3285 return 0;
3286 }
3287
3288 return 1;
3289 }
3290
3291 static int validate_geometry_ddf(struct supertype *st,
3292 int level, int layout, int raiddisks,
3293 int *chunk, unsigned long long size,
3294 unsigned long long data_offset,
3295 char *dev, unsigned long long *freesize,
3296 int consistency_policy, int verbose)
3297 {
3298 int fd;
3299 struct mdinfo *sra;
3300 int cfd;
3301
3302 /* ddf potentially supports lots of things, but it depends on
3303 * what devices are offered (and maybe kernel version?)
3304 * If given unused devices, we will make a container.
3305 * If given devices in a container, we will make a BVD.
3306 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3307 */
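 /* Typical invocations that can end up here (illustrative, not exhaustive):
  *   mdadm --create /dev/md/ddf0 -e ddf -n 3 /dev/sd[bcd]    (container path)
  *   mdadm --create /dev/md/vol0 -l 5 -n 3 /dev/md/ddf0      (BVD inside it)
  */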
3308
3309 if (level == LEVEL_NONE)
3310 level = LEVEL_CONTAINER;
3311 if (is_container(level)) {
3312 /* Must be a fresh device to add to a container */
3313 return validate_geometry_ddf_container(st, level, raiddisks,
3314 data_offset, dev,
3315 freesize, verbose);
3316 }
3317
3318 if (*chunk == UnSet)
3319 *chunk = DEFAULT_CHUNK;
3320
3321 if (!dev) {
3322 mdu_array_info_t array = {
3323 .level = level,
3324 .layout = layout,
3325 .raid_disks = raiddisks
3326 };
3327 struct vd_config conf;
3328 if (layout_md2ddf(&array, &conf) == -1) {
3329 if (verbose)
3330 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3331 level, layout, raiddisks);
3332 return 0;
3333 }
3334 /* Should check layout? etc */
3335
3336 if (st->sb && freesize) {
3337 /* --create was given a container to create in.
3338 * So we need to check that there are enough
3339 * free spaces and return the amount of space.
3340 * We may as well remember which drives were
3341 * chosen so that add_to_super/getinfo_super
3342 * can return them.
3343 */
3344 return reserve_space(st, raiddisks, size, *chunk,
3345 data_offset, freesize);
3346 }
3347 return 1;
3348 }
3349
3350 if (st->sb) {
3351 /* A container has already been opened, so we are
3352 * creating in there. Maybe a BVD, maybe an SVD.
3353 * Should make a distinction one day.
3354 */
3355 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3356 chunk, size, data_offset, dev,
3357 freesize,
3358 verbose);
3359 }
3360 /* This is the first device for the array.
3361 * If it is a container, we read it in and do automagic allocations,
3362 * no other devices should be given.
3363 * Otherwise it must be a member device of a container, and we
3364 * do manual allocation.
3365 * Later we should check for a BVD and make an SVD.
3366 */
3367 fd = open(dev, O_RDONLY|O_EXCL, 0);
3368 if (fd >= 0) {
3369 close(fd);
3370 /* Just a bare device, no good to us */
3371 if (verbose)
3372 pr_err("ddf: Cannot create this array on device %s - a container is required.\n",
3373 dev);
3374 return 0;
3375 }
3376 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3377 if (verbose)
3378 pr_err("ddf: Cannot open %s: %s\n",
3379 dev, strerror(errno));
3380 return 0;
3381 }
3382 /* Well, it is in use by someone, maybe a 'ddf' container. */
3383 cfd = open_container(fd);
3384 if (cfd < 0) {
3385 close(fd);
3386 if (verbose)
3387 pr_err("ddf: Cannot use %s: %s\n",
3388 dev, strerror(EBUSY));
3389 return 0;
3390 }
3391 sra = sysfs_read(cfd, NULL, GET_VERSION);
3392 close(fd);
3393 if (sra && sra->array.major_version == -1 &&
3394 strcmp(sra->text_version, "ddf") == 0) {
3395 /* This is a member of a ddf container. Load the container
3396 * and try to create a bvd
3397 */
3398 struct ddf_super *ddf;
3399 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3400 st->sb = ddf;
3401 strcpy(st->container_devnm, fd2devnm(cfd));
3402 close(cfd);
3403 return validate_geometry_ddf_bvd(st, level, layout,
3404 raiddisks, chunk, size,
3405 data_offset,
3406 dev, freesize,
3407 verbose);
3408 }
3409 close(cfd);
3410 } else /* device may belong to a different container */
3411 return 0;
3412
3413 return 1;
3414 }
3415
3416 static int validate_geometry_ddf_bvd(struct supertype *st,
3417 int level, int layout, int raiddisks,
3418 int *chunk, unsigned long long size,
3419 unsigned long long data_offset,
3420 char *dev, unsigned long long *freesize,
3421 int verbose)
3422 {
3423 dev_t rdev;
3424 struct ddf_super *ddf = st->sb;
3425 struct dl *dl;
3426 unsigned long long maxsize;
3427 /* ddf/bvd supports lots of things, but not containers */
3428 if (is_container(level)) {
3429 if (verbose)
3430 pr_err("DDF cannot create a container within an container\n");
3431 return 0;
3432 }
3433 /* We must have the container info already read in. */
3434 if (!ddf)
3435 return 0;
3436
3437 if (!dev) {
3438 /* General test: make sure there is space for
3439 * 'raiddisks' device extents of size 'size'.
3440 */
3441 unsigned long long minsize = size;
3442 int dcnt = 0;
3443 if (minsize == 0)
3444 minsize = 8;
3445 for (dl = ddf->dlist; dl ; dl = dl->next) {
3446 if (find_space(ddf, dl, data_offset, &minsize) !=
3447 INVALID_SECTORS)
3448 dcnt++;
3449 }
3450 if (dcnt < raiddisks) {
3451 if (verbose)
3452 pr_err("ddf: Not enough devices with space for this array (%d < %d)\n",
3453 dcnt, raiddisks);
3454 return 0;
3455 }
3456 return 1;
3457 }
3458 /* This device must be a member of the set */
3459 if (!stat_is_blkdev(dev, &rdev))
3460 return 0;
3461 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3462 if (dl->major == (int)major(rdev) &&
3463 dl->minor == (int)minor(rdev))
3464 break;
3465 }
3466 if (!dl) {
3467 if (verbose)
3468 pr_err("ddf: %s is not in the same DDF set\n",
3469 dev);
3470 return 0;
3471 }
3472 maxsize = ULLONG_MAX;
3473 find_space(ddf, dl, data_offset, &maxsize);
3474 *freesize = maxsize;
3475
3476 return 1;
3477 }
3478
3479 static int load_super_ddf_all(struct supertype *st, int fd,
3480 void **sbp, char *devname)
3481 {
3482 struct mdinfo *sra;
3483 struct ddf_super *super;
3484 struct mdinfo *sd, *best = NULL;
3485 int bestseq = 0;
3486 int seq;
3487 char nm[20];
3488 int dfd;
3489
3490 sra = sysfs_read(fd, NULL, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3491 if (!sra)
3492 return 1;
3493 if (sra->array.major_version != -1 ||
3494 sra->array.minor_version != -2 ||
3495 strcmp(sra->text_version, "ddf") != 0)
3496 return 1;
3497
3498 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3499 return 1;
3500 memset(super, 0, sizeof(*super));
3501
3502 /* first, try each device, and choose the best ddf */
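 /* "Best" here means the highest header sequence number; a header left
  * with openflag set is treated as one generation older (seq--), since
  * its last update may not have been fully committed.
  */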
3503 for (sd = sra->devs ; sd ; sd = sd->next) {
3504 int rv;
3505 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3506 dfd = dev_open(nm, O_RDONLY);
3507 if (dfd < 0)
3508 return 2;
3509 rv = load_ddf_headers(dfd, super, NULL);
3510 close(dfd);
3511 if (rv == 0) {
3512 seq = be32_to_cpu(super->active->seq);
3513 if (super->active->openflag)
3514 seq--;
3515 if (!best || seq > bestseq) {
3516 bestseq = seq;
3517 best = sd;
3518 }
3519 }
3520 }
3521 if (!best)
3522 return 1;
3523 /* OK, load this ddf */
3524 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3525 dfd = dev_open(nm, O_RDONLY);
3526 if (dfd < 0)
3527 return 1;
3528 load_ddf_headers(dfd, super, NULL);
3529 load_ddf_global(dfd, super, NULL);
3530 close(dfd);
3531 /* Now we need the device-local bits */
3532 for (sd = sra->devs ; sd ; sd = sd->next) {
3533 int rv;
3534
3535 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3536 dfd = dev_open(nm, O_RDWR);
3537 if (dfd < 0)
3538 return 2;
3539 rv = load_ddf_headers(dfd, super, NULL);
3540 if (rv == 0)
3541 rv = load_ddf_local(dfd, super, NULL, 1);
3542 if (rv)
3543 return 1;
3544 }
3545
3546 *sbp = super;
3547 if (st->ss == NULL) {
3548 st->ss = &super_ddf;
3549 st->minor_version = 0;
3550 st->max_devs = 512;
3551 }
3552 strcpy(st->container_devnm, fd2devnm(fd));
3553 return 0;
3554 }
3555
3556 static int load_container_ddf(struct supertype *st, int fd,
3557 char *devname)
3558 {
3559 return load_super_ddf_all(st, fd, &st->sb, devname);
3560 }
3561
3562 static int check_secondary(const struct vcl *vc)
3563 {
3564 const struct vd_config *conf = &vc->conf;
3565 int i;
3566
3567 /* The only DDF secondary RAID level md can support is
3568 * RAID 10, if the stripe sizes and Basic volume sizes
3569 * are all equal.
3570 * Other configurations could in theory be supported by exposing
3571 * the BVDs to user space and using device mapper for the secondary
3572 * mapping. So far we don't support that.
3573 */
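 /* Concretely (illustrative): a 4-disk RAID10 is represented as two
  * mirrored BVDs (prl == DDF_RAID1, prim_elmnt_count == 2) combined with
  * srl == DDF_2STRIPED and sec_elmnt_count == 2; that is the shape the
  * checks below accept.
  */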
3574
3575 __u64 sec_elements[4] = {0, 0, 0, 0};
3576 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= ((__u64)1 << ((n)&63)))
3577 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & ((__u64)1 << ((n)&63))) != 0)
3578
3579 if (vc->other_bvds == NULL) {
3580 pr_err("No BVDs for secondary RAID found\n");
3581 return -1;
3582 }
3583 if (conf->prl != DDF_RAID1) {
3584 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3585 return -1;
3586 }
3587 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3588 pr_err("Secondary RAID level %d is unsupported\n",
3589 conf->srl);
3590 return -1;
3591 }
3592 __set_sec_seen(conf->sec_elmnt_seq);
3593 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3594 const struct vd_config *bvd = vc->other_bvds[i];
3595 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3596 continue;
3597 if (bvd->srl != conf->srl) {
3598 pr_err("Inconsistent secondary RAID level across BVDs\n");
3599 return -1;
3600 }
3601 if (bvd->prl != conf->prl) {
3602 pr_err("Different RAID levels for BVDs are unsupported\n");
3603 return -1;
3604 }
3605 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3606 pr_err("All BVDs must have the same number of primary elements\n");
3607 return -1;
3608 }
3609 if (bvd->chunk_shift != conf->chunk_shift) {
3610 pr_err("Different strip sizes for BVDs are unsupported\n");
3611 return -1;
3612 }
3613 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3614 pr_err("Different BVD sizes are unsupported\n");
3615 return -1;
3616 }
3617 __set_sec_seen(bvd->sec_elmnt_seq);
3618 }
3619 for (i = 0; i < conf->sec_elmnt_count; i++) {
3620 if (!__was_sec_seen(i)) {
3621 /* pr_err("BVD %d is missing\n", i); */
3622 return -1;
3623 }
3624 }
3625 return 0;
3626 }
3627
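/* Map a physical disk reference number to its role in virtual disk vc:
 * the return value is the overall raid-disk index (sec_elmnt_seq *
 * prim_elmnt_count + position within the BVD), *bvd and *idx are set to
 * the matching BVD config and phys_refnum slot, and DDF_NOTFOUND is
 * returned if the disk is not a member of vc.
 */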
3628 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3629 be32 refnum, unsigned int nmax,
3630 const struct vd_config **bvd,
3631 unsigned int *idx)
3632 {
3633 unsigned int i, j, n, sec, cnt;
3634
3635 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3636 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3637
3638 for (i = 0, j = 0 ; i < nmax ; i++) {
3639 /* j counts valid entries for this BVD */
3640 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3641 *bvd = &vc->conf;
3642 *idx = i;
3643 return sec * cnt + j;
3644 }
3645 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3646 j++;
3647 }
3648 if (vc->other_bvds == NULL)
3649 goto bad;
3650
3651 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3652 struct vd_config *vd = vc->other_bvds[n-1];
3653 sec = vd->sec_elmnt_seq;
3654 if (sec == DDF_UNUSED_BVD)
3655 continue;
3656 for (i = 0, j = 0 ; i < nmax ; i++) {
3657 if (be32_eq(vd->phys_refnum[i], refnum)) {
3658 *bvd = vd;
3659 *idx = i;
3660 return sec * cnt + j;
3661 }
3662 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3663 j++;
3664 }
3665 }
3666 bad:
3667 *bvd = NULL;
3668 return DDF_NOTFOUND;
3669 }
3670
3671 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3672 {
3673 /* Given a container loaded by load_super_ddf_all,
3674 * extract information about all the arrays into
3675 * an mdinfo tree.
3676 *
3677 * For each vcl in conflist: create an mdinfo, fill it in,
3678 * then look for matching devices (phys_refnum) in dlist
3679 * and create appropriate device mdinfo.
3680 */
3681 struct ddf_super *ddf = st->sb;
3682 struct mdinfo *rest = NULL;
3683 struct vcl *vc;
3684
3685 for (vc = ddf->conflist ; vc ; vc=vc->next) {
3686 unsigned int i;
3687 struct mdinfo *this;
3688 char *ep;
3689 __u32 *cptr;
3690 unsigned int pd;
3691
3692 if (subarray &&
3693 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3694 *ep != '\0'))
3695 continue;
3696
3697 if (vc->conf.sec_elmnt_count > 1) {
3698 if (check_secondary(vc) != 0)
3699 continue;
3700 }
3701
3702 this = xcalloc(1, sizeof(*this));
3703 this->next = rest;
3704 rest = this;
3705
3706 if (layout_ddf2md(&vc->conf, &this->array))
3707 continue;
3708 this->array.md_minor = -1;
3709 this->array.major_version = -1;
3710 this->array.minor_version = -2;
3711 this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
3712 cptr = (__u32 *)(vc->conf.guid + 16);
3713 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3714 this->array.utime = DECADE +
3715 be32_to_cpu(vc->conf.timestamp);
3716 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3717
3718 i = vc->vcnum;
3719 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3720 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3721 DDF_init_full) {
3722 this->array.state = 0;
3723 this->resync_start = 0;
3724 } else {
3725 this->array.state = 1;
3726 this->resync_start = MaxSector;
3727 }
3728 _ddf_array_name(this->name, ddf, i);
3729 memset(this->uuid, 0, sizeof(this->uuid));
3730 this->component_size = be64_to_cpu(vc->conf.blocks);
3731 this->array.size = this->component_size / 2;
3732 this->container_member = i;
3733
3734 ddf->currentconf = vc;
3735 uuid_from_super_ddf(st, this->uuid);
3736 if (!subarray)
3737 ddf->currentconf = NULL;
3738
3739 sprintf(this->text_version, "/%s/%d",
3740 st->container_devnm, this->container_member);
3741
3742 for (pd = 0; pd < be16_to_cpu(ddf->phys->max_pdes); pd++) {
3743 struct mdinfo *dev;
3744 struct dl *d;
3745 const struct vd_config *bvd;
3746 unsigned int iphys;
3747 int stt;
3748
3749 if (be32_to_cpu(ddf->phys->entries[pd].refnum) ==
3750 0xffffffff)
3751 continue;
3752
3753 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3754 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding)) !=
3755 DDF_Online)
3756 continue;
3757
3758 i = get_pd_index_from_refnum(
3759 vc, ddf->phys->entries[pd].refnum,
3760 ddf->mppe, &bvd, &iphys);
3761 if (i == DDF_NOTFOUND)
3762 continue;
3763
3764 this->array.working_disks++;
3765
3766 for (d = ddf->dlist; d ; d=d->next)
3767 if (be32_eq(d->disk.refnum,
3768 ddf->phys->entries[pd].refnum))
3769 break;
3770 if (d == NULL)
3771 /* Haven't found that one yet, maybe there are others */
3772 continue;
3773
3774 dev = xcalloc(1, sizeof(*dev));
3775 dev->next = this->devs;
3776 this->devs = dev;
3777
3778 dev->disk.number = be32_to_cpu(d->disk.refnum);
3779 dev->disk.major = d->major;
3780 dev->disk.minor = d->minor;
3781 dev->disk.raid_disk = i;
3782 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3783 dev->recovery_start = MaxSector;
3784
3785 dev->events = be32_to_cpu(ddf->active->seq);
3786 dev->data_offset =
3787 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3788 dev->component_size = be64_to_cpu(bvd->blocks);
3789 if (d->devname)
3790 strcpy(dev->name, d->devname);
3791 }
3792 }
3793 return rest;
3794 }
3795
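/* Write DDF metadata to the device opened as 'fd'.
 * If this superblock already describes disks (dlist/conflist), find the
 * matching dl entry by major/minor and write the full metadata via
 * _write_super_to_disk(). Otherwise zero the last 512-byte block of the
 * device, where the DDF anchor would live.
 */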
3796 static int store_super_ddf(struct supertype *st, int fd)
3797 {
3798 struct ddf_super *ddf = st->sb;
3799 unsigned long long dsize;
3800 void *buf;
3801 int rc;
3802
3803 if (!ddf)
3804 return 1;
3805
3806 if (!get_dev_size(fd, NULL, &dsize))
3807 return 1;
3808
3809 if (ddf->dlist || ddf->conflist) {
3810 struct stat sta;
3811 struct dl *dl;
3812 int ofd, ret;
3813
3814 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3815 pr_err("file descriptor for invalid device\n");
3816 return 1;
3817 }
3818 for (dl = ddf->dlist; dl; dl = dl->next)
3819 if (dl->major == (int)major(sta.st_rdev) &&
3820 dl->minor == (int)minor(sta.st_rdev))
3821 break;
3822 if (!dl) {
3823 pr_err("couldn't find disk %d/%d\n",
3824 (int)major(sta.st_rdev),
3825 (int)minor(sta.st_rdev));
3826 return 1;
3827 }
3828 ofd = dl->fd;
3829 dl->fd = fd;
3830 ret = (_write_super_to_disk(ddf, dl) != 1);
3831 dl->fd = ofd;
3832 return ret;
3833 }
3834
3835 if (posix_memalign(&buf, 512, 512) != 0)
3836 return 1;
3837 memset(buf, 0, 512);
3838
3839 lseek64(fd, dsize-512, 0);
3840 rc = write(fd, buf, 512);
3841 free(buf);
3842 if (rc < 0)
3843 return 1;
3844 return 0;
3845 }
3846
3847 static int compare_super_ddf(struct supertype *st, struct supertype *tst,
3848 int verbose)
3849 {
3850 /*
3851 * return:
3852 * 0 same, or first was empty, and second was copied
3853 * 1 second had wrong magic number - but that isn't possible
3854 * 2 wrong uuid
3855 * 3 wrong other info
3856 */
3857 struct ddf_super *first = st->sb;
3858 struct ddf_super *second = tst->sb;
3859 struct dl *dl1, *dl2;
3860 struct vcl *vl1, *vl2;
3861 unsigned int max_vds, max_pds, pd, vd;
3862
3863 if (!first) {
3864 st->sb = tst->sb;
3865 tst->sb = NULL;
3866 return 0;
3867 }
3868
3869 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3870 return 2;
3871
3872 /* It is only OK to compare info in the anchor. Anything else
3873 * could be changing due to a reconfig, so it must be ignored.
3874 * The guid really should be enough anyway.
3875 */
3876
3877 if (!be32_eq(first->active->seq, second->active->seq)) {
3878 dprintf("sequence number mismatch %u<->%u\n",
3879 be32_to_cpu(first->active->seq),
3880 be32_to_cpu(second->active->seq));
3881 return 0;
3882 }
3883
3884 /*
3885 * At this point we are fairly sure that the metadata matches.
3886 * But the new disk may contain additional local data.
3887 * Add it to the superblock.
3888 */
3889 max_vds = be16_to_cpu(first->active->max_vd_entries);
3890 max_pds = be16_to_cpu(first->phys->max_pdes);
3891 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3892 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3893 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3894 DDF_GUID_LEN))
3895 break;
3896 if (vl1) {
3897 if (vl1->other_bvds != NULL &&
3898 vl1->conf.sec_elmnt_seq !=
3899 vl2->conf.sec_elmnt_seq) {
3900 dprintf("adding BVD %u\n",
3901 vl2->conf.sec_elmnt_seq);
3902 add_other_bvd(vl1, &vl2->conf,
3903 first->conf_rec_len*512);
3904 }
3905 continue;
3906 }
3907
3908 if (posix_memalign((void **)&vl1, 512,
3909 (first->conf_rec_len*512 +
3910 offsetof(struct vcl, conf))) != 0) {
3911 pr_err("could not allocate vcl buf\n");
3912 return 3;
3913 }
3914
3915 vl1->next = first->conflist;
3916 vl1->block_sizes = NULL;
3917 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3918 if (alloc_other_bvds(first, vl1) != 0) {
3919 pr_err("could not allocate other bvds\n");
3920 free(vl1);
3921 return 3;
3922 }
3923 for (vd = 0; vd < max_vds; vd++)
3924 if (!memcmp(first->virt->entries[vd].guid,
3925 vl1->conf.guid, DDF_GUID_LEN))
3926 break;
3927 vl1->vcnum = vd;
3928 dprintf("added config for VD %u\n", vl1->vcnum);
3929 first->conflist = vl1;
3930 }
3931
3932 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
3933 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
3934 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
3935 break;
3936 if (dl1)
3937 continue;
3938
3939 if (posix_memalign((void **)&dl1, 512,
3940 sizeof(*dl1) + (first->max_part) *
3941 sizeof(dl1->vlist[0])) != 0) {
3942 pr_err("could not allocate disk info buffer\n");
3943 return 3;
3944 }
3945 memcpy(dl1, dl2, sizeof(*dl1));
3946 dl1->mdupdate = NULL;
3947 dl1->next = first->dlist;
3948 dl1->fd = -1;
3949 for (pd = 0; pd < max_pds; pd++)
3950 if (be32_eq(first->phys->entries[pd].refnum,
3951 dl1->disk.refnum))
3952 break;
3953 dl1->pdnum = pd < max_pds ? (int)pd : -1;
3954 if (dl2->spare) {
3955 if (posix_memalign((void **)&dl1->spare, 512,
3956 first->conf_rec_len*512) != 0) {
3957 pr_err("could not allocate spare info buf\n");
3958 return 3;
3959 }
3960 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
3961 }
3962 for (vd = 0 ; vd < first->max_part ; vd++) {
3963 if (!dl2->vlist[vd]) {
3964 dl1->vlist[vd] = NULL;
3965 continue;
3966 }
3967 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3968 if (!memcmp(vl1->conf.guid,
3969 dl2->vlist[vd]->conf.guid,
3970 DDF_GUID_LEN))
3971 break;
3972 /* vl1 is now the matching conf in 'first', or NULL if none matches */
3973 dl1->vlist[vd] = vl1;
3974 }
3975 first->dlist = dl1;
3976 dprintf("added disk %d: %08x\n", dl1->pdnum,
3977 be32_to_cpu(dl1->disk.refnum));
3978 }
3979
3980 return 0;
3981 }
3982
3983 /*
3984 * A new array 'a' has been started which claims to be instance 'inst'
3985 * within container 'c'.
3986 * We need to confirm that the array matches the metadata in 'c' so
3987 * that we don't corrupt any metadata.
3988 */
3989 static int ddf_open_new(struct supertype *c, struct active_array *a, int inst)
3990 {
3991 struct ddf_super *ddf = c->sb;
3992 struct mdinfo *dev;
3993 struct dl *dl;
3994 static const char faulty[] = "faulty";
3995
3996 if (all_ff(ddf->virt->entries[inst].guid)) {
3997 pr_err("subarray %d doesn't exist\n", inst);
3998 return -ENODEV;
3999 }
4000 dprintf("new subarray %d, GUID: %s\n", inst,
4001 guid_str(ddf->virt->entries[inst].guid));
4002 for (dev = a->info.devs; dev; dev = dev->next) {
4003 for (dl = ddf->dlist; dl; dl = dl->next)
4004 if (dl->major == dev->disk.major &&
4005 dl->minor == dev->disk.minor)
4006 break;
4007 if (!dl || dl->pdnum < 0) {
4008 pr_err("device %d/%d of subarray %d not found in meta data\n",
4009 dev->disk.major, dev->disk.minor, inst);
4010 return -1;
4011 }
4012 if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
4013 (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
4014 pr_err("new subarray %d contains broken device %d/%d (%02x)\n",
4015 inst, dl->major, dl->minor,
4016 be16_to_cpu(ddf->phys->entries[dl->pdnum].state));
4017 if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
4018 sizeof(faulty) - 1)
4019 pr_err("Write to state_fd failed\n");
4020 dev->curr_state = DS_FAULTY;
4021 }
4022 }
4023 a->info.container_member = inst;
4024 return 0;
4025 }
4026
4027 static void handle_missing(struct ddf_super *ddf, struct active_array *a, int inst)
4028 {
4029 /* This member array is being activated. If any devices
4030 * are missing, they must now be marked as failed.
4031 */
4032 struct vd_config *vc;
4033 unsigned int n_bvd;
4034 struct vcl *vcl;
4035 struct dl *dl;
4036 int pd;
4037 int n;
4038 int state;
4039
4040 for (n = 0; ; n++) {
4041 vc = find_vdcr(ddf, inst, n, &n_bvd, &vcl);
4042 if (!vc)
4043 break;
4044 for (dl = ddf->dlist; dl; dl = dl->next)
4045 if (be32_eq(dl->disk.refnum, vc->phys_refnum[n_bvd]))
4046 break;
4047 if (dl)
4048 /* Found this disk, so not missing */
4049 continue;
4050
4051 /* Mark the device as failed/missing. */
4052 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4053 if (pd >= 0 && be16_and(ddf->phys->entries[pd].state,
4054 cpu_to_be16(DDF_Online))) {
4055 be16_clear(ddf->phys->entries[pd].state,
4056 cpu_to_be16(DDF_Online));
4057 be16_set(ddf->phys->entries[pd].state,
4058 cpu_to_be16(DDF_Failed|DDF_Missing));
4059 vc->phys_refnum[n_bvd] = cpu_to_be32(0);
4060 ddf_set_updates_pending(ddf, vc);
4061 }
4062
4063 /* Mark the array as Degraded */
4064 state = get_svd_state(ddf, vcl);
4065 if (ddf->virt->entries[inst].state !=
4066 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4067 | state)) {
4068 ddf->virt->entries[inst].state =
4069 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4070 | state;
4071 a->check_degraded = 1;
4072 ddf_set_updates_pending(ddf, vc);
4073 }
4074 }
4075 }
4076
4077 /*
4078 * The array 'a' is to be marked clean in the metadata.
4079 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4080 * clean up to that point (in sectors). If that cannot be recorded in the
4081 * metadata, then leave it as dirty.
4082 *
4083 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4084 * !global! virtual_disk.virtual_entry structure.
4085 */
4086 static int ddf_set_array_state(struct active_array *a, int consistent)
4087 {
4088 struct ddf_super *ddf = a->container->sb;
4089 int inst = a->info.container_member;
4090 int old = ddf->virt->entries[inst].state;
4091 if (consistent == 2) {
4092 handle_missing(ddf, a, inst);
4093 consistent = 1;
4094 if (!is_resync_complete(&a->info))
4095 consistent = 0;
4096 }
4097 if (consistent)
4098 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4099 else
4100 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4101 if (old != ddf->virt->entries[inst].state)
4102 ddf_set_updates_pending(ddf, NULL);
4103
4104 old = ddf->virt->entries[inst].init_state;
4105 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4106 if (is_resync_complete(&a->info))
4107 ddf->virt->entries[inst].init_state |= DDF_init_full;
4108 else if (a->info.resync_start == 0)
4109 ddf->virt->entries[inst].init_state |= DDF_init_not;
4110 else
4111 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4112 if (old != ddf->virt->entries[inst].init_state)
4113 ddf_set_updates_pending(ddf, NULL);
4114
4115 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4116 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4117 consistent?"clean":"dirty",
4118 a->info.resync_start);
4119 return consistent;
4120 }
4121
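/* Compute the DDF state (optimal/degraded/part_optimal/failed) of a
 * single BVD by counting how many of its primary elements map to
 * physical disks that are Online and neither Failed nor Rebuilding,
 * then applying per-RAID-level rules.
 */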
4122 static int get_bvd_state(const struct ddf_super *ddf,
4123 const struct vd_config *vc)
4124 {
4125 unsigned int i, n_bvd, working = 0;
4126 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4127 int pd, st, state;
4128 char *avail = xcalloc(1, n_prim);
4129 mdu_array_info_t array;
4130
4131 layout_ddf2md(vc, &array);
4132
4133 for (i = 0; i < n_prim; i++) {
4134 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4135 continue;
4136 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4137 if (pd < 0)
4138 continue;
4139 st = be16_to_cpu(ddf->phys->entries[pd].state);
4140 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding)) ==
4141 DDF_Online) {
4142 working++;
4143 avail[i] = 1;
4144 }
4145 }
4146
4147 state = DDF_state_degraded;
4148 if (working == n_prim)
4149 state = DDF_state_optimal;
4150 else
4151 switch (vc->prl) {
4152 case DDF_RAID0:
4153 case DDF_CONCAT:
4154 case DDF_JBOD:
4155 state = DDF_state_failed;
4156 break;
4157 case DDF_RAID1:
4158 if (working == 0)
4159 state = DDF_state_failed;
4160 else if (working >= 2)
4161 state = DDF_state_part_optimal;
4162 break;
4163 case DDF_RAID1E:
4164 if (!enough(10, n_prim, array.layout, 1, avail))
4165 state = DDF_state_failed;
4166 break;
4167 case DDF_RAID4:
4168 case DDF_RAID5:
4169 if (working < n_prim - 1)
4170 state = DDF_state_failed;
4171 break;
4172 case DDF_RAID6:
4173 if (working < n_prim - 2)
4174 state = DDF_state_failed;
4175 else if (working == n_prim - 1)
4176 state = DDF_state_part_optimal;
4177 break;
4178 }
free(avail);
4179 return state;
4180 }
4181
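/* Combine the states of two BVDs into the state of the secondary-level
 * virtual disk, taking the secondary RAID level into account.
 */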
4182 static int secondary_state(int state, int other, int seclevel)
4183 {
4184 if (state == DDF_state_optimal && other == DDF_state_optimal)
4185 return DDF_state_optimal;
4186 if (seclevel == DDF_2MIRRORED) {
4187 if (state == DDF_state_optimal || other == DDF_state_optimal)
4188 return DDF_state_part_optimal;
4189 if (state == DDF_state_failed && other == DDF_state_failed)
4190 return DDF_state_failed;
4191 return DDF_state_degraded;
4192 } else {
4193 if (state == DDF_state_failed || other == DDF_state_failed)
4194 return DDF_state_failed;
4195 if (state == DDF_state_degraded || other == DDF_state_degraded)
4196 return DDF_state_degraded;
4197 return DDF_state_part_optimal;
4198 }
4199 }
4200
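/* State of the whole (possibly secondary) virtual disk: fold the states
 * of all its BVDs together using secondary_state().
 */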
4201 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4202 {
4203 int state = get_bvd_state(ddf, &vcl->conf);
4204 unsigned int i;
4205 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4206 state = secondary_state(
4207 state,
4208 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4209 vcl->conf.srl);
4210 }
4211 return state;
4212 }
4213
4214 /*
4215 * The state of each disk is stored in the global phys_disk structure
4216 * in phys_disk.entries[n].state.
4217 * This makes various combinations awkward.
4218 * - When a device fails in any array, it must be failed in all arrays
4219 * that include a part of this device.
4220 * - When a component is rebuilding, we cannot include it officially in the
4221 * array unless this is the only array that uses the device.
4222 *
4223 * So: when transitioning:
4224 * Online -> Failed: just set the Failed flag; the monitor will propagate it.
4225 * Spare -> Online: the device might need to be added to the array.
4226 * Spare -> Failed: just set Failed. Don't worry whether it is in an array or not.
4227 */
4228 static void ddf_set_disk(struct active_array *a, int n, int state)
4229 {
4230 struct ddf_super *ddf = a->container->sb;
4231 unsigned int inst = a->info.container_member, n_bvd;
4232 struct vcl *vcl;
4233 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4234 &n_bvd, &vcl);
4235 int pd;
4236 struct mdinfo *mdi;
4237 struct dl *dl;
4238 int update = 0;
4239
4240 dprintf("%d to %x\n", n, state);
4241 if (vc == NULL) {
4242 dprintf("ddf: cannot find instance %d!!\n", inst);
4243 return;
4244 }
4245 /* Find the matching slot in 'info'. */
4246 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4247 if (mdi->disk.raid_disk == n)
4248 break;
4249 if (!mdi) {
4250 pr_err("cannot find raid disk %d\n", n);
4251 return;
4252 }
4253
4254 /* and find the 'dl' entry corresponding to that. */
4255 for (dl = ddf->dlist; dl; dl = dl->next)
4256 if (mdi->state_fd >= 0 &&
4257 mdi->disk.major == dl->major &&
4258 mdi->disk.minor == dl->minor)
4259 break;
4260 if (!dl) {
4261 pr_err("cannot find raid disk %d (%d/%d)\n",
4262 n, mdi->disk.major, mdi->disk.minor);
4263 return;
4264 }
4265
4266 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4267 if (pd < 0 || pd != dl->pdnum) {
4268 /* disk doesn't currently exist or has changed.
4269 * If it is now in_sync, insert it. */
4270 dprintf("phys disk not found for %d: %d/%d ref %08x\n",
4271 dl->pdnum, dl->major, dl->minor,
4272 be32_to_cpu(dl->disk.refnum));
4273 dprintf("array %u disk %u ref %08x pd %d\n",
4274 inst, n_bvd,
4275 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4276 if ((state & DS_INSYNC) && ! (state & DS_FAULTY) &&
4277 dl->pdnum >= 0) {
4278 pd = dl->pdnum;
4279 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4280 LBA_OFFSET(ddf, vc)[n_bvd] =
4281 cpu_to_be64(mdi->data_offset);
4282 be16_clear(ddf->phys->entries[pd].type,
4283 cpu_to_be16(DDF_Global_Spare));
4284 be16_set(ddf->phys->entries[pd].type,
4285 cpu_to_be16(DDF_Active_in_VD));
4286 update = 1;
4287 }
4288 } else {
4289 be16 old = ddf->phys->entries[pd].state;
4290 if (state & DS_FAULTY)
4291 be16_set(ddf->phys->entries[pd].state,
4292 cpu_to_be16(DDF_Failed));
4293 if (state & DS_INSYNC) {
4294 be16_set(ddf->phys->entries[pd].state,
4295 cpu_to_be16(DDF_Online));
4296 be16_clear(ddf->phys->entries[pd].state,
4297 cpu_to_be16(DDF_Rebuilding));
4298 }
4299 if (!be16_eq(old, ddf->phys->entries[pd].state))
4300 update = 1;
4301 }
4302
4303 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4304 be32_to_cpu(dl->disk.refnum), state,
4305 be16_to_cpu(ddf->phys->entries[pd].state));
4306
4307 /* Now we need to check the state of the array and update
4308 * virtual_disk.entries[n].state.
4309 * It needs to be one of "optimal", "degraded", "failed".
4310 * I don't understand 'deleted' or 'missing'.
4311 */
4312 state = get_svd_state(ddf, vcl);
4313
4314 if (ddf->virt->entries[inst].state !=
4315 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4316 | state)) {
4317 ddf->virt->entries[inst].state =
4318 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4319 | state;
4320 update = 1;
4321 }
4322 if (update)
4323 ddf_set_updates_pending(ddf, vc);
4324 }
4325
4326 static void ddf_sync_metadata(struct supertype *st)
4327 {
4328 /*
4329 * Write all data to all devices.
4330 * Later, we might be able to track whether only local changes
4331 * have been made, or whether any global data has been changed,
4332 * but ddf is sufficiently weird that it probably always
4333 * changes global data ....
4334 */
4335 struct ddf_super *ddf = st->sb;
4336 if (!ddf->updates_pending)
4337 return;
4338 ddf->updates_pending = 0;
4339 __write_init_super_ddf(st);
4340 dprintf("ddf: sync_metadata\n");
4341 }
4342
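/* Unlink any config record with the given GUID from 'list'.
 * Returns 1 if at least one entry was removed, 0 otherwise.
 */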
4343 static int del_from_conflist(struct vcl **list, const char *guid)
4344 {
4345 struct vcl **p;
4346 int found = 0;
4347 for (p = list; p && *p; p = &((*p)->next))
4348 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4349 found = 1;
4350 *p = (*p)->next;
4351 }
4352 return found;
4353 }
4354
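/* Remove all traces of the VD identified by 'guid': drop its config
 * record from the conflist, clear any references to it in the member
 * disks' vlists, and invalidate its virtual disk entry by setting the
 * GUID to all-ff.
 */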
4355 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4356 {
4357 struct dl *dl;
4358 unsigned int vdnum, i;
4359 vdnum = find_vde_by_guid(ddf, guid);
4360 if (vdnum == DDF_NOTFOUND) {
4361 pr_err("could not find VD %s\n", guid_str(guid));
4362 return -1;
4363 }
4364 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4365 pr_err("could not find conf %s\n", guid_str(guid));
4366 return -1;
4367 }
4368 for (dl = ddf->dlist; dl; dl = dl->next)
4369 for (i = 0; i < ddf->max_part; i++)
4370 if (dl->vlist[i] != NULL &&
4371 !memcmp(dl->vlist[i]->conf.guid, guid,
4372 DDF_GUID_LEN))
4373 dl->vlist[i] = NULL;
4374 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4375 dprintf("deleted %s\n", guid_str(guid));
4376 return 0;
4377 }
4378
4379 static int kill_subarray_ddf(struct supertype *st, char *subarray_id)
4380 {
4381 struct ddf_super *ddf = st->sb;
4382 /*
4383 * currentconf is set in container_content_ddf,
4384 * called with subarray arg
4385 */
4386 struct vcl *victim = ddf->currentconf;
4387 struct vd_config *conf;
4388 unsigned int vdnum;
4389
4390 ddf->currentconf = NULL;
4391 if (!victim) {
4392 pr_err("nothing to kill\n");
4393 return -1;
4394 }
4395 conf = &victim->conf;
4396 vdnum = find_vde_by_guid(ddf, conf->guid);
4397 if (vdnum == DDF_NOTFOUND) {
4398 pr_err("could not find VD %s\n", guid_str(conf->guid));
4399 return -1;
4400 }
4401 if (st->update_tail) {
4402 struct virtual_disk *vd;
4403 int len = sizeof(struct virtual_disk)
4404 + sizeof(struct virtual_entry);
4405 vd = xmalloc(len);
4406 if (vd == NULL) {
4407 pr_err("failed to allocate %d bytes\n", len);
4408 return -1;
4409 }
4410 memset(vd, 0 , len);
4411 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4412 vd->populated_vdes = cpu_to_be16(0);
4413 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4414 /* we use DDF_state_deleted as marker */
4415 vd->entries[0].state = DDF_state_deleted;
4416 append_metadata_update(st, vd, len);
4417 } else {
4418 _kill_subarray_ddf(ddf, conf->guid);
4419 ddf_set_updates_pending(ddf, NULL);
4420 ddf_sync_metadata(st);
4421 }
4422 return 0;
4423 }
4424
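/* Find the BVD in 'update' whose sec_elmnt_seq matches 'conf' and copy
 * its phys_refnum and LBA offset tables into 'conf'.
 */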
4425 static void copy_matching_bvd(struct ddf_super *ddf,
4426 struct vd_config *conf,
4427 const struct metadata_update *update)
4428 {
4429 unsigned int mppe =
4430 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4431 unsigned int len = ddf->conf_rec_len * 512;
4432 char *p;
4433 struct vd_config *vc;
4434 for (p = update->buf; p < update->buf + update->len; p += len) {
4435 vc = (struct vd_config *) p;
4436 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4437 memcpy(conf->phys_refnum, vc->phys_refnum,
4438 mppe * (sizeof(__u32) + sizeof(__u64)));
4439 return;
4440 }
4441 }
4442 pr_err("no match for BVD %d of %s in update\n",
4443 conf->sec_elmnt_seq, guid_str(conf->guid));
4444 }
4445
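/* Apply a DDF_PHYS_RECORDS_MAGIC update: either mark a physical disk as
 * Missing and drop it from the disk list, or install a new physical
 * disk entry and, if a device is waiting on add_list, move it onto
 * dlist and flag all arrays for a degraded check.
 */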
4446 static void ddf_process_phys_update(struct supertype *st,
4447 struct metadata_update *update)
4448 {
4449 struct ddf_super *ddf = st->sb;
4450 struct phys_disk *pd;
4451 unsigned int ent;
4452
4453 pd = (struct phys_disk*)update->buf;
4454 ent = be16_to_cpu(pd->used_pdes);
4455 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4456 return;
4457 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4458 struct dl **dlp;
4459 /* removing this disk. */
4460 be16_set(ddf->phys->entries[ent].state,
4461 cpu_to_be16(DDF_Missing));
4462 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4463 struct dl *dl = *dlp;
4464 if (dl->pdnum == (signed)ent) {
4465 close(dl->fd);
4466 dl->fd = -1;
4467 *dlp = dl->next;
4468 update->space = dl->devname;
4469 *(void**)dl = update->space_list;
4470 update->space_list = (void**)dl;
4471 break;
4472 }
4473 }
4474 ddf_set_updates_pending(ddf, NULL);
4475 return;
4476 }
4477 if (!all_ff(ddf->phys->entries[ent].guid))
4478 return;
4479 ddf->phys->entries[ent] = pd->entries[0];
4480 ddf->phys->used_pdes = cpu_to_be16
4481 (1 + be16_to_cpu(ddf->phys->used_pdes));
4482 ddf_set_updates_pending(ddf, NULL);
4483 if (ddf->add_list) {
4484 struct active_array *a;
4485 struct dl *al = ddf->add_list;
4486 ddf->add_list = al->next;
4487
4488 al->next = ddf->dlist;
4489 ddf->dlist = al;
4490
4491 /* As a device has been added, we should check
4492 * for any degraded devices that might make
4493 * use of this spare */
4494 for (a = st->arrays ; a; a=a->next)
4495 a->check_degraded = 1;
4496 }
4497 }
4498
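/* Apply a DDF_VIRT_RECORDS_MAGIC update: delete the VD if the entry is
 * marked DDF_state_deleted, otherwise add it to the first unused slot
 * of the virtual disk table.
 */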
4499 static void ddf_process_virt_update(struct supertype *st,
4500 struct metadata_update *update)
4501 {
4502 struct ddf_super *ddf = st->sb;
4503 struct virtual_disk *vd;
4504 unsigned int ent;
4505
4506 vd = (struct virtual_disk*)update->buf;
4507
4508 if (vd->entries[0].state == DDF_state_deleted) {
4509 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4510 return;
4511 } else {
4512 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4513 if (ent != DDF_NOTFOUND) {
4514 dprintf("VD %s exists already in slot %d\n",
4515 guid_str(vd->entries[0].guid),
4516 ent);
4517 return;
4518 }
4519 ent = find_unused_vde(ddf);
4520 if (ent == DDF_NOTFOUND)
4521 return;
4522 ddf->virt->entries[ent] = vd->entries[0];
4523 ddf->virt->populated_vdes =
4524 cpu_to_be16(
4525 1 + be16_to_cpu(
4526 ddf->virt->populated_vdes));
4527 dprintf("added VD %s in slot %d(s=%02x i=%02x)\n",
4528 guid_str(vd->entries[0].guid), ent,
4529 ddf->virt->entries[ent].state,
4530 ddf->virt->entries[ent].init_state);
4531 }
4532 ddf_set_updates_pending(ddf, NULL);
4533 }
4534
4535 static void ddf_remove_failed(struct ddf_super *ddf)
4536 {
4537 /* Now remove any 'Failed' devices that are not part
4538 * of any VD. They will have the Transition flag set.
4539 * Once done, we need to update all dl->pdnum numbers.
4540 */
4541 unsigned int pdnum;
4542 unsigned int pd2 = 0;
4543 struct dl *dl;
4544
4545 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
4546 pdnum++) {
4547 if (be32_to_cpu(ddf->phys->entries[pdnum].refnum) ==
4548 0xFFFFFFFF)
4549 continue;
4550 if (be16_and(ddf->phys->entries[pdnum].state,
4551 cpu_to_be16(DDF_Failed)) &&
4552 be16_and(ddf->phys->entries[pdnum].state,
4553 cpu_to_be16(DDF_Transition))) {
4554 /* skip this one unless it is in dlist */
4555 for (dl = ddf->dlist; dl; dl = dl->next)
4556 if (dl->pdnum == (int)pdnum)
4557 break;
4558 if (!dl)
4559 continue;
4560 }
4561 if (pdnum == pd2)
4562 pd2++;
4563 else {
4564 ddf->phys->entries[pd2] =
4565 ddf->phys->entries[pdnum];
4566 for (dl = ddf->dlist; dl; dl = dl->next)
4567 if (dl->pdnum == (int)pdnum)
4568 dl->pdnum = pd2;
4569 pd2++;
4570 }
4571 }
4572 ddf->phys->used_pdes = cpu_to_be16(pd2);
4573 while (pd2 < pdnum) {
4574 memset(ddf->phys->entries[pd2].guid, 0xff,
4575 DDF_GUID_LEN);
4576 pd2++;
4577 }
4578 }
4579
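/* Rebuild dl->vlist for one disk from the current conflist, clear the
 * Transition flag on Failed entries that are still referenced, and
 * adjust the disk's type bits (Global_Spare, Spare, Active_in_VD) to
 * reflect how the disk is actually used.
 */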
4580 static void ddf_update_vlist(struct ddf_super *ddf, struct dl *dl)
4581 {
4582 struct vcl *vcl;
4583 unsigned int vn = 0;
4584 int in_degraded = 0;
4585
4586 if (dl->pdnum < 0)
4587 return;
4588 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4589 unsigned int dn, ibvd;
4590 const struct vd_config *conf;
4591 int vstate;
4592 dn = get_pd_index_from_refnum(vcl,
4593 dl->disk.refnum,
4594 ddf->mppe,
4595 &conf, &ibvd);
4596 if (dn == DDF_NOTFOUND)
4597 continue;
4598 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4599 dl->pdnum,
4600 be32_to_cpu(dl->disk.refnum),
4601 guid_str(conf->guid),
4602 conf->sec_elmnt_seq, vn);
4603 /* Clear the Transition flag */
4604 if (be16_and
4605 (ddf->phys->entries[dl->pdnum].state,
4606 cpu_to_be16(DDF_Failed)))
4607 be16_clear(ddf->phys
4608 ->entries[dl->pdnum].state,
4609 cpu_to_be16(DDF_Transition));
4610 dl->vlist[vn++] = vcl;
4611 vstate = ddf->virt->entries[vcl->vcnum].state
4612 & DDF_state_mask;
4613 if (vstate == DDF_state_degraded ||
4614 vstate == DDF_state_part_optimal)
4615 in_degraded = 1;
4616 }
4617 while (vn < ddf->max_part)
4618 dl->vlist[vn++] = NULL;
4619 if (dl->vlist[0]) {
4620 be16_clear(ddf->phys->entries[dl->pdnum].type,
4621 cpu_to_be16(DDF_Global_Spare));
4622 if (!be16_and(ddf->phys
4623 ->entries[dl->pdnum].type,
4624 cpu_to_be16(DDF_Active_in_VD))) {
4625 be16_set(ddf->phys
4626 ->entries[dl->pdnum].type,
4627 cpu_to_be16(DDF_Active_in_VD));
4628 if (in_degraded)
4629 be16_set(ddf->phys
4630 ->entries[dl->pdnum]
4631 .state,
4632 cpu_to_be16
4633 (DDF_Rebuilding));
4634 }
4635 }
4636 if (dl->spare) {
4637 be16_clear(ddf->phys->entries[dl->pdnum].type,
4638 cpu_to_be16(DDF_Global_Spare));
4639 be16_set(ddf->phys->entries[dl->pdnum].type,
4640 cpu_to_be16(DDF_Spare));
4641 }
4642 if (!dl->vlist[0] && !dl->spare) {
4643 be16_set(ddf->phys->entries[dl->pdnum].type,
4644 cpu_to_be16(DDF_Global_Spare));
4645 be16_clear(ddf->phys->entries[dl->pdnum].type,
4646 cpu_to_be16(DDF_Spare));
4647 be16_clear(ddf->phys->entries[dl->pdnum].type,
4648 cpu_to_be16(DDF_Active_in_VD));
4649 }
4650 }
4651
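/* Apply a DDF_VD_CONF_MAGIC update: refresh the phys_refnum/LBA tables
 * of an existing VD configuration or install a new one, then mark
 * Failed devices with DDF_Transition, rebuild each disk's vlist and
 * prune Failed disks that are no longer referenced by any VD.
 */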
4652 static void ddf_process_conf_update(struct supertype *st,
4653 struct metadata_update *update)
4654 {
4655 struct ddf_super *ddf = st->sb;
4656 struct vd_config *vc;
4657 struct vcl *vcl;
4658 struct dl *dl;
4659 unsigned int ent;
4660 unsigned int pdnum, len;
4661
4662 vc = (struct vd_config*)update->buf;
4663 len = ddf->conf_rec_len * 512;
4664 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4665 pr_err("%s: insufficient data (%d) for %u BVDs\n",
4666 guid_str(vc->guid), update->len,
4667 vc->sec_elmnt_count);
4668 return;
4669 }
4670 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4671 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4672 break;
4673 dprintf("conf update for %s (%s)\n",
4674 guid_str(vc->guid), (vcl ? "old" : "new"));
4675 if (vcl) {
4676 /* An update, just copy the phys_refnum and lba_offset
4677 * fields
4678 */
4679 unsigned int i;
4680 unsigned int k;
4681 copy_matching_bvd(ddf, &vcl->conf, update);
4682 for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
4683 dprintf("BVD %u has %08x at %llu\n", 0,
4684 be32_to_cpu(vcl->conf.phys_refnum[k]),
4685 be64_to_cpu(LBA_OFFSET(ddf,
4686 &vcl->conf)[k]));
4687 for (i = 1; i < vc->sec_elmnt_count; i++) {
4688 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4689 update);
4690 for (k = 0; k < be16_to_cpu(
4691 vc->prim_elmnt_count); k++)
4692 dprintf("BVD %u has %08x at %llu\n", i,
4693 be32_to_cpu
4694 (vcl->other_bvds[i-1]->
4695 phys_refnum[k]),
4696 be64_to_cpu
4697 (LBA_OFFSET
4698 (ddf,
4699 vcl->other_bvds[i-1])[k]));
4700 }
4701 } else {
4702 /* A new VD_CONF */
4703 unsigned int i;
4704 if (!update->space)
4705 return;
4706 vcl = update->space;
4707 update->space = NULL;
4708 vcl->next = ddf->conflist;
4709 memcpy(&vcl->conf, vc, len);
4710 ent = find_vde_by_guid(ddf, vc->guid);
4711 if (ent == DDF_NOTFOUND)
4712 return;
4713 vcl->vcnum = ent;
4714 ddf->conflist = vcl;
4715 for (i = 1; i < vc->sec_elmnt_count; i++)
4716 memcpy(vcl->other_bvds[i-1],
4717 update->buf + len * i, len);
4718 }
4719 /* Set DDF_Transition on all Failed devices - to help
4720 * us detect those that are no longer in use
4721 */
4722 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
4723 pdnum++)
4724 if (be16_and(ddf->phys->entries[pdnum].state,
4725 cpu_to_be16(DDF_Failed)))
4726 be16_set(ddf->phys->entries[pdnum].state,
4727 cpu_to_be16(DDF_Transition));
4728
4729 /* Now make sure vlist is correct for each dl. */
4730 for (dl = ddf->dlist; dl; dl = dl->next)
4731 ddf_update_vlist(ddf, dl);
4732 ddf_remove_failed(ddf);
4733
4734 ddf_set_updates_pending(ddf, vc);
4735 }
4736
4737 static void ddf_process_update(struct supertype *st,
4738 struct metadata_update *update)
4739 {
4740 /* Apply this update to the metadata.
4741 * The first 4 bytes are a DDF_*_MAGIC which guides
4742 * our actions.
4743 * Possible updates are:
4744 * DDF_PHYS_RECORDS_MAGIC
4745 * Add a new physical device or remove an old one.
4746 * Changes to this record only happen implicitly.
4747 * used_pdes is the device number.
4748 * DDF_VIRT_RECORDS_MAGIC
4749 * Add a new VD. Possibly also change the 'access' bits.
4750 * populated_vdes is the entry number.
4751 * DDF_VD_CONF_MAGIC
4752 * New or updated VD. The VIRT_RECORD must already
4753 * exist. For an update, phys_refnum and lba_offset
4754 * (at least) are updated, and the VD_CONF must
4755 * be written to precisely those devices listed with
4756 * a phys_refnum.
4757 * DDF_SPARE_ASSIGN_MAGIC
4758 * replacement Spare Assignment Record... but for which device?
4759 *
4760 * So, e.g.:
4761 * - to create a new array, we send a VIRT_RECORD and
4762 * a VD_CONF. Then assemble and start the array.
4763 * - to activate a spare we send a VD_CONF to add the phys_refnum
4764 * and offset. This will also mark the spare as active with
4765 * a spare-assignment record.
4766 */
4767 be32 *magic = (be32 *)update->buf;
4768
4769 dprintf("Process update %x\n", be32_to_cpu(*magic));
4770
4771 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4772 if (update->len == (sizeof(struct phys_disk) +
4773 sizeof(struct phys_disk_entry)))
4774 ddf_process_phys_update(st, update);
4775 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4776 if (update->len == (sizeof(struct virtual_disk) +
4777 sizeof(struct virtual_entry)))
4778 ddf_process_virt_update(st, update);
4779 } else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4780 ddf_process_conf_update(st, update);
4781 }
4782 /* case DDF_SPARE_ASSIGN_MAGIC */
4783 }
4784
4785 static int ddf_prepare_update(struct supertype *st,
4786 struct metadata_update *update)
4787 {
4788 /* This update arrived at managemon.
4789 * We are about to pass it to monitor.
4790 * If a malloc is needed, do it here.
4791 */
4792 struct ddf_super *ddf = st->sb;
4793 be32 *magic;
4794 if (update->len < 4)
4795 return 0;
4796 magic = (be32 *)update->buf;
4797 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4798 struct vcl *vcl;
4799 struct vd_config *conf;
4800 if (update->len < (int)sizeof(*conf))
4801 return 0;
4802 conf = (struct vd_config *) update->buf;
4803 if (posix_memalign(&update->space, 512,
4804 offsetof(struct vcl, conf)
4805 + ddf->conf_rec_len * 512) != 0) {
4806 update->space = NULL;
4807 return 0;
4808 }
4809 vcl = update->space;
4810 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4811 if (alloc_other_bvds(ddf, vcl) != 0) {
4812 free(update->space);
4813 update->space = NULL;
4814 return 0;
4815 }
4816 }
4817 return 1;
4818 }
4819
4820 /*
4821 * Check degraded state of a RAID10.
4822 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4823 */
4824 static int raid10_degraded(struct mdinfo *info)
4825 {
4826 int n_prim, n_bvds;
4827 int i;
4828 struct mdinfo *d;
4829 char *found;
4830 int ret = -1;
4831
4832 n_prim = info->array.layout & ~0x100;
4833 n_bvds = info->array.raid_disks / n_prim;
4834 found = xmalloc(n_bvds);
4835 if (found == NULL)
4836 return ret;
4837 memset(found, 0, n_bvds);
4838 for (d = info->devs; d; d = d->next) {
4839 i = d->disk.raid_disk / n_prim;
4840 if (i >= n_bvds) {
4841 pr_err("BUG: invalid raid disk\n");
4842 goto out;
4843 }
4844 if (is_fd_valid(d->state_fd))
4845 found[i]++;
4846 }
4847 ret = 2;
4848 for (i = 0; i < n_bvds; i++)
4849 if (!found[i]) {
4850 dprintf("BVD %d/%d failed\n", i, n_bvds);
4851 ret = 0;
4852 goto out;
4853 } else if (found[i] < n_prim) {
4854 dprintf("BVD %d/%d degraded\n", i, n_bvds);
4855 ret = 1;
4856 }
4857 out:
4858 free(found);
4859 return ret;
4860 }
4861
4862 /*
4863 * Check if the array 'a' is degraded but not failed.
4864 * If it is, find as many spares as are available and needed and
4865 * arrange for their inclusion.
4866 * We only choose devices which are not already in the array,
4867 * and prefer those with a spare-assignment to this array.
4868 * Otherwise we choose global spares - assuming always that
4869 * there is enough room.
4870 * For each spare that we assign, we return an 'mdinfo' which
4871 * describes the position for the device in the array.
4872 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4873 * the new phys_refnum and lba_offset values.
4874 *
4875 * Only worry about BVDs at the moment.
4876 */
4877 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4878 struct metadata_update **updates)
4879 {
4880 int working = 0;
4881 struct mdinfo *d;
4882 struct ddf_super *ddf = a->container->sb;
4883 int global_ok = 0;
4884 struct mdinfo *rv = NULL;
4885 struct mdinfo *di;
4886 struct metadata_update *mu;
4887 struct dl *dl;
4888 int i;
4889 unsigned int j;
4890 struct vcl *vcl;
4891 struct vd_config *vc;
4892 unsigned int n_bvd;
4893
4894 for (d = a->info.devs ; d ; d = d->next) {
4895 if ((d->curr_state & DS_FAULTY) &&
4896 d->state_fd >= 0)
4897 /* wait for Removal to happen */
4898 return NULL;
4899 if (d->state_fd >= 0)
4900 working ++;
4901 }
4902
4903 dprintf("working=%d (%d) level=%d\n", working,
4904 a->info.array.raid_disks,
4905 a->info.array.level);
4906 if (working == a->info.array.raid_disks)
4907 return NULL; /* array not degraded */
4908 switch (a->info.array.level) {
4909 case 1:
4910 if (working == 0)
4911 return NULL; /* failed */
4912 break;
4913 case 4:
4914 case 5:
4915 if (working < a->info.array.raid_disks - 1)
4916 return NULL; /* failed */
4917 break;
4918 case 6:
4919 if (working < a->info.array.raid_disks - 2)
4920 return NULL; /* failed */
4921 break;
4922 case 10:
4923 if (raid10_degraded(&a->info) < 1)
4924 return NULL;
4925 break;
4926 default: /* concat or stripe */
4927 return NULL; /* failed */
4928 }
4929
4930 /* For each slot, if it is not working, find a spare */
4931 dl = ddf->dlist;
4932 for (i = 0; i < a->info.array.raid_disks; i++) {
4933 for (d = a->info.devs ; d ; d = d->next)
4934 if (d->disk.raid_disk == i)
4935 break;
4936 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4937 if (d && (d->state_fd >= 0))
4938 continue;
4939
4940 /* OK, this device needs recovery. Find a spare */
4941 again:
4942 for ( ; dl ; dl = dl->next) {
4943 unsigned long long esize;
4944 unsigned long long pos;
4945 struct mdinfo *d2;
4946 int is_global = 0;
4947 int is_dedicated = 0;
4948 be16 state;
4949
4950 if (dl->pdnum < 0)
4951 continue;
4952 state = ddf->phys->entries[dl->pdnum].state;
4953 if (be16_and(state,
4954 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4955 !be16_and(state,
4956 cpu_to_be16(DDF_Online)))
4957 continue;
4958
4959 /* If in this array, skip */
4960 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
4961 if (d2->state_fd >= 0 &&
4962 d2->disk.major == dl->major &&
4963 d2->disk.minor == dl->minor) {
4964 dprintf("%x:%x (%08x) already in array\n",
4965 dl->major, dl->minor,
4966 be32_to_cpu(dl->disk.refnum));
4967 break;
4968 }
4969 if (d2)
4970 continue;
4971 if (be16_and(ddf->phys->entries[dl->pdnum].type,
4972 cpu_to_be16(DDF_Spare))) {
4973 /* Check spare assign record */
4974 if (dl->spare) {
4975 if (dl->spare->type & DDF_spare_dedicated) {
4976 /* check spare_ents for guid */
4977 unsigned int j;
4978 for (j = 0 ;
4979 j < be16_to_cpu
4980 (dl->spare
4981 ->populated);
4982 j++) {
4983 if (memcmp(dl->spare->spare_ents[j].guid,
4984 ddf->virt->entries[a->info.container_member].guid,
4985 DDF_GUID_LEN) == 0)
4986 is_dedicated = 1;
4987 }
4988 } else
4989 is_global = 1;
4990 }
4991 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
4992 cpu_to_be16(DDF_Global_Spare))) {
4993 is_global = 1;
4994 } else if (!be16_and(ddf->phys
4995 ->entries[dl->pdnum].state,
4996 cpu_to_be16(DDF_Failed))) {
4997 /* we can possibly use some of this */
4998 is_global = 1;
4999 }
5000 if ( ! (is_dedicated ||
5001 (is_global && global_ok))) {
5002 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
5003 is_dedicated, is_global);
5004 continue;
5005 }
5006
5007 /* We are allowed to use this device - is there space?
5008 * We need a->info.component_size sectors */
5009 esize = a->info.component_size;
5010 pos = find_space(ddf, dl, INVALID_SECTORS, &esize);
5011
5012 if (esize < a->info.component_size) {
5013 dprintf("%x:%x has no room: %llu %llu\n",
5014 dl->major, dl->minor,
5015 esize, a->info.component_size);
5016 /* No room */
5017 continue;
5018 }
5019
5020 /* Cool, we have a device with some space at pos */
5021 di = xcalloc(1, sizeof(*di));
5022 di->disk.number = i;
5023 di->disk.raid_disk = i;
5024 di->disk.major = dl->major;
5025 di->disk.minor = dl->minor;
5026 di->disk.state = 0;
5027 di->recovery_start = 0;
5028 di->data_offset = pos;
5029 di->component_size = a->info.component_size;
5030 di->next = rv;
5031 rv = di;
5032 dprintf("%x:%x (%08x) to be %d at %llu\n",
5033 dl->major, dl->minor,
5034 be32_to_cpu(dl->disk.refnum), i, pos);
5035
5036 break;
5037 }
5038 if (!dl && ! global_ok) {
5039 /* not enough dedicated spares, try global */
5040 global_ok = 1;
5041 dl = ddf->dlist;
5042 goto again;
5043 }
5044 }
5045
5046 if (!rv)
5047 /* No spares found */
5048 return rv;
5049 /* Now 'rv' has a list of devices to return.
5050 * Create a metadata_update record to update the
5051 * phys_refnum and lba_offset values
5052 */
5053 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5054 &n_bvd, &vcl);
5055 if (vc == NULL) {
5056 free(rv);
5057 return NULL;
5058 }
5059
5060 mu = xmalloc(sizeof(*mu));
5061 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5062 free(mu);
5063 free(rv);
5064 return NULL;
5065 }
5066
5067 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5068 mu->buf = xmalloc(mu->len);
5069 mu->space = NULL;
5070 mu->space_list = NULL;
5071 mu->next = *updates;
5072 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5073 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5074 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5075 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5076
5077 vc = (struct vd_config*)mu->buf;
5078 for (di = rv ; di ; di = di->next) {
5079 unsigned int i_sec, i_prim;
5080 i_sec = di->disk.raid_disk
5081 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5082 i_prim = di->disk.raid_disk
5083 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5084 vc = (struct vd_config *)(mu->buf
5085 + i_sec * ddf->conf_rec_len * 512);
5086 for (dl = ddf->dlist; dl; dl = dl->next)
5087 if (dl->major == di->disk.major &&
5088 dl->minor == di->disk.minor)
5089 break;
5090 if (!dl || dl->pdnum < 0) {
5091 pr_err("BUG: can't find disk %d (%d/%d)\n",
5092 di->disk.raid_disk,
5093 di->disk.major, di->disk.minor);
5094 free(mu);
5095 free(rv);
5096 return NULL;
5097 }
5098 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5099 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5100 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5101 be32_to_cpu(vc->phys_refnum[i_prim]),
5102 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5103 }
5104 *updates = mu;
5105 return rv;
5106 }
5107
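/* Default md layout for a given RAID level; returns UnSet for levels
 * that are not handled here.
 */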
5108 static int ddf_level_to_layout(int level)
5109 {
5110 switch(level) {
5111 case 0:
5112 case 1:
5113 return 0;
5114 case 5:
5115 return ALGORITHM_LEFT_SYMMETRIC;
5116 case 6:
5117 return ALGORITHM_ROTATING_N_CONTINUE;
5118 case 10:
5119 return 0x102;
5120 default:
5121 return UnSet;
5122 }
5123 }
5124
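/* Fill in defaults for geometry parameters the user left unset: an
 * unset level becomes LEVEL_CONTAINER, and an unset layout gets the
 * default for the chosen level.
 */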
5125 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5126 {
5127 if (level && *level == UnSet)
5128 *level = LEVEL_CONTAINER;
5129
5130 if (level && layout && *layout == UnSet)
5131 *layout = ddf_level_to_layout(*level);
5132 }
5133
5134 struct superswitch super_ddf = {
5135 .examine_super = examine_super_ddf,
5136 .brief_examine_super = brief_examine_super_ddf,
5137 .brief_examine_subarrays = brief_examine_subarrays_ddf,
5138 .export_examine_super = export_examine_super_ddf,
5139 .detail_super = detail_super_ddf,
5140 .brief_detail_super = brief_detail_super_ddf,
5141 .validate_geometry = validate_geometry_ddf,
5142 .write_init_super = write_init_super_ddf,
5143 .add_to_super = add_to_super_ddf,
5144 .remove_from_super = remove_from_super_ddf,
5145 .load_container = load_container_ddf,
5146 .copy_metadata = copy_metadata_ddf,
5147 .kill_subarray = kill_subarray_ddf,
5148 .match_home = match_home_ddf,
5149 .uuid_from_super= uuid_from_super_ddf,
5150 .getinfo_super = getinfo_super_ddf,
5151
5152 .avail_size = avail_size_ddf,
5153
5154 .compare_super = compare_super_ddf,
5155
5156 .load_super = load_super_ddf,
5157 .init_super = init_super_ddf,
5158 .store_super = store_super_ddf,
5159 .free_super = free_super_ddf,
5160 .match_metadata_desc = match_metadata_desc_ddf,
5161 .container_content = container_content_ddf,
5162 .default_geometry = default_geometry_ddf,
5163
5164 .external = 1,
5165 .swapuuid = 0,
5166
5167 /* for mdmon */
5168 .open_new = ddf_open_new,
5169 .set_array_state= ddf_set_array_state,
5170 .set_disk = ddf_set_disk,
5171 .sync_metadata = ddf_sync_metadata,
5172 .process_update = ddf_process_update,
5173 .prepare_update = ddf_prepare_update,
5174 .activate_spare = ddf_activate_spare,
5175 .name = "ddf",
5176 };