/* super-ddf.c (from the mdadm source tree)
 * Change context: "DDF: don't assume the anchor is fully up-to-date."
 */
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2014 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add
38 * second-in-decade-of-seventies to convert to linux timestamps.
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* Default for safe_mode_delay. Same value as for IMSM.
51 */
52 static const int DDF_SAFE_MODE_DELAY = 4000;
53
54 /* The DDF metadata handling.
55 * DDF metadata lives at the end of the device.
56 * The last 512 byte block provides an 'anchor' which is used to locate
57 * the rest of the metadata which usually lives immediately behind the anchor.
58 *
59 * Note:
60 * - all multibyte numeric fields are bigendian.
61 * - all strings are space padded.
62 *
63 */
64
/* DDF stores every multi-byte numeric field big-endian.  Each width is
 * wrapped in a one-member struct so a raw assignment between host-order
 * and disk-order values is a type error: all conversions must go
 * through the helpers below.
 */
typedef struct __be16 {
	__u16 _v16;
} be16;
/* Equality and bitwise operations are byte-order independent, so these
 * work directly on the raw big-endian representation. */
#define be16_eq(x, y) ((x)._v16 == (y)._v16)
#define be16_and(x, y) ((x)._v16 & (y)._v16)
#define be16_or(x, y) ((x)._v16 | (y)._v16)
#define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
#define be16_set(x, y) ((x)._v16 |= (y)._v16)

typedef struct __be32 {
	__u32 _v32;
} be32;
#define be32_eq(x, y) ((x)._v32 == (y)._v32)

typedef struct __be64 {
	__u64 _v64;
} be64;
#define be64_eq(x, y) ((x)._v64 == (y)._v64)

/* Convert a big-endian wrapper value to host order. */
#define be16_to_cpu(be) __be16_to_cpu((be)._v16)
/* Convert a host-order value into the big-endian wrapper type. */
static inline be16 cpu_to_be16(__u16 x)
{
	be16 be = { ._v16 = __cpu_to_be16(x) };
	return be;
}

#define be32_to_cpu(be) __be32_to_cpu((be)._v32)
static inline be32 cpu_to_be32(__u32 x)
{
	be32 be = { ._v32 = __cpu_to_be32(x) };
	return be;
}

#define be64_to_cpu(be) __be64_to_cpu((be)._v64)
static inline be64 cpu_to_be64(__u64 x)
{
	be64 be = { ._v64 = __cpu_to_be64(x) };
	return be;
}
104
105 /* Primary Raid Level (PRL) */
106 #define DDF_RAID0 0x00
107 #define DDF_RAID1 0x01
108 #define DDF_RAID3 0x03
109 #define DDF_RAID4 0x04
110 #define DDF_RAID5 0x05
111 #define DDF_RAID1E 0x11
112 #define DDF_JBOD 0x0f
113 #define DDF_CONCAT 0x1f
114 #define DDF_RAID5E 0x15
115 #define DDF_RAID5EE 0x25
116 #define DDF_RAID6 0x06
117
118 /* Raid Level Qualifier (RLQ) */
119 #define DDF_RAID0_SIMPLE 0x00
120 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
121 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
122 #define DDF_RAID3_0 0x00 /* parity in first extent */
123 #define DDF_RAID3_N 0x01 /* parity in last extent */
124 #define DDF_RAID4_0 0x00 /* parity in first extent */
125 #define DDF_RAID4_N 0x01 /* parity in last extent */
126 /* these apply to raid5e and raid5ee as well */
127 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
128 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
129 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
130 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
131
132 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
133 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
134
135 /* Secondary RAID Level (SRL) */
136 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
137 #define DDF_2MIRRORED 0x01
138 #define DDF_2CONCAT 0x02
139 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
140
141 /* Magic numbers */
142 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
143 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
144 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
145 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
146 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
147 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
148 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
149 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
150 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
151 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
152
153 #define DDF_GUID_LEN 24
154 #define DDF_REVISION_0 "01.00.00"
155 #define DDF_REVISION_2 "01.02.00"
156
/* DDF header: stored in up to three copies per device - an anchor in
 * the last sector plus primary and secondary copies at the LBAs the
 * anchor records.  This is on-disk format (512 bytes exactly): do not
 * reorder or resize fields.
 */
struct ddf_header {
	be32	magic;		/* DDF_HEADER_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	char	revision[8];	/* 01.02.00 */
	be32	seq;		/* starts at '1' */
	be32	timestamp;	/* 1980-based, see DECADE */
	__u8	openflag;	/* non-zero while an update is in flight */
	__u8	foreignflag;
	__u8	enforcegroups;
	__u8	pad0;		/* 0xff */
	__u8	pad1[12];	/* 12 * 0xff */
	/* 64 bytes so far */
	__u8	header_ext[32];	/* reserved: fill with 0xff */
	be64	primary_lba;
	be64	secondary_lba;
	__u8	type;		/* DDF_HEADER_ANCHOR/PRIMARY/SECONDARY below */
	__u8	pad2[3];	/* 0xff */
	be32	workspace_len;	/* sectors for vendor space -
				 * at least 32768(sectors) */
	be64	workspace_lba;
	be16	max_pd_entries;	/* one of 15, 63, 255, 1023, 4095 */
	be16	max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
	be16	max_partitions; /* i.e. max num of configuration
				   record entries per disk */
	be16	config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
				      *12/512) */
	be16	max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
	__u8	pad3[54];	/* 0xff */
	/* 192 bytes so far */
	/* Section offsets are in sectors relative to primary/secondary
	 * LBA, lengths are in sectors (see load_section()). */
	be32	controller_section_offset;
	be32	controller_section_length;
	be32	phys_section_offset;
	be32	phys_section_length;
	be32	virt_section_offset;
	be32	virt_section_length;
	be32	config_section_offset;
	be32	config_section_length;
	be32	data_section_offset;
	be32	data_section_length;
	be32	bbm_section_offset;
	be32	bbm_section_length;
	be32	diag_space_offset;
	be32	diag_space_length;
	be32	vendor_offset;
	be32	vendor_length;
	/* 256 bytes so far */
	__u8	pad4[256];	/* 0xff */
};
206
207 /* type field */
208 #define DDF_HEADER_ANCHOR 0x00
209 #define DDF_HEADER_PRIMARY 0x01
210 #define DDF_HEADER_SECONDARY 0x02
211
212 /* The content of the 'controller section' - global scope */
/* The content of the 'controller section' - global scope.
 * Identifies the controller that wrote the metadata.  On-disk format:
 * do not reorder or resize fields. */
struct ddf_controller_data {
	be32	magic;			/* DDF_CONTROLLER_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	struct controller_type {	/* PCI-style device identifiers */
		be16 vendor_id;
		be16 device_id;
		be16 sub_vendor_id;
		be16 sub_device_id;
	} type;
	char	product_id[16];
	__u8	pad[8];	/* 0xff */
	__u8	vendor_data[448];
};
227
228 /* The content of phys_section - global scope */
/* The content of phys_section - global scope.
 * A table of physical disk records; the entries[] flexible array holds
 * up to max_pdes records.  On-disk format. */
struct phys_disk {
	be32	magic;		/* DDF_PHYS_RECORDS_MAGIC */
	be32	crc;
	be16	used_pdes;	/* This is a counter, not a max - the list
				 * of used entries may not be dense */
	be16	max_pdes;
	__u8	pad[52];
	struct phys_disk_entry {
		char	guid[DDF_GUID_LEN];
		be32	refnum;		/* matches disk_data.refnum on the disk */
		be16	type;		/* bitmap: DDF_Active_in_VD etc. below */
		be16	state;		/* bitmap: DDF_Online etc. below */
		be64	config_size;	/* DDF structures must be after here */
		char	path[18];	/* Another horrible structure really
					 * but is "used for information
					 * purposes only" */
		__u8	pad[6];
	} entries[0];
};
248
249 /* phys_disk_entry.type is a bitmap - bigendian remember */
250 #define DDF_Forced_PD_GUID 1
251 #define DDF_Active_in_VD 2
252 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
253 #define DDF_Spare 8 /* overrides Global_spare */
254 #define DDF_Foreign 16
255 #define DDF_Legacy 32 /* no DDF on this device */
256
257 #define DDF_Interface_mask 0xf00
258 #define DDF_Interface_SCSI 0x100
259 #define DDF_Interface_SAS 0x200
260 #define DDF_Interface_SATA 0x300
261 #define DDF_Interface_FC 0x400
262
263 /* phys_disk_entry.state is a bigendian bitmap */
264 #define DDF_Online 1
265 #define DDF_Failed 2 /* overrides 1,4,8 */
266 #define DDF_Rebuilding 4
267 #define DDF_Transition 8
268 #define DDF_SMART 16
269 #define DDF_ReadErrors 32
270 #define DDF_Missing 64
271
272 /* The content of the virt_section global scope */
/* The content of the virt_section global scope.
 * A table of virtual disk (array) records; entries[] holds up to
 * max_vdes records.  On-disk format. */
struct virtual_disk {
	be32	magic;		/* DDF_VIRT_RECORDS_MAGIC */
	be32	crc;
	be16	populated_vdes;
	be16	max_vdes;
	__u8	pad[52];
	struct virtual_entry {
		char	guid[DDF_GUID_LEN];
		be16	unit;
		__u16	pad0;		/* 0xffff */
		be16	guid_crc;
		be16	type;		/* bitmap: DDF_Shared etc. below */
		__u8	state;		/* bitmap: DDF_state_* below */
		__u8	init_state;	/* bitmap: DDF_init*/DDF_access* below */
		__u8	pad1[14];
		char	name[16];
	} entries[0];
};
291
292 /* virtual_entry.type is a bitmap - bigendian */
293 #define DDF_Shared 1
294 #define DDF_Enforce_Groups 2
295 #define DDF_Unicode 4
296 #define DDF_Owner_Valid 8
297
298 /* virtual_entry.state is a bigendian bitmap */
299 #define DDF_state_mask 0x7
300 #define DDF_state_optimal 0x0
301 #define DDF_state_degraded 0x1
302 #define DDF_state_deleted 0x2
303 #define DDF_state_missing 0x3
304 #define DDF_state_failed 0x4
305 #define DDF_state_part_optimal 0x5
306
307 #define DDF_state_morphing 0x8
308 #define DDF_state_inconsistent 0x10
309
310 /* virtual_entry.init_state is a bigendian bitmap */
311 #define DDF_initstate_mask 0x03
312 #define DDF_init_not 0x00
313 #define DDF_init_quick 0x01 /* initialisation is progress.
314 * i.e. 'state_inconsistent' */
315 #define DDF_init_full 0x02
316
317 #define DDF_access_mask 0xc0
318 #define DDF_access_rw 0x00
319 #define DDF_access_ro 0x80
320 #define DDF_access_blocked 0xc0
321
322 /* The content of the config_section - local scope
323 * It has multiple records each config_record_len sectors
324 * They can be vd_config or spare_assign
325 */
326
/* One virtual-disk configuration record (one per BVD of an array).
 * The trailing phys_refnum[] array has 'mppe' entries naming the
 * member disks in sequence; it is immediately followed by the implicit
 * per-disk starting-LBA array reached via LBA_OFFSET() below.
 * On-disk format. */
struct vd_config {
	be32	magic;		/* DDF_VD_CONF_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	be32	timestamp;	/* 1980-based, see DECADE */
	be32	seqnum;		/* bumped on every config change */
	__u8	pad0[24];
	be16	prim_elmnt_count;
	__u8	chunk_shift;	/* 0 == 512, 1==1024 etc */
	__u8	prl;		/* primary RAID level: DDF_RAID* */
	__u8	rlq;		/* RAID level qualifier: DDF_RAID*_* */
	__u8	sec_elmnt_count; /* number of BVDs; 1 == no secondary level */
	__u8	sec_elmnt_seq;	/* which BVD this record describes */
	__u8	srl;		/* secondary RAID level: DDF_2* */
	be64	blocks;		/* blocks per component could be different
				 * on different component devices...(only
				 * for concat I hope) */
	be64	array_blocks;	/* blocks in array */
	__u8	pad1[8];
	be32	spare_refs[8];
	__u8	cache_pol[8];	/* cache_pol[7] is a bitmap, see below */
	__u8	bg_rate;
	__u8	pad2[3];
	__u8	pad3[52];
	__u8	pad4[192];
	__u8	v0[32];	/* reserved- 0xff */
	__u8	v1[32];	/* reserved- 0xff */
	__u8	v2[16];	/* reserved- 0xff */
	__u8	v3[16];	/* reserved- 0xff */
	__u8	vendor[32];
	be32	phys_refnum[0];	/* refnum of each disk in sequence */
	/*__u64	lba_offset[0];  LBA offset in each phys.  Note extents in a
				bvd are always the same size */
};
/* Locate the implicit lba_offset[] array that follows phys_refnum[] */
#define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
362
363 /* vd_config.cache_pol[7] is a bitmap */
364 #define DDF_cache_writeback 1 /* else writethrough */
365 #define DDF_cache_wadaptive 2 /* only applies if writeback */
366 #define DDF_cache_readahead 4
367 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
368 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
369 #define DDF_cache_wallowed 32 /* enable write caching */
370 #define DDF_cache_rallowed 64 /* enable read caching */
371
/* Spare-assignment record, stored in the config section alongside
 * vd_config records (distinguished by magic).  Lists the arrays this
 * disk acts as a spare for.  On-disk format. */
struct spare_assign {
	be32	magic;		/* DDF_SPARE_ASSIGN_MAGIC */
	be32	crc;
	be32	timestamp;	/* 1980-based, see DECADE */
	__u8	reserved[7];
	__u8	type;		/* bitmap: DDF_spare_* below */
	be16	populated;	/* SAEs used */
	be16	max;		/* max SAEs */
	__u8	pad[8];
	struct spare_assign_entry {
		char	guid[DDF_GUID_LEN];	/* array this disk spares for */
		be16	secondary_element;
		__u8	pad[6];
	} spare_ents[0];
};
387 /* spare_assign.type is a bitmap */
388 #define DDF_spare_dedicated 0x1 /* else global */
389 #define DDF_spare_revertible 0x2 /* else committable */
390 #define DDF_spare_active 0x4 /* else not active */
391 #define DDF_spare_affinity 0x8 /* enclosure affinity */
392
393 /* The data_section contents - local scope */
/* The data_section contents - local scope.
 * Per-disk identity record; its guid/refnum tie this device to a
 * phys_disk_entry.  On-disk format. */
struct disk_data {
	be32	magic;		/* DDF_PHYS_DATA_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	be32	refnum;		/* crc of some magic drive data ... */
	__u8	forced_ref;	/* set when above was not result of magic */
	__u8	forced_guid;	/* set if guid was forced rather than magic */
	__u8	vendor[32];
	__u8	pad[442];
};
404
405 /* bbm_section content */
/* bbm_section content: the bad-block remapping log.  Each entry maps a
 * run of defective blocks to replacement space.  On-disk format. */
struct bad_block_log {
	be32	magic;		/* DDF_BBM_LOG_MAGIC */
	be32	crc;
	be16	entry_count;	/* used slots in entries[] */
	be32	spare_count;
	__u8	pad[10];
	be64	first_spare;
	struct mapped_block {
		be64	defective_start;
		be32	replacement_start;
		be16	remap_count;
		__u8	pad[2];
	} entries[0];
};
420
421 /* Struct for internally holding ddf structures */
422 /* The DDF structure stored on each device is potentially
423 * quite different, as some data is global and some is local.
424 * The global data is:
425 * - ddf header
426 * - controller_data
427 * - Physical disk records
428 * - Virtual disk records
429 * The local data is:
430 * - Configuration records
431 * - Physical Disk data section
432 * ( and Bad block and vendor which I don't care about yet).
433 *
434 * The local data is parsed into separate lists as it is read
435 * and reconstructed for writing. This means that we only need
436 * to make config changes once and they are automatically
437 * propagated to all devices.
438 * The global (config and disk data) records are each in a list
439 * of separate data structures. When writing we find the entry
440 * or entries applicable to the particular device.
441 */
/* In-memory representation of the DDF metadata for one container. */
struct ddf_super {
	/* The three header copies read from disk; 'active' points at
	 * whichever of primary/secondary is authoritative. */
	struct ddf_header anchor, primary, secondary;
	struct ddf_controller_data controller;
	struct ddf_header *active;
	struct phys_disk	*phys;	/* global physical disk records */
	struct virtual_disk	*virt;	/* global virtual disk records */
	char			*conf;	/* raw config section of the last-loaded disk */
	int pdsize, vdsize;		/* byte sizes of phys/virt sections */
	/* Limits taken from the active header (see load_ddf_global()) */
	unsigned int max_part, mppe, conf_rec_len;
	int currentdev;
	int updates_pending;		/* in-memory metadata differs from disk */
	/* One vcl per virtual disk: its config record(s) plus bookkeeping.
	 * The union pads the bookkeeping to a full 512-byte sector so
	 * 'conf' (and its trailing arrays) start sector-aligned. */
	struct vcl {
		union {
			char space[512];
			struct {
				struct vcl	*next;
				unsigned int	vcnum; /* index into ->virt */
				/* For an array with a secondary level there are
				 * multiple vd_config structures, all with the same
				 * guid but with different sec_elmnt_seq.
				 * One of these structures is in 'conf' below.
				 * The others are in other_bvds, not in any
				 * particular order.
				 */
				struct vd_config **other_bvds;
				__u64		*block_sizes; /* NULL if all the same */
			};
		};
		struct vd_config conf;
	} *conflist, *currentconf;
	/* One dl per member device; same sector-padding trick as vcl. */
	struct dl {
		union {
			char space[512];
			struct {
				struct dl	*next;
				int major, minor;
				char		*devname;
				int fd;
				unsigned long long size; /* sectors */
				be64 primary_lba; /* sectors */
				be64 secondary_lba; /* sectors */
				be64 workspace_lba; /* sectors */
				int pdnum;	/* index in ->phys */
				struct spare_assign *spare;
				void *mdupdate; /* hold metadata update */

				/* These fields used by auto-layout */
				int raiddisk; /* slot to fill in autolayout */
				__u64 esize;
				int displayed;
			};
		};
		struct disk_data disk;
		struct vcl *vlist[0]; /* max_part in size */
	} *dlist, *add_list;
};
498
499 #ifndef MDASSEMBLE
500 static int load_super_ddf_all(struct supertype *st, int fd,
501 void **sbp, char *devname);
502 static int get_svd_state(const struct ddf_super *, const struct vcl *);
503 static int
504 validate_geometry_ddf_container(struct supertype *st,
505 int level, int layout, int raiddisks,
506 int chunk, unsigned long long size,
507 unsigned long long data_offset,
508 char *dev, unsigned long long *freesize,
509 int verbose);
510
511 static int validate_geometry_ddf_bvd(struct supertype *st,
512 int level, int layout, int raiddisks,
513 int *chunk, unsigned long long size,
514 unsigned long long data_offset,
515 char *dev, unsigned long long *freesize,
516 int verbose);
517 #endif
518
519 static void free_super_ddf(struct supertype *st);
520 static int all_ff(const char *guid);
521 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
522 be32 refnum, unsigned int nmax,
523 const struct vd_config **bvd,
524 unsigned int *idx);
525 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
526 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
527 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
528 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i);
529 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
530 static int init_super_ddf_bvd(struct supertype *st,
531 mdu_array_info_t *info,
532 unsigned long long size,
533 char *name, char *homehost,
534 int *uuid, unsigned long long data_offset);
535
536 #ifndef offsetof
537 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
538 #endif
539
#if DEBUG
/* Debug helper: print the state/init_state of every defined virtual
 * disk, prefixed with the calling function's name via 'msg'.
 * Compiles to an empty stub when DEBUG is not set. */
static void pr_state(struct ddf_super *ddf, const char *msg)
{
	unsigned int i;
	dprintf("%s/%s: ", __func__, msg);
	for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
		/* all-0xff GUID marks an unused virtual disk slot */
		if (all_ff(ddf->virt->entries[i].guid))
			continue;
		dprintf("%u(s=%02x i=%02x) ", i,
			ddf->virt->entries[i].state,
			ddf->virt->entries[i].init_state);
	}
	dprintf("\n");
}
#else
static void pr_state(const struct ddf_super *ddf, const char *msg) {}
#endif
557
558 static void _ddf_set_updates_pending(struct ddf_super *ddf, struct vd_config *vc,
559 const char *func)
560 {
561 if (vc) {
562 vc->timestamp = cpu_to_be32(time(0)-DECADE);
563 vc->seqnum = cpu_to_be32(be32_to_cpu(vc->seqnum) + 1);
564 }
565 if (ddf->updates_pending)
566 return;
567 ddf->updates_pending = 1;
568 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
569 pr_state(ddf, func);
570 }
571
572 #define ddf_set_updates_pending(x,v) _ddf_set_updates_pending((x), (v), __func__)
573
574 static be32 calc_crc(void *buf, int len)
575 {
576 /* crcs are always at the same place as in the ddf_header */
577 struct ddf_header *ddf = buf;
578 be32 oldcrc = ddf->crc;
579 __u32 newcrc;
580 ddf->crc = cpu_to_be32(0xffffffff);
581
582 newcrc = crc32(0, buf, len);
583 ddf->crc = oldcrc;
584 /* The crc is stored (like everything) bigendian, so convert
585 * here for simplicity
586 */
587 return cpu_to_be32(newcrc);
588 }
589
#define DDF_INVALID_LEVEL 0xff
#define DDF_NO_SECONDARY 0xff
/* Report an md level/layout/disk-count combination that DDF cannot
 * represent.  Always returns -1 so callers can simply
 * 'return err_bad_md_layout(array);'. */
static int err_bad_md_layout(const mdu_array_info_t *array)
{
	pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
	       array->level, array->layout, array->raid_disks);
	return -1;
}
598
/* Translate an md level/layout/raid_disks triple into DDF PRL/RLQ/SRL
 * and element counts, storing them in 'conf'.  Inverse of
 * layout_ddf2md().  Returns 0 on success, -1 (via err_bad_md_layout())
 * when the md geometry has no DDF representation. */
static int layout_md2ddf(const mdu_array_info_t *array,
			 struct vd_config *conf)
{
	be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
	__u8 prl = DDF_INVALID_LEVEL, rlq = 0;
	__u8 sec_elmnt_count = 1;
	__u8 srl = DDF_NO_SECONDARY;

	switch (array->level) {
	case LEVEL_LINEAR:
		prl = DDF_CONCAT;
		break;
	case 0:
		rlq = DDF_RAID0_SIMPLE;
		prl = DDF_RAID0;
		break;
	case 1:
		/* a DDF RAID1 plex holds exactly 2 or 3 devices */
		switch (array->raid_disks) {
		case 2:
			rlq = DDF_RAID1_SIMPLE;
			break;
		case 3:
			rlq = DDF_RAID1_MULTI;
			break;
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID1;
		break;
	case 4:
		if (array->layout != 0)
			return err_bad_md_layout(array);
		rlq = DDF_RAID4_N;
		prl = DDF_RAID4;
		break;
	case 5:
		switch (array->layout) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			rlq = DDF_RAID5_N_RESTART;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			rlq = DDF_RAID5_0_RESTART;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			rlq = DDF_RAID5_N_CONTINUE;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			/* not mentioned in standard */
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID5;
		break;
	case 6:
		switch (array->layout) {
		case ALGORITHM_ROTATING_N_RESTART:
			rlq = DDF_RAID5_N_RESTART;
			break;
		case ALGORITHM_ROTATING_ZERO_RESTART:
			rlq = DDF_RAID6_0_RESTART;
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			rlq = DDF_RAID5_N_CONTINUE;
			break;
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID6;
		break;
	case 10:
		/* md RAID10 near-layouts map to DDF RAID1 plexes of 2
		 * (layout 0x102) or 3 (layout 0x103) disks joined by a
		 * spanned secondary level; other layouts unsupported */
		if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
			rlq = DDF_RAID1_SIMPLE;
			prim_elmnt_count = cpu_to_be16(2);
			sec_elmnt_count = array->raid_disks / 2;
		} else if (array->raid_disks % 3 == 0
			   && array->layout == 0x103) {
			rlq = DDF_RAID1_MULTI;
			prim_elmnt_count = cpu_to_be16(3);
			sec_elmnt_count = array->raid_disks / 3;
		} else
			return err_bad_md_layout(array);
		srl = DDF_2SPANNED;
		prl = DDF_RAID1;
		break;
	default:
		return err_bad_md_layout(array);
	}
	conf->prl = prl;
	conf->prim_elmnt_count = prim_elmnt_count;
	conf->rlq = rlq;
	conf->srl = srl;
	conf->sec_elmnt_count = sec_elmnt_count;
	return 0;
}
693
/* Report a DDF PRL/RLQ/disk-count combination that md cannot express.
 * Always returns -1 so callers can return it directly. */
static int err_bad_ddf_layout(const struct vd_config *conf)
{
	pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
	       conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
	return -1;
}
700
701 static int layout_ddf2md(const struct vd_config *conf,
702 mdu_array_info_t *array)
703 {
704 int level = LEVEL_UNSUPPORTED;
705 int layout = 0;
706 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
707
708 if (conf->sec_elmnt_count > 1) {
709 /* see also check_secondary() */
710 if (conf->prl != DDF_RAID1 ||
711 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
712 pr_err("Unsupported secondary RAID level %u/%u\n",
713 conf->prl, conf->srl);
714 return -1;
715 }
716 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
717 layout = 0x102;
718 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
719 layout = 0x103;
720 else
721 return err_bad_ddf_layout(conf);
722 raiddisks *= conf->sec_elmnt_count;
723 level = 10;
724 goto good;
725 }
726
727 switch (conf->prl) {
728 case DDF_CONCAT:
729 level = LEVEL_LINEAR;
730 break;
731 case DDF_RAID0:
732 if (conf->rlq != DDF_RAID0_SIMPLE)
733 return err_bad_ddf_layout(conf);
734 level = 0;
735 break;
736 case DDF_RAID1:
737 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
738 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
739 return err_bad_ddf_layout(conf);
740 level = 1;
741 break;
742 case DDF_RAID4:
743 if (conf->rlq != DDF_RAID4_N)
744 return err_bad_ddf_layout(conf);
745 level = 4;
746 break;
747 case DDF_RAID5:
748 switch (conf->rlq) {
749 case DDF_RAID5_N_RESTART:
750 layout = ALGORITHM_LEFT_ASYMMETRIC;
751 break;
752 case DDF_RAID5_0_RESTART:
753 layout = ALGORITHM_RIGHT_ASYMMETRIC;
754 break;
755 case DDF_RAID5_N_CONTINUE:
756 layout = ALGORITHM_LEFT_SYMMETRIC;
757 break;
758 default:
759 return err_bad_ddf_layout(conf);
760 }
761 level = 5;
762 break;
763 case DDF_RAID6:
764 switch (conf->rlq) {
765 case DDF_RAID5_N_RESTART:
766 layout = ALGORITHM_ROTATING_N_RESTART;
767 break;
768 case DDF_RAID6_0_RESTART:
769 layout = ALGORITHM_ROTATING_ZERO_RESTART;
770 break;
771 case DDF_RAID5_N_CONTINUE:
772 layout = ALGORITHM_ROTATING_N_CONTINUE;
773 break;
774 default:
775 return err_bad_ddf_layout(conf);
776 }
777 level = 6;
778 break;
779 default:
780 return err_bad_ddf_layout(conf);
781 };
782
783 good:
784 array->level = level;
785 array->layout = layout;
786 array->raid_disks = raiddisks;
787 return 0;
788 }
789
static int load_ddf_header(int fd, unsigned long long lba,
			   unsigned long long size,
			   int type,
			   struct ddf_header *hdr, struct ddf_header *anchor)
{
	/* read a ddf header (primary or secondary) from fd/lba
	 * and check that it is consistent with anchor
	 * Need to check:
	 *   magic, crc, guid, rev, and LBA's header_type, and
	 *    everything after header_type must be the same
	 *
	 * 'lba' and 'size' are in 512-byte sectors; 'type' is the
	 * expected DDF_HEADER_PRIMARY or DDF_HEADER_SECONDARY value.
	 * Returns 1 when the header is valid, 0 otherwise.
	 */

	/* a header must fit on the device; also guards a bogus
	 * (e.g. all-ones) LBA from the anchor */
	if (lba >= size-1)
		return 0;

	if (lseek64(fd, lba<<9, 0) < 0)
		return 0;

	if (read(fd, hdr, 512) != 512)
		return 0;

	if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
		pr_err("%s: bad header magic\n", __func__);
		return 0;
	}
	if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
		pr_err("%s: bad CRC\n", __func__);
		return 0;
	}
	/* Everything from 'pad2' onwards (all section pointers etc.)
	 * must match the anchor byte for byte. */
	if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
	    memcmp(anchor->revision, hdr->revision, 8) != 0 ||
	    !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
	    !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
	    hdr->type != type ||
	    memcmp(anchor->pad2, hdr->pad2, 512 -
		   offsetof(struct ddf_header, pad2)) != 0) {
		pr_err("%s: header mismatch\n", __func__);
		return 0;
	}

	/* Looks good enough to me... */
	return 1;
}
832
833 static void *load_section(int fd, struct ddf_super *super, void *buf,
834 be32 offset_be, be32 len_be, int check)
835 {
836 unsigned long long offset = be32_to_cpu(offset_be);
837 unsigned long long len = be32_to_cpu(len_be);
838 int dofree = (buf == NULL);
839
840 if (check)
841 if (len != 2 && len != 8 && len != 32
842 && len != 128 && len != 512)
843 return NULL;
844
845 if (len > 1024)
846 return NULL;
847 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
848 buf = NULL;
849
850 if (!buf)
851 return NULL;
852
853 if (super->active->type == 1)
854 offset += be64_to_cpu(super->active->primary_lba);
855 else
856 offset += be64_to_cpu(super->active->secondary_lba);
857
858 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
859 if (dofree)
860 free(buf);
861 return NULL;
862 }
863 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
864 if (dofree)
865 free(buf);
866 return NULL;
867 }
868 return buf;
869 }
870
871 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
872 {
873 unsigned long long dsize;
874
875 get_dev_size(fd, NULL, &dsize);
876
877 if (lseek64(fd, dsize-512, 0) < 0) {
878 if (devname)
879 pr_err("Cannot seek to anchor block on %s: %s\n",
880 devname, strerror(errno));
881 return 1;
882 }
883 if (read(fd, &super->anchor, 512) != 512) {
884 if (devname)
885 pr_err("Cannot read anchor block on %s: %s\n",
886 devname, strerror(errno));
887 return 1;
888 }
889 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
890 if (devname)
891 pr_err("no DDF anchor found on %s\n",
892 devname);
893 return 2;
894 }
895 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
896 if (devname)
897 pr_err("bad CRC on anchor on %s\n",
898 devname);
899 return 2;
900 }
901 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
902 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
903 if (devname)
904 pr_err("can only support super revision"
905 " %.8s and earlier, not %.8s on %s\n",
906 DDF_REVISION_2, super->anchor.revision,devname);
907 return 2;
908 }
909 super->active = NULL;
910 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
911 dsize >> 9, 1,
912 &super->primary, &super->anchor) == 0) {
913 if (devname)
914 pr_err("Failed to load primary DDF header "
915 "on %s\n", devname);
916 } else
917 super->active = &super->primary;
918
919 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
920 dsize >> 9, 2,
921 &super->secondary, &super->anchor)) {
922 if (super->active == NULL
923 || (be32_to_cpu(super->primary.seq)
924 < be32_to_cpu(super->secondary.seq) &&
925 !super->secondary.openflag)
926 || (be32_to_cpu(super->primary.seq)
927 == be32_to_cpu(super->secondary.seq) &&
928 super->primary.openflag && !super->secondary.openflag)
929 )
930 super->active = &super->secondary;
931 } else if (devname &&
932 be64_to_cpu(super->anchor.secondary_lba) != ~(__u64)0)
933 pr_err("Failed to load secondary DDF header on %s\n",
934 devname);
935 if (super->active == NULL)
936 return 2;
937 return 0;
938 }
939
/* Load the globally-scoped sections (controller, physical disk and
 * virtual disk records) referenced by the active header, and cache the
 * header limits the rest of the code relies on.
 * Returns 0 on success, 2 if any required section could not be read
 * (in which case any partially-loaded sections are freed). */
static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
{
	void *ok;
	/* controller section is read into the embedded struct */
	ok = load_section(fd, super, &super->controller,
			  super->active->controller_section_offset,
			  super->active->controller_section_length,
			  0);
	super->phys = load_section(fd, super, NULL,
				   super->active->phys_section_offset,
				   super->active->phys_section_length,
				   1);
	super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;

	super->virt = load_section(fd, super, NULL,
				   super->active->virt_section_offset,
				   super->active->virt_section_length,
				   1);
	super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
	if (!ok ||
	    !super->phys ||
	    !super->virt) {
		/* free(NULL) is a no-op, so unconditional free is safe */
		free(super->phys);
		free(super->virt);
		super->phys = NULL;
		super->virt = NULL;
		return 2;
	}
	super->conflist = NULL;
	super->dlist = NULL;

	/* cache the per-disk limits from the header for easy access */
	super->max_part = be16_to_cpu(super->active->max_partitions);
	super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
	super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
	return 0;
}
975
#define DDF_UNUSED_BVD 0xff
/* Allocate vcl->other_bvds for an array with a secondary level: one
 * vd_config slot per BVD other than the one held in vcl->conf.  A
 * single allocation carries all the records followed by the pointer
 * array; every slot is marked DDF_UNUSED_BVD until add_other_bvd()
 * fills it.  Returns 0 on success, -1 on allocation failure. */
static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
{
	unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
	unsigned int i, vdsize;
	void *p;
	if (n_vds == 0) {
		vcl->other_bvds = NULL;
		return 0;
	}
	vdsize = ddf->conf_rec_len * 512;
	if (posix_memalign(&p, 512, n_vds *
			   (vdsize + sizeof(struct vd_config *))) != 0)
		return -1;
	/* NOTE: arithmetic on void* is a GCC extension (unit size 1) */
	vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
	for (i = 0; i < n_vds; i++) {
		vcl->other_bvds[i] = p + i * vdsize;
		memset(vcl->other_bvds[i], 0, vdsize);
		vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
	}
	return 0;
}
998
/* Store the BVD config record 'vd' ('len' bytes) in vcl->other_bvds.
 * If a record with the same sec_elmnt_seq already exists it is only
 * replaced by a strictly newer one (higher seqnum); otherwise the
 * record goes into the first unused slot.  Complains and drops the
 * record if no slot is free. */
static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
			  unsigned int len)
{
	int i;
	/* look for an existing record for this BVD */
	for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
		if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
			break;

	if (i < vcl->conf.sec_elmnt_count-1) {
		/* already present - keep whichever copy is newer */
		if (be32_to_cpu(vd->seqnum) <=
		    be32_to_cpu(vcl->other_bvds[i]->seqnum))
			return;
	} else {
		/* not seen before - find a free slot */
		for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
			if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
				break;
		if (i == vcl->conf.sec_elmnt_count-1) {
			pr_err("no space for sec level config %u, count is %u\n",
			       vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
			return;
		}
	}
	memcpy(vcl->other_bvds[i], vd, len);
}
1023
/* Load the per-device ("local") DDF metadata from 'fd' into 'super':
 * - allocate a 'struct dl' describing this physical disk, fill it from
 *   the data section and push it onto super->dlist (pdnum is looked up
 *   by GUID in the phys table, -1 if absent);
 * - scan the config section, remembering a spare-assignment record in
 *   dl->spare and merging each valid vd_config record into
 *   super->conflist (newest seqnum wins; records for other BVDs of a
 *   known VD go through add_other_bvd()).
 * 'devname' is duplicated for diagnostics; 'keep' keeps 'fd' open in
 * dl->fd.  Returns 0 on success, 1 on allocation failure.
 */
static int load_ddf_local(int fd, struct ddf_super *super,
			  char *devname, int keep)
{
	struct dl *dl;
	struct stat stb;
	char *conf;
	unsigned int i;
	unsigned int confsec;
	int vnum;
	unsigned int max_virt_disks =
		be16_to_cpu(super->active->max_vd_entries);
	unsigned long long dsize;

	/* First the local disk info */
	if (posix_memalign((void**)&dl, 512,
			   sizeof(*dl) +
			   (super->max_part) * sizeof(dl->vlist[0])) != 0) {
		pr_err("%s could not allocate disk info buffer\n",
		       __func__);
		return 1;
	}

	load_section(fd, super, &dl->disk,
		     super->active->data_section_offset,
		     super->active->data_section_length,
		     0);
	dl->devname = devname ? xstrdup(devname) : NULL;

	fstat(fd, &stb);
	dl->major = major(stb.st_rdev);
	dl->minor = minor(stb.st_rdev);
	dl->next = super->dlist;
	dl->fd = keep ? fd : -1;

	dl->size = 0;
	if (get_dev_size(fd, devname, &dsize))
		dl->size = dsize >> 9;
	/* If the disks have different sizes, the LBAs will differ
	 * between phys disks.
	 * At this point here, the values in super->active must be valid
	 * for this phys disk. */
	dl->primary_lba = super->active->primary_lba;
	dl->secondary_lba = super->active->secondary_lba;
	dl->workspace_lba = super->active->workspace_lba;
	dl->spare = NULL;
	for (i = 0 ; i < super->max_part ; i++)
		dl->vlist[i] = NULL;
	super->dlist = dl;
	dl->pdnum = -1;
	/* Locate this disk in the phys table by GUID; stays -1 if absent. */
	for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
		if (memcmp(super->phys->entries[i].guid,
			   dl->disk.guid, DDF_GUID_LEN) == 0)
			dl->pdnum = i;

	/* Now the config list. */
	/* 'conf' is an array of config entries, some of which are
	 * probably invalid. Those which are good need to be copied into
	 * the conflist
	 */

	conf = load_section(fd, super, super->conf,
			    super->active->config_section_offset,
			    super->active->config_section_length,
			    0);
	super->conf = conf;
	vnum = 0;
	/* Walk the section one record at a time; lengths are in
	 * 512-byte sectors. */
	for (confsec = 0;
	     confsec < be32_to_cpu(super->active->config_section_length);
	     confsec += super->conf_rec_len) {
		struct vd_config *vd =
			(struct vd_config *)((char*)conf + confsec*512);
		struct vcl *vcl;

		if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
			/* Keep only the first spare-assignment record. */
			if (dl->spare)
				continue;
			if (posix_memalign((void**)&dl->spare, 512,
					   super->conf_rec_len*512) != 0) {
				pr_err("%s could not allocate spare info buf\n",
				       __func__);
				return 1;
			}

			memcpy(dl->spare, vd, super->conf_rec_len*512);
			continue;
		}
		if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
			/* Must be vendor-unique - I cannot handle those */
			continue;

		/* Do we already know this VD (by GUID)? */
		for (vcl = super->conflist; vcl; vcl = vcl->next) {
			if (memcmp(vcl->conf.guid,
				   vd->guid, DDF_GUID_LEN) == 0)
				break;
		}

		if (vcl) {
			dl->vlist[vnum++] = vcl;
			/* A record for a different secondary element goes
			 * into the other_bvds array, not vcl->conf. */
			if (vcl->other_bvds != NULL &&
			    vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
				add_other_bvd(vcl, vd, super->conf_rec_len*512);
				continue;
			}
			/* Same BVD: only accept a strictly newer record. */
			if (be32_to_cpu(vd->seqnum) <=
			    be32_to_cpu(vcl->conf.seqnum))
				continue;
		} else {
			/* First record for this VD: allocate a new vcl. */
			if (posix_memalign((void**)&vcl, 512,
					   (super->conf_rec_len*512 +
					    offsetof(struct vcl, conf))) != 0) {
				pr_err("%s could not allocate vcl buf\n",
				       __func__);
				return 1;
			}
			vcl->next = super->conflist;
			vcl->block_sizes = NULL; /* FIXME not for CONCAT */
			vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
			if (alloc_other_bvds(super, vcl) != 0) {
				pr_err("%s could not allocate other bvds\n",
				       __func__);
				free(vcl);
				return 1;
			};
			super->conflist = vcl;
			dl->vlist[vnum++] = vcl;
		}
		memcpy(&vcl->conf, vd, super->conf_rec_len*512);
		/* Map the VD GUID to its index in the virtual disk table. */
		for (i=0; i < max_virt_disks ; i++)
			if (memcmp(super->virt->entries[i].guid,
				   vcl->conf.guid, DDF_GUID_LEN)==0)
				break;
		if (i < max_virt_disks)
			vcl->vcnum = i;
	}

	return 0;
}
1161
/* Probe 'fd' for DDF metadata and, on success, attach a freshly loaded
 * ddf_super to st->sb (setting st->ss/minor_version/max_devs when the
 * supertype is not yet bound).
 * Devices that are partitions, too small (<= 32M), or not a multiple of
 * 512 bytes are rejected.  Returns 0 on success, non-zero otherwise.
 */
static int load_super_ddf(struct supertype *st, int fd,
			  char *devname)
{
	unsigned long long dsize;
	struct ddf_super *super;
	int rv;

	if (get_dev_size(fd, devname, &dsize) == 0)
		return 1;

	if (test_partition(fd))
		/* DDF is not allowed on partitions */
		return 1;

	/* 32M is a lower bound */
	if (dsize <= 32*1024*1024) {
		if (devname)
			pr_err("%s is too small for ddf: "
			       "size is %llu sectors.\n",
			       devname, dsize>>9);
		return 1;
	}
	if (dsize & 511) {
		if (devname)
			pr_err("%s is an odd size for ddf: "
			       "size is %llu bytes.\n",
			       devname, dsize);
		return 1;
	}

	/* Drop any superblock previously attached to this supertype. */
	free_super_ddf(st);

	if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
		pr_err("malloc of %zu failed.\n",
		       sizeof(*super));
		return 1;
	}
	memset(super, 0, sizeof(*super));

	rv = load_ddf_headers(fd, super, devname);
	if (rv) {
		free(super);
		return rv;
	}

	/* Have valid headers and have chosen the best. Let's read in the rest*/

	rv = load_ddf_global(fd, super, devname);

	if (rv) {
		if (devname)
			pr_err("Failed to load all information "
			       "sections on %s\n", devname);
		/* NOTE(review): only 'super' itself is freed; sections
		 * partially allocated by load_ddf_global() appear to
		 * leak on this path — confirm. */
		free(super);
		return rv;
	}

	rv = load_ddf_local(fd, super, devname, 0);

	if (rv) {
		if (devname)
			pr_err("Failed to load all information "
			       "sections on %s\n", devname);
		/* NOTE(review): same concern as above for sections and
		 * the dlist entry allocated by load_ddf_local(). */
		free(super);
		return rv;
	}

	/* Should possibly check the sections .... */

	st->sb = super;
	if (st->ss == NULL) {
		st->ss = &super_ddf;
		st->minor_version = 0;
		st->max_devs = 512;
	}
	return 0;

}
1240
1241 static void free_super_ddf(struct supertype *st)
1242 {
1243 struct ddf_super *ddf = st->sb;
1244 if (ddf == NULL)
1245 return;
1246 free(ddf->phys);
1247 free(ddf->virt);
1248 free(ddf->conf);
1249 while (ddf->conflist) {
1250 struct vcl *v = ddf->conflist;
1251 ddf->conflist = v->next;
1252 if (v->block_sizes)
1253 free(v->block_sizes);
1254 if (v->other_bvds)
1255 /*
1256 v->other_bvds[0] points to beginning of buffer,
1257 see alloc_other_bvds()
1258 */
1259 free(v->other_bvds[0]);
1260 free(v);
1261 }
1262 while (ddf->dlist) {
1263 struct dl *d = ddf->dlist;
1264 ddf->dlist = d->next;
1265 if (d->fd >= 0)
1266 close(d->fd);
1267 if (d->spare)
1268 free(d->spare);
1269 free(d);
1270 }
1271 while (ddf->add_list) {
1272 struct dl *d = ddf->add_list;
1273 ddf->add_list = d->next;
1274 if (d->fd >= 0)
1275 close(d->fd);
1276 if (d->spare)
1277 free(d->spare);
1278 free(d);
1279 }
1280 free(ddf);
1281 st->sb = NULL;
1282 }
1283
1284 static struct supertype *match_metadata_desc_ddf(char *arg)
1285 {
1286 /* 'ddf' only supports containers */
1287 struct supertype *st;
1288 if (strcmp(arg, "ddf") != 0 &&
1289 strcmp(arg, "default") != 0
1290 )
1291 return NULL;
1292
1293 st = xcalloc(1, sizeof(*st));
1294 st->ss = &super_ddf;
1295 st->max_devs = 512;
1296 st->minor_version = 0;
1297 st->sb = NULL;
1298 return st;
1299 }
1300
1301 #ifndef MDASSEMBLE
1302
/* Names for the virtual-disk state field (low 3 bits of ve->state). */
static mapping_t ddf_state[] = {
	{ "Optimal", 0},
	{ "Degraded", 1},
	{ "Deleted", 2},
	{ "Missing", 3},
	{ "Failed", 4},
	{ "Partially Optimal", 5},
	{ "-reserved-", 6},
	{ "-reserved-", 7},
	{ NULL, 0}
};

/* Names for the VD init state (ve->init_state & DDF_initstate_mask). */
static mapping_t ddf_init_state[] = {
	{ "Not Initialised", 0},
	{ "QuickInit in Progress", 1},
	{ "Fully Initialised", 2},
	{ "*UNKNOWN*", 3},
	{ NULL, 0}
};
/* Names for the VD access policy (ve->init_state & DDF_access_mask). */
static mapping_t ddf_access[] = {
	{ "Read/Write", 0},
	{ "Reserved", 1},
	{ "Read Only", 2},
	{ "Blocked (no access)", 3},
	{ NULL ,0}
};

/* Names for the primary RAID level (vd_config->prl). */
static mapping_t ddf_level[] = {
	{ "RAID0", DDF_RAID0},
	{ "RAID1", DDF_RAID1},
	{ "RAID3", DDF_RAID3},
	{ "RAID4", DDF_RAID4},
	{ "RAID5", DDF_RAID5},
	{ "RAID1E",DDF_RAID1E},
	{ "JBOD", DDF_JBOD},
	{ "CONCAT",DDF_CONCAT},
	{ "RAID5E",DDF_RAID5E},
	{ "RAID5EE",DDF_RAID5EE},
	{ "RAID6", DDF_RAID6},
	{ NULL, 0}
};
/* Names for the secondary RAID level (vd_config->srl). */
static mapping_t ddf_sec_level[] = {
	{ "Striped", DDF_2STRIPED},
	{ "Mirrored", DDF_2MIRRORED},
	{ "Concat", DDF_2CONCAT},
	{ "Spanned", DDF_2SPANNED},
	{ NULL, 0}
};
1351 #endif
1352
1353 static int all_ff(const char *guid)
1354 {
1355 int i;
1356 for (i = 0; i < DDF_GUID_LEN; i++)
1357 if (guid[i] != (char)0xff)
1358 return 0;
1359 return 1;
1360 }
1361
1362 static const char *guid_str(const char *guid)
1363 {
1364 static char buf[DDF_GUID_LEN*2+1];
1365 int i;
1366 char *p = buf;
1367 for (i = 0; i < DDF_GUID_LEN; i++) {
1368 unsigned char c = guid[i];
1369 if (c >= 32 && c < 127)
1370 p += sprintf(p, "%c", c);
1371 else
1372 p += sprintf(p, "%02x", c);
1373 }
1374 *p = '\0';
1375 return (const char *) buf;
1376 }
1377
1378 #ifndef MDASSEMBLE
/* Pretty-print a GUID for --examine output.
 * First the full GUID in hex (groups of 4 bytes separated by ':'),
 * then in parentheses any leading printable-ASCII prefix (with
 * trailing spaces stripped) and, when 'tstamp' is set, the timestamp
 * decoded from bytes 16-19.
 */
static void print_guid(char *guid, int tstamp)
{
	/* A GUIDs are part (or all) ASCII and part binary.
	 * They tend to be space padded.
	 * We print the GUID in HEX, then in parentheses add
	 * any initial ASCII sequence, and a possible
	 * time stamp from bytes 16-19
	 */
	int l = DDF_GUID_LEN;
	int i;

	for (i=0 ; i<DDF_GUID_LEN ; i++) {
		if ((i&3)==0 && i != 0) printf(":");
		printf("%02X", guid[i]&255);
	}

	printf("\n (");
	/* Ignore trailing space padding before the ASCII scan. */
	while (l && guid[l-1] == ' ')
		l--;
	for (i=0 ; i<l ; i++) {
		if (guid[i] >= 0x20 && guid[i] < 0x7f)
			fputc(guid[i], stdout);
		else
			break;	/* stop at the first non-printable byte */
	}
	if (tstamp) {
		/* Bytes 16-19 hold a big-endian 1980-based timestamp;
		 * DECADE converts it to a Unix time.
		 * NOTE(review): the (__u32*) cast assumes guid+16 is
		 * suitably aligned — confirm for all callers. */
		time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
		char tbuf[100];
		struct tm *tm;
		tm = localtime(&then);
		strftime(tbuf, 100, " %D %T",tm);
		fputs(tbuf, stdout);
	}
	printf(")");
}
1414
/* Print the detail block(s) for the VD identified by 'guid': one per
 * matching BVD config record in sb->conflist (records failing their
 * CRC are skipped).  Shows member phys-table indices, chunk size,
 * RAID level, secondary level/position for multi-level arrays, and
 * device/array sizes.  'n' labels each output line.
 */
static void examine_vd(int n, struct ddf_super *sb, char *guid)
{
	int crl = sb->conf_rec_len;
	struct vcl *vcl;

	for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
		unsigned int i;
		struct vd_config *vc = &vcl->conf;

		/* Skip corrupt records and records for other VDs. */
		if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
			continue;
		if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
			continue;

		/* Ok, we know about this VD, let's give more details */
		printf(" Raid Devices[%d] : %d (", n,
		       be16_to_cpu(vc->prim_elmnt_count));
		for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
			int j;
			int cnt = be16_to_cpu(sb->phys->max_pdes);
			/* Translate the member's refnum into a phys
			 * table index for display. */
			for (j=0; j<cnt; j++)
				if (be32_eq(vc->phys_refnum[i],
					    sb->phys->entries[j].refnum))
					break;
			if (i) printf(" ");
			if (j < cnt)
				printf("%d", j);
			else
				printf("--");	/* not in phys table */
		}
		printf(")\n");
		/* chunk_shift is treated as a "no chunk size" sentinel
		 * when it is 255 */
		if (vc->chunk_shift != 255)
			printf(" Chunk Size[%d] : %d sectors\n", n,
			       1 << vc->chunk_shift);
		printf(" Raid Level[%d] : %s\n", n,
		       map_num(ddf_level, vc->prl)?:"-unknown-");
		if (vc->sec_elmnt_count != 1) {
			printf(" Secondary Position[%d] : %d of %d\n", n,
			       vc->sec_elmnt_seq, vc->sec_elmnt_count);
			printf(" Secondary Level[%d] : %s\n", n,
			       map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
		}
		/* Sizes are stored in sectors; /2 converts to KiB. */
		printf(" Device Size[%d] : %llu\n", n,
		       be64_to_cpu(vc->blocks)/2);
		printf(" Array Size[%d] : %llu\n", n,
		       be64_to_cpu(vc->array_blocks)/2);
	}
}
1463
/* Print the "Virtual Disks" section of --examine: one block per used
 * entry in the virtual disk table, each followed by per-BVD details
 * from examine_vd().
 */
static void examine_vds(struct ddf_super *sb)
{
	int cnt = be16_to_cpu(sb->virt->populated_vdes);
	unsigned int i;
	printf(" Virtual Disks : %d\n", cnt);

	for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
		struct virtual_entry *ve = &sb->virt->entries[i];
		/* an all-0xff GUID marks an unused slot */
		if (all_ff(ve->guid))
			continue;
		printf("\n");
		printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
		printf("\n");
		printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
		printf(" state[%d] : %s, %s%s\n", i,
		       map_num(ddf_state, ve->state & 7),
		       (ve->state & DDF_state_morphing) ? "Morphing, ": "",
		       (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
		printf(" init state[%d] : %s\n", i,
		       map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
		printf(" access[%d] : %s\n", i,
		       map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
		printf(" Name[%d] : %.16s\n", i, ve->name);
		examine_vd(i, sb, ve->guid);
	}
	if (cnt) printf("\n");
}
1491
/* Print the "Physical Disks" section of --examine: one line per used
 * phys-table entry (refnum, size, matching local device if any, and
 * decoded type/state flags), then a trailing list of local disks that
 * do not appear in the metadata at all.
 */
static void examine_pds(struct ddf_super *sb)
{
	int cnt = be16_to_cpu(sb->phys->max_pdes);
	int i;
	struct dl *dl;
	int unlisted = 0;
	printf(" Physical Disks : %d\n", cnt);
	printf(" Number RefNo Size Device Type/State\n");

	/* Reset the 'seen' markers so unlisted disks can be found later. */
	for (dl = sb->dlist; dl; dl = dl->next)
		dl->displayed = 0;

	for (i=0 ; i<cnt ; i++) {
		struct phys_disk_entry *pd = &sb->phys->entries[i];
		int type = be16_to_cpu(pd->type);
		int state = be16_to_cpu(pd->state);

		if (be32_to_cpu(pd->refnum) == 0xffffffff)
			/* Not in use */
			continue;
		//printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
		//printf("\n");
		printf(" %3d %08x ", i,
		       be32_to_cpu(pd->refnum));
		/* config_size is in sectors; >>1 converts to KiB. */
		printf("%8lluK ",
		       be64_to_cpu(pd->config_size)>>1);
		/* Match the entry's refnum against the local disk list
		 * to show the device node. */
		for (dl = sb->dlist; dl ; dl = dl->next) {
			if (be32_eq(dl->disk.refnum, pd->refnum)) {
				char *dv = map_dev(dl->major, dl->minor, 0);
				if (dv) {
					printf("%-15s", dv);
					break;
				}
			}
		}
		if (!dl)
			printf("%15s","");
		else
			dl->displayed = 1;
		printf(" %s%s%s%s%s",
		       (type&2) ? "active":"",
		       (type&4) ? "Global-Spare":"",
		       (type&8) ? "spare" : "",
		       (type&16)? ", foreign" : "",
		       (type&32)? "pass-through" : "");
		if (state & DDF_Failed)
			/* This over-rides these three */
			state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
		printf("/%s%s%s%s%s%s%s",
		       (state&1)? "Online": "Offline",
		       (state&2)? ", Failed": "",
		       (state&4)? ", Rebuilding": "",
		       (state&8)? ", in-transition": "",
		       (state&16)? ", SMART-errors": "",
		       (state&32)? ", Unrecovered-Read-Errors": "",
		       (state&64)? ", Missing" : "");
		printf("\n");
	}
	/* Finally, local disks that were never matched above. */
	for (dl = sb->dlist; dl; dl = dl->next) {
		char *dv;
		if (dl->displayed)
			continue;
		if (!unlisted)
			printf(" Physical disks not in metadata!:\n");
		unlisted = 1;
		dv = map_dev(dl->major, dl->minor, 0);
		printf(" %08x %s\n", be32_to_cpu(dl->disk.refnum),
		       dv ? dv : "-unknown-");
	}
	if (unlisted)
		printf("\n");
}
1564
/* Top-level --examine output for a DDF container: header summary
 * (magic, revision, GUIDs, sequence, redundant-header presence)
 * followed by the virtual and physical disk tables.
 * 'homehost' is unused here.
 */
static void examine_super_ddf(struct supertype *st, char *homehost)
{
	struct ddf_super *sb = st->sb;

	printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
	printf(" Version : %.8s\n", sb->anchor.revision);
	printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
	printf("\n");
	printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
	printf("\n");
	printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
	/* The secondary header is optional; its magic tells us if one
	 * was found. */
	printf(" Redundant hdr : %s\n", (be32_eq(sb->secondary.magic,
						 DDF_HEADER_MAGIC)
					 ?"yes" : "no"));
	examine_vds(sb);
	examine_pds(sb);
}
1582
static unsigned int get_vd_num_of_subarray(struct supertype *st)
{
	/*
	 * Figure out the VD number for this supertype.
	 * Returns DDF_CONTAINER for the container itself,
	 * and DDF_NOTFOUND on error.
	 */
	struct ddf_super *ddf = st->sb;
	struct mdinfo *sra;
	char *sub, *end;
	unsigned int vcnum;

	if (*st->container_devnm == '\0')
		return DDF_CONTAINER;

	/* NOTE(review): 'sra' is not freed on any path below — looks
	 * like a leak; confirm whether sysfs_free() should be called. */
	sra = sysfs_read(-1, st->devnm, GET_VERSION);
	if (!sra || sra->array.major_version != -1 ||
	    sra->array.minor_version != -2 ||
	    !is_subarray(sra->text_version))
		return DDF_NOTFOUND;

	/* text_version has the form "/<container>/<vcnum>". */
	sub = strchr(sra->text_version + 1, '/');
	if (sub != NULL)
		vcnum = strtoul(sub + 1, &end, 10);
	/* 'vcnum' and 'end' are only evaluated when sub != NULL thanks
	 * to short-circuit evaluation below. */
	if (sub == NULL || *sub == '\0' || *end != '\0' ||
	    vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
		return DDF_NOTFOUND;

	return vcnum;
}
1613
1614 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1615 {
1616 /* We just write a generic DDF ARRAY entry
1617 */
1618 struct mdinfo info;
1619 char nbuf[64];
1620 getinfo_super_ddf(st, &info, NULL);
1621 fname_from_uuid(st, &info, nbuf, ':');
1622
1623 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1624 }
1625
/* Emit one mdadm.conf-style "ARRAY ... container=... member=N UUID=..."
 * line per used virtual disk.  'verbose' is unused.
 */
static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
{
	/* We write a DDF ARRAY member entry for each vd, identifying container
	 * by uuid and member by unit number and uuid.
	 */
	struct ddf_super *ddf = st->sb;
	struct mdinfo info;
	unsigned int i;
	char nbuf[64];
	getinfo_super_ddf(st, &info, NULL);
	fname_from_uuid(st, &info, nbuf, ':');

	for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
		struct virtual_entry *ve = &ddf->virt->entries[i];
		struct vcl vcl;	/* temporary; only guid and vcnum are set */
		char nbuf1[64];
		char namebuf[17];
		if (all_ff(ve->guid))
			continue;	/* unused table slot */
		memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
		/* Point currentconf at the temporary so that
		 * uuid_from_super_ddf() produces the subarray uuid.
		 * NOTE(review): it is left pointing at this stack
		 * object after the loop — confirm no later use. */
		ddf->currentconf =&vcl;
		vcl.vcnum = i;
		uuid_from_super_ddf(st, info.uuid);
		fname_from_uuid(st, &info, nbuf1, ':');
		_ddf_array_name(namebuf, ddf, i);
		printf("ARRAY%s%s container=%s member=%d UUID=%s\n",
		       namebuf[0] == '\0' ? "" : " /dev/md/", namebuf,
		       nbuf+5, i, nbuf1+5);
	}
}
1656
1657 static void export_examine_super_ddf(struct supertype *st)
1658 {
1659 struct mdinfo info;
1660 char nbuf[64];
1661 getinfo_super_ddf(st, &info, NULL);
1662 fname_from_uuid(st, &info, nbuf, ':');
1663 printf("MD_METADATA=ddf\n");
1664 printf("MD_LEVEL=container\n");
1665 printf("MD_UUID=%s\n", nbuf+5);
1666 printf("MD_DEVICES=%u\n",
1667 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1668 }
1669
/* Copy the complete DDF metadata region (anchor plus primary and
 * secondary headers and their sections) from device 'from' to the same
 * offsets on device 'to'.  'st' is unused.
 * Returns 0 on success, 1 on any read/write/validation failure.
 */
static int copy_metadata_ddf(struct supertype *st, int from, int to)
{
	void *buf;
	unsigned long long dsize, offset;
	int bytes;
	struct ddf_header *ddf;
	int written = 0;

	/* The meta consists of an anchor, a primary, and a secondary.
	 * This all lives at the end of the device.
	 * So it is easiest to find the earliest of primary and
	 * secondary, and copy everything from there.
	 *
	 * Anchor is 512 from end. It contains primary_lba and secondary_lba
	 * we choose one of those
	 */

	if (posix_memalign(&buf, 4096, 4096) != 0)
		return 1;

	if (!get_dev_size(from, NULL, &dsize))
		goto err;

	/* Read and validate the anchor (last 512 bytes of the device). */
	if (lseek64(from, dsize-512, 0) < 0)
		goto err;
	if (read(from, buf, 512) != 512)
		goto err;
	ddf = buf;
	if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
	    !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
	    (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
	     memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
		goto err;

	/* Copy from whichever of anchor/primary/secondary starts lowest. */
	offset = dsize - 512;
	if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
		offset = be64_to_cpu(ddf->primary_lba) << 9;
	if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
		offset = be64_to_cpu(ddf->secondary_lba) << 9;

	/* NOTE(review): assumes the metadata region fits in an int;
	 * confirm this holds for untrusted/odd metadata. */
	bytes = dsize - offset;

	/* Stream the region in 4K chunks at identical offsets. */
	if (lseek64(from, offset, 0) < 0 ||
	    lseek64(to, offset, 0) < 0)
		goto err;
	while (written < bytes) {
		int n = bytes - written;
		if (n > 4096)
			n = 4096;
		if (read(from, buf, n) != n)
			goto err;
		if (write(to, buf, n) != n)
			goto err;
		written += n;
	}
	free(buf);
	return 0;
err:
	free(buf);
	return 1;
}
1731
/* --detail output hook for DDF; intentionally empty for now. */
static void detail_super_ddf(struct supertype *st, char *homehost)
{
	/* FIXME later
	 * Could print DDF GUID
	 * Need to find which array
	 * If whole, briefly list all arrays
	 * If one, give name
	 */
}
1741
/* T10 vendor IDs (compared against the first 8 bytes of the controller
 * GUID) whose BIOS regenerates VD GUIDs on every boot; used by
 * volume_id_is_reliable().
 * NOTE(review): entries must be at least 8 bytes long since memcmp
 * reads 8 bytes — the "LSI" entry is presumably space padded to 8;
 * confirm padding was not lost in transit. */
static const char *vendors_with_variable_volume_UUID[] = {
	"LSI ",
};
1745
1746 static int volume_id_is_reliable(const struct ddf_super *ddf)
1747 {
1748 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1749 int i;
1750 for (i = 0; i < n; i++)
1751 if (!memcmp(ddf->controller.guid,
1752 vendors_with_variable_volume_UUID[i], 8))
1753 return 0;
1754 return 1;
1755 }
1756
/* Compute the md uuid for subarray 'vcnum'.
 * When the vendor's VD GUIDs are stable, hash the VD GUID directly;
 * otherwise derive a pseudo-uuid from stable properties (header GUID,
 * subarray name and number).
 */
static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
				 unsigned int vcnum, int uuid[4])
{
	char buf[DDF_GUID_LEN+18], sha[20], *p;
	struct sha1_ctx ctx;
	if (volume_id_is_reliable(ddf)) {
		uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
		return;
	}
	/*
	 * Some fake RAID BIOSes (in particular, LSI ones) change the
	 * VD GUID at every boot. These GUIDs are not suitable for
	 * identifying an array. Luckily the header GUID appears to
	 * remain constant.
	 * We construct a pseudo-UUID from the header GUID and those
	 * properties of the subarray that we expect to remain constant.
	 */
	memset(buf, 0, sizeof(buf));
	p = buf;
	memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
	p += DDF_GUID_LEN;
	memcpy(p, ddf->virt->entries[vcnum].name, 16);
	p += 16;
	/* NOTE(review): vcnum is stored in host byte order, so the
	 * hashed uuid differs between endiannesses — confirm intent. */
	*((__u16 *) p) = vcnum;
	sha1_init_ctx(&ctx);
	sha1_process_bytes(buf, sizeof(buf), &ctx);
	sha1_finish_ctx(&ctx, sha);
	/* The first 16 bytes of the SHA1 become the uuid. */
	memcpy(uuid, sha, 4*4);
}
1786
/* Print a brief " UUID=..." fragment for --brief --detail output,
 * choosing the container uuid or the subarray uuid as appropriate.
 * Prints nothing when the subarray cannot be identified.
 */
static void brief_detail_super_ddf(struct supertype *st)
{
	struct mdinfo info;
	char nbuf[64];
	struct ddf_super *ddf = st->sb;
	unsigned int vcnum = get_vd_num_of_subarray(st);
	if (vcnum == DDF_CONTAINER)
		uuid_from_super_ddf(st, info.uuid);
	else if (vcnum == DDF_NOTFOUND)
		return;
	else
		uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
	/* NOTE(review): only info.uuid is initialized here; assumes
	 * fname_from_uuid() reads nothing else from 'info' — confirm. */
	fname_from_uuid(st, &info, nbuf,':');
	printf(" UUID=%s", nbuf + 5);
}
1802 #endif
1803
1804 static int match_home_ddf(struct supertype *st, char *homehost)
1805 {
1806 /* It matches 'this' host if the controller is a
1807 * Linux-MD controller with vendor_data matching
1808 * the hostname. It would be nice if we could
1809 * test against controller found in /sys or somewhere...
1810 */
1811 struct ddf_super *ddf = st->sb;
1812 unsigned int len;
1813
1814 if (!homehost)
1815 return 0;
1816 len = strlen(homehost);
1817
1818 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1819 len < sizeof(ddf->controller.vendor_data) &&
1820 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1821 ddf->controller.vendor_data[len] == 0);
1822 }
1823
1824 #ifndef MDASSEMBLE
static int find_index_in_bvd(const struct ddf_super *ddf,
			     const struct vd_config *conf, unsigned int n,
			     unsigned int *n_bvd)
{
	/*
	 * Find the index of the n-th valid physical disk in this BVD.
	 * Unused entries can be sprinkled in with the used entries,
	 * but don't count.
	 * On success, store that index in *n_bvd and return 1;
	 * return 0 when there is no n-th used entry.
	 */
	unsigned int i, j;
	for (i = 0, j = 0;
	     i < ddf->mppe && j < be16_to_cpu(conf->prim_elmnt_count);
	     i++) {
		/* 0xffffffff marks an unused phys_refnum slot. */
		if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
			if (n == j) {
				*n_bvd = i;
				return 1;
			}
			j++;
		}
	}
	dprintf("%s: couldn't find BVD member %u (total %u)\n",
		__func__, n, be16_to_cpu(conf->prim_elmnt_count));
	return 0;
}
1850
/* Given a member array instance number, and a raid disk within that instance,
 * find the vd_config structure. The offset of the given disk in the phys_refnum
 * table is returned in n_bvd.
 * For two-level members with a secondary raid level the vd_config for
 * the appropriate BVD is returned.
 * The return value is always &vlc->conf, where vlc is returned in last pointer.
 * Returns NULL (with an error message) when the disk cannot be located.
 */
static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
				   unsigned int n,
				   unsigned int *n_bvd, struct vcl **vcl)
{
	struct vcl *v;

	for (v = ddf->conflist; v; v = v->next) {
		unsigned int nsec, ibvd = 0;
		struct vd_config *conf;
		if (inst != v->vcnum)
			continue;
		conf = &v->conf;
		/* Simple (single-level) array: look up directly. */
		if (conf->sec_elmnt_count == 1) {
			if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
				*vcl = v;
				return conf;
			} else
				goto bad;
		}
		if (v->other_bvds == NULL) {
			pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
			       __func__, conf->sec_elmnt_count);
			goto bad;
		}
		/* Which secondary element (BVD) does disk 'n' fall in? */
		nsec = n / be16_to_cpu(conf->prim_elmnt_count);
		if (conf->sec_elmnt_seq != nsec) {
			/* Not the primary record: find the matching
			 * entry in other_bvds. */
			for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
				if (v->other_bvds[ibvd-1]->sec_elmnt_seq
				    == nsec)
					break;
			}
			if (ibvd == conf->sec_elmnt_count)
				goto bad;
			conf = v->other_bvds[ibvd-1];
		}
		/* NOTE(review): the index within a BVD would normally be
		 * n % prim_elmnt_count, i.e. n - nsec*prim_elmnt_count;
		 * subtracting nsec*sec_elmnt_count looks suspicious —
		 * confirm against the DDF layout. */
		if (!find_index_in_bvd(ddf, conf,
				       n - nsec*conf->sec_elmnt_count, n_bvd))
			goto bad;
		dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
			, __func__, n, *n_bvd, ibvd, inst);
		*vcl = v;
		return conf;
	}
bad:
	pr_err("%s: Could't find disk %d in array %u\n", __func__, n, inst);
	return NULL;
}
1905 #endif
1906
1907 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1908 {
1909 /* Find the entry in phys_disk which has the given refnum
1910 * and return it's index
1911 */
1912 unsigned int i;
1913 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1914 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1915 return i;
1916 return -1;
1917 }
1918
1919 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1920 {
1921 char buf[20];
1922 struct sha1_ctx ctx;
1923 sha1_init_ctx(&ctx);
1924 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1925 sha1_finish_ctx(&ctx, buf);
1926 memcpy(uuid, buf, 4*4);
1927 }
1928
1929 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1930 {
1931 /* The uuid returned here is used for:
1932 * uuid to put into bitmap file (Create, Grow)
1933 * uuid for backup header when saving critical section (Grow)
1934 * comparing uuids when re-adding a device into an array
1935 * In these cases the uuid required is that of the data-array,
1936 * not the device-set.
1937 * uuid to recognise same set when adding a missing device back
1938 * to an array. This is a uuid for the device-set.
1939 *
1940 * For each of these we can make do with a truncated
1941 * or hashed uuid rather than the original, as long as
1942 * everyone agrees.
1943 * In the case of SVD we assume the BVD is of interest,
1944 * though that might be the case if a bitmap were made for
1945 * a mirrored SVD - worry about that later.
1946 * So we need to find the VD configuration record for the
1947 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1948 * The first 16 bytes of the sha1 of these is used.
1949 */
1950 struct ddf_super *ddf = st->sb;
1951 struct vcl *vcl = ddf->currentconf;
1952
1953 if (vcl)
1954 uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
1955 else
1956 uuid_from_ddf_guid(ddf->anchor.guid, uuid);
1957 }
1958
/* Fill 'info' for the container as a whole (delegating to
 * getinfo_super_ddf_bvd() when a subarray is selected).
 * 'map', when non-NULL, receives one byte per disk: 1 for healthy,
 * 0 for failed/absent.
 */
static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
{
	struct ddf_super *ddf = st->sb;
	int map_disks = info->array.raid_disks;
	__u32 *cptr;

	if (ddf->currentconf) {
		getinfo_super_ddf_bvd(st, info, map);
		return;
	}
	/* Note: map_disks was read from the caller-supplied 'info'
	 * above, before this memset — it conveys the map capacity. */
	memset(info, 0, sizeof(*info));

	info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
	info->array.level = LEVEL_CONTAINER;
	info->array.layout = 0;
	info->array.md_minor = -1;
	/* GUID bytes 16-19 carry a big-endian 1980-based timestamp. */
	cptr = (__u32 *)(ddf->anchor.guid + 16);
	info->array.ctime = DECADE + __be32_to_cpu(*cptr);

	info->array.utime = 0;
	info->array.chunk_size = 0;
	info->container_enough = 1;

	info->disk.major = 0;
	info->disk.minor = 0;
	if (ddf->dlist) {
		struct phys_disk_entry *pde = NULL;
		info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
		info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);

		/* NOTE(review): raid_disk may be -1 here, making this an
		 * entries[-1] read — confirm whether that path is
		 * reachable. */
		info->data_offset = be64_to_cpu(ddf->phys->
						entries[info->disk.raid_disk].
						config_size);
		info->component_size = ddf->dlist->size - info->data_offset;
		if (info->disk.raid_disk >= 0)
			pde = ddf->phys->entries + info->disk.raid_disk;
		if (pde &&
		    !(be16_to_cpu(pde->state) & DDF_Failed))
			info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
		else
			info->disk.state = 1 << MD_DISK_FAULTY;

		info->events = be32_to_cpu(ddf->active->seq);
	} else {
		info->disk.number = -1;
		info->disk.raid_disk = -1;
//		info->disk.raid_disk = find refnum in the table and use index;
		info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
	}

	info->recovery_start = MaxSector;
	info->reshape_active = 0;
	info->recovery_blocked = 0;
	info->name[0] = 0;

	/* major/minor -1/-2 plus "ddf" marks this as a container. */
	info->array.major_version = -1;
	info->array.minor_version = -2;
	strcpy(info->text_version, "ddf");
	info->safe_mode_delay = 0;

	uuid_from_super_ddf(st, info->uuid);

	if (map) {
		int i;
		for (i = 0 ; i < map_disks; i++) {
			if (i < info->array.raid_disks &&
			    !(be16_to_cpu(ddf->phys->entries[i].state)
			      & DDF_Failed))
				map[i] = 1;
			else
				map[i] = 0;
		}
	}
}
2033
2034 /* size of name must be at least 17 bytes! */
2035 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i)
2036 {
2037 int j;
2038 memcpy(name, ddf->virt->entries[i].name, 16);
2039 name[16] = 0;
2040 for(j = 0; j < 16; j++)
2041 if (name[j] == ' ')
2042 name[j] = 0;
2043 }
2044
2045 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
2046 {
2047 struct ddf_super *ddf = st->sb;
2048 struct vcl *vc = ddf->currentconf;
2049 int cd = ddf->currentdev;
2050 int n_prim;
2051 int j;
2052 struct dl *dl;
2053 int map_disks = info->array.raid_disks;
2054 __u32 *cptr;
2055 struct vd_config *conf;
2056
2057 memset(info, 0, sizeof(*info));
2058 if (layout_ddf2md(&vc->conf, &info->array) == -1)
2059 return;
2060 info->array.md_minor = -1;
2061 cptr = (__u32 *)(vc->conf.guid + 16);
2062 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
2063 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
2064 info->array.chunk_size = 512 << vc->conf.chunk_shift;
2065 info->custom_array_size = 0;
2066
2067 conf = &vc->conf;
2068 n_prim = be16_to_cpu(conf->prim_elmnt_count);
2069 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
2070 int ibvd = cd / n_prim - 1;
2071 cd %= n_prim;
2072 conf = vc->other_bvds[ibvd];
2073 }
2074
2075 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
2076 info->data_offset =
2077 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
2078 if (vc->block_sizes)
2079 info->component_size = vc->block_sizes[cd];
2080 else
2081 info->component_size = be64_to_cpu(conf->blocks);
2082
2083 for (dl = ddf->dlist; dl ; dl = dl->next)
2084 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
2085 break;
2086 }
2087
2088 info->disk.major = 0;
2089 info->disk.minor = 0;
2090 info->disk.state = 0;
2091 if (dl && dl->pdnum >= 0) {
2092 info->disk.major = dl->major;
2093 info->disk.minor = dl->minor;
2094 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2095 * be16_to_cpu(conf->prim_elmnt_count);
2096 info->disk.number = dl->pdnum;
2097 info->disk.state = 0;
2098 if (info->disk.number >= 0 &&
2099 (be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Online) &&
2100 !(be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Failed))
2101 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2102 info->events = be32_to_cpu(ddf->active->seq);
2103 }
2104
2105 info->container_member = ddf->currentconf->vcnum;
2106
2107 info->recovery_start = MaxSector;
2108 info->resync_start = 0;
2109 info->reshape_active = 0;
2110 info->recovery_blocked = 0;
2111 if (!(ddf->virt->entries[info->container_member].state
2112 & DDF_state_inconsistent) &&
2113 (ddf->virt->entries[info->container_member].init_state
2114 & DDF_initstate_mask)
2115 == DDF_init_full)
2116 info->resync_start = MaxSector;
2117
2118 uuid_from_super_ddf(st, info->uuid);
2119
2120 info->array.major_version = -1;
2121 info->array.minor_version = -2;
2122 sprintf(info->text_version, "/%s/%d",
2123 st->container_devnm,
2124 info->container_member);
2125 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2126
2127 _ddf_array_name(info->name, ddf, info->container_member);
2128
2129 if (map)
2130 for (j = 0; j < map_disks; j++) {
2131 map[j] = 0;
2132 if (j < info->array.raid_disks) {
2133 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2134 if (i >= 0 &&
2135 (be16_to_cpu(ddf->phys->entries[i].state)
2136 & DDF_Online) &&
2137 !(be16_to_cpu(ddf->phys->entries[i].state)
2138 & DDF_Failed))
2139 map[i] = 1;
2140 }
2141 }
2142 }
2143
static int update_super_ddf(struct supertype *st, struct mdinfo *info,
			    char *update,
			    char *devname, int verbose,
			    int uuid_set, char *homehost)
{
	/* Apply a named metadata update for 'assemble' and friends.
	 * For 'assemble' and 'force' we must return non-zero if any
	 * change was made; other callers ignore the return value.
	 *
	 * DDF does not need the "force-*" / "assemble" tricks: the
	 * implied modifications happen automatically when the metadata
	 * is first updated to activate the array.  "grow", "resync"
	 * and "_reshape_progress" are accepted but currently no-ops;
	 * "homehost" and "name" are not yet implemented and report
	 * failure, as does any unrecognised update type.
	 */
	int rv;

	if (strcmp(update, "grow") == 0 ||
	    strcmp(update, "resync") == 0 ||
	    strcmp(update, "_reshape_progress") == 0 ||
	    strcmp(update, "assemble") == 0) {
		/* accepted; nothing to change (yet) */
		rv = 0;
	} else {
		/* includes "homehost" and "name", which DDF would store
		 * in controller->vendor_data / virtual_entry->name but
		 * does not support updating here yet. */
		rv = -1;
	}

	return rv;
}
2212
2213 static void make_header_guid(char *guid)
2214 {
2215 be32 stamp;
2216 /* Create a DDF Header of Virtual Disk GUID */
2217
2218 /* 24 bytes of fiction required.
2219 * first 8 are a 'vendor-id' - "Linux-MD"
2220 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2221 * Remaining 8 random number plus timestamp
2222 */
2223 memcpy(guid, T10, sizeof(T10));
2224 stamp = cpu_to_be32(0xdeadbeef);
2225 memcpy(guid+8, &stamp, 4);
2226 stamp = cpu_to_be32(0);
2227 memcpy(guid+12, &stamp, 4);
2228 stamp = cpu_to_be32(time(0) - DECADE);
2229 memcpy(guid+16, &stamp, 4);
2230 stamp._v32 = random32();
2231 memcpy(guid+20, &stamp, 4);
2232 }
2233
2234 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2235 {
2236 unsigned int i;
2237 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2238 if (all_ff(ddf->virt->entries[i].guid))
2239 return i;
2240 }
2241 return DDF_NOTFOUND;
2242 }
2243
2244 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2245 const char *name)
2246 {
2247 unsigned int i;
2248 if (name == NULL)
2249 return DDF_NOTFOUND;
2250 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2251 if (all_ff(ddf->virt->entries[i].guid))
2252 continue;
2253 if (!strncmp(name, ddf->virt->entries[i].name,
2254 sizeof(ddf->virt->entries[i].name)))
2255 return i;
2256 }
2257 return DDF_NOTFOUND;
2258 }
2259
2260 #ifndef MDASSEMBLE
2261 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2262 const char *guid)
2263 {
2264 unsigned int i;
2265 if (guid == NULL || all_ff(guid))
2266 return DDF_NOTFOUND;
2267 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2268 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2269 return i;
2270 return DDF_NOTFOUND;
2271 }
2272 #endif
2273
static int init_super_ddf(struct supertype *st,
			  mdu_array_info_t *info,
			  unsigned long long size, char *name, char *homehost,
			  int *uuid, unsigned long long data_offset)
{
	/* This is primarily called by Create when creating a new array.
	 * We will then get add_to_super called for each component, and then
	 * write_init_super called to write it out to each device.
	 * For DDF, Create can create on fresh devices or on a pre-existing
	 * array.
	 * To create on a pre-existing array a different method will be called.
	 * This one is just for fresh drives.
	 *
	 * We need to create the entire 'ddf' structure which includes:
	 *  DDF headers - these are easy.
	 *  Controller data - a Sector describing this controller .. not that
	 *		      this is a controller exactly.
	 *  Physical Disk Record - one entry per device, so
	 *			   leave plenty of space.
	 *  Virtual Disk Records - again, just leave plenty of space.
	 *			   This just lists VDs, doesn't give details.
	 *  Config records - describe the VDs that use this disk
	 *  DiskData  - describes 'this' device.
	 *  BadBlockManagement - empty
	 *  Diag Space - empty
	 *  Vendor Logs - Could we put bitmaps here?
	 *
	 * Returns 1 on success, 0 on failure.
	 */
	struct ddf_super *ddf;
	char hostname[17];
	int hostlen;
	int max_phys_disks, max_virt_disks;
	unsigned long long sector;
	int clen;
	int i;
	int pdsize, vdsize;
	struct phys_disk *pd;
	struct virtual_disk *vd;

	if (data_offset != INVALID_SECTORS) {
		pr_err("data-offset not supported by DDF\n");
		return 0;
	}

	/* If a superblock already exists we are adding a BVD to an
	 * existing container; hand over to the BVD variant. */
	if (st->sb)
		return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
					  data_offset);

	if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
		pr_err("%s could not allocate superblock\n", __func__);
		return 0;
	}
	memset(ddf, 0, sizeof(*ddf));
	st->sb = ddf;

	if (info == NULL) {
		/* zeroing superblock */
		return 0;
	}

	/* At least 32MB *must* be reserved for the ddf.  So let's just
	 * start 32MB from the end, and put the primary header there.
	 * Don't do secondary for now.
	 * We don't know exactly where that will be yet as it could be
	 * different on each device.  So just set up the lengths.
	 */

	ddf->anchor.magic = DDF_HEADER_MAGIC;
	make_header_guid(ddf->anchor.guid);

	memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
	ddf->anchor.seq = cpu_to_be32(1);
	ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
	ddf->anchor.openflag = 0xFF;
	ddf->anchor.foreignflag = 0;
	ddf->anchor.enforcegroups = 0; /* Is this best?? */
	ddf->anchor.pad0 = 0xff;
	memset(ddf->anchor.pad1, 0xff, 12);
	memset(ddf->anchor.header_ext, 0xff, 32);
	/* Real LBAs are filled in per-device at write time
	 * (_write_super_to_disk); ~0 means "not yet known". */
	ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
	ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
	ddf->anchor.type = DDF_HEADER_ANCHOR;
	memset(ddf->anchor.pad2, 0xff, 3);
	ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
	/* Put this at bottom of 32M reserved.. */
	ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
	max_phys_disks = 1023;   /* Should be enough, 4095 is also allowed */
	ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
	max_virt_disks = 255; /* 15, 63, 255, 1024, 4095 are all allowed */
	ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks);
	ddf->max_part = 64;
	ddf->anchor.max_partitions = cpu_to_be16(ddf->max_part);
	ddf->mppe = 256; /* 16, 64, 256, 1024, 4096 are all allowed */
	/* One header sector plus room for mppe refnums (4 bytes) and
	 * LBA offsets (8 bytes), rounded up to whole sectors. */
	ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
	ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
	ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
	memset(ddf->anchor.pad3, 0xff, 54);
	/* Controller section is one sector long immediately
	 * after the ddf header */
	sector = 1;
	ddf->anchor.controller_section_offset = cpu_to_be32(sector);
	ddf->anchor.controller_section_length = cpu_to_be32(1);
	sector += 1;

	/* phys is 8 sectors after that */
	pdsize = ROUND_UP(sizeof(struct phys_disk) +
			  sizeof(struct phys_disk_entry)*max_phys_disks,
			  512);
	/* The spec only allows these section sizes (in sectors). */
	switch(pdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();
	}
	ddf->anchor.phys_section_offset = cpu_to_be32(sector);
	ddf->anchor.phys_section_length =
		cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
	sector += pdsize/512;

	/* virt is another 32 sectors */
	vdsize = ROUND_UP(sizeof(struct virtual_disk) +
			  sizeof(struct virtual_entry) * max_virt_disks,
			  512);
	switch(vdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();
	}
	ddf->anchor.virt_section_offset = cpu_to_be32(sector);
	ddf->anchor.virt_section_length =
		cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
	sector += vdsize/512;

	/* One config record per partition plus one for the spare record. */
	clen = ddf->conf_rec_len * (ddf->max_part+1);
	ddf->anchor.config_section_offset = cpu_to_be32(sector);
	ddf->anchor.config_section_length = cpu_to_be32(clen);
	sector += clen;

	ddf->anchor.data_section_offset = cpu_to_be32(sector);
	ddf->anchor.data_section_length = cpu_to_be32(1);
	sector += 1;

	/* Optional sections we do not use: offset 0xFFFFFFFF = absent. */
	ddf->anchor.bbm_section_length = cpu_to_be32(0);
	ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.diag_space_length = cpu_to_be32(0);
	ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.vendor_length = cpu_to_be32(0);
	ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);

	memset(ddf->anchor.pad4, 0xff, 256);

	/* Primary and secondary headers start as copies of the anchor,
	 * differing only in type and openflag. */
	memcpy(&ddf->primary, &ddf->anchor, 512);
	memcpy(&ddf->secondary, &ddf->anchor, 512);

	ddf->primary.openflag = 1; /* I guess.. */
	ddf->primary.type = DDF_HEADER_PRIMARY;

	ddf->secondary.openflag = 1; /* I guess.. */
	ddf->secondary.type = DDF_HEADER_SECONDARY;

	ddf->active = &ddf->primary;

	ddf->controller.magic = DDF_CONTROLLER_MAGIC;

	/* 24 more bytes of fiction required.
	 * first 8 are a 'vendor-id'  - "Linux-MD"
	 * Remaining 16 are serial number.... maybe a hostname would do?
	 */
	memcpy(ddf->controller.guid, T10, sizeof(T10));
	gethostname(hostname, sizeof(hostname));
	hostname[sizeof(hostname) - 1] = 0;
	hostlen = strlen(hostname);
	/* Right-align the hostname within the guid, space-padded. */
	memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
	for (i = strlen(T10) ; i+hostlen < 24; i++)
		ddf->controller.guid[i] = ' ';

	ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
	ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
	ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
	ddf->controller.type.sub_device_id = cpu_to_be16(0);
	memcpy(ddf->controller.product_id, "What Is My PID??", 16);
	memset(ddf->controller.pad, 0xff, 8);
	memset(ddf->controller.vendor_data, 0xff, 448);
	if (homehost && strlen(homehost) < 440)
		strcpy((char*)ddf->controller.vendor_data, homehost);

	if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
		pr_err("%s could not allocate pd\n", __func__);
		return 0;
	}
	ddf->phys = pd;
	ddf->pdsize = pdsize;

	/* Poison everything to 0xff, then zero just the header part. */
	memset(pd, 0xff, pdsize);
	memset(pd, 0, sizeof(*pd));
	pd->magic = DDF_PHYS_RECORDS_MAGIC;
	pd->used_pdes = cpu_to_be16(0);
	pd->max_pdes = cpu_to_be16(max_phys_disks);
	memset(pd->pad, 0xff, 52);
	/* All-0xff guid marks an unused entry (see find_unused_pde). */
	for (i = 0; i < max_phys_disks; i++)
		memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);

	if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
		pr_err("%s could not allocate vd\n", __func__);
		return 0;
	}
	ddf->virt = vd;
	ddf->vdsize = vdsize;
	memset(vd, 0, vdsize);
	vd->magic = DDF_VIRT_RECORDS_MAGIC;
	vd->populated_vdes = cpu_to_be16(0);
	vd->max_vdes = cpu_to_be16(max_virt_disks);
	memset(vd->pad, 0xff, 52);

	/* All-0xff entry marks an unused slot (see find_unused_vde). */
	for (i=0; i<max_virt_disks; i++)
		memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));

	st->sb = ddf;
	ddf_set_updates_pending(ddf, NULL);
	return 1;
}
2492
static int chunk_to_shift(int chunksize)
{
	/* Convert a chunk size in bytes to a sector-relative shift:
	 * ffs(x) - 1 equals log2(x) when x is a power of two. */
	int sectors = chunksize / 512;

	return ffs(sectors) - 1;
}
2497
2498 #ifndef MDASSEMBLE
/* A used region on a physical disk: start LBA and length in sectors. */
struct extent {
	unsigned long long start, size;
};

/* qsort comparator: order extents by ascending start address. */
static int cmp_extent(const void *av, const void *bv)
{
	const struct extent *lhs = av;
	const struct extent *rhs = bv;

	return (lhs->start > rhs->start) - (lhs->start < rhs->start);
}
2512
static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
{
	/* Find a list of used extents on the given physical device
	 * (dnum) of the given ddf.
	 * Return a malloced array of 'struct extent', sorted by start,
	 * terminated by a zero-size sentinel at config_size.
	 * Returns NULL if the device has no valid pdnum or is not
	 * online (failed/missing devices have no usable extents).
	 */
	struct extent *rv;
	int n = 0;
	unsigned int i;
	__u16 state;

	if (dl->pdnum < 0)
		return NULL;
	state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);

	if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
		return NULL;

	/* One slot per possible partition plus the sentinel entry
	 * (and one spare slot). */
	rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));

	for (i = 0; i < ddf->max_part; i++) {
		const struct vd_config *bvd;
		unsigned int ibvd;
		struct vcl *v = dl->vlist[i];
		/* Skip empty slots and VDs this disk isn't a member of. */
		if (v == NULL ||
		    get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
					     &bvd, &ibvd) == DDF_NOTFOUND)
			continue;
		rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
		rv[n].size = be64_to_cpu(bvd->blocks);
		n++;
	}
	qsort(rv, n, sizeof(*rv), cmp_extent);

	/* Sentinel: usable data ends at config_size. */
	rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
	rv[n].size = 0;
	return rv;
}
2551 #endif
2552
static int init_super_ddf_bvd(struct supertype *st,
			      mdu_array_info_t *info,
			      unsigned long long size,
			      char *name, char *homehost,
			      int *uuid, unsigned long long data_offset)
{
	/* We are creating a BVD inside a pre-existing container.
	 * so st->sb is already set.
	 * We need to create a new vd_config and a new virtual_entry
	 * Returns 1 on success, 0 on failure.
	 */
	struct ddf_super *ddf = st->sb;
	unsigned int venum, i;
	struct virtual_entry *ve;
	struct vcl *vcl;
	struct vd_config *vc;

	if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
		pr_err("This ddf already has an array called %s\n", name);
		return 0;
	}
	venum = find_unused_vde(ddf);
	if (venum == DDF_NOTFOUND) {
		pr_err("Cannot find spare slot for virtual disk\n");
		return 0;
	}
	ve = &ddf->virt->entries[venum];

	/* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
	 * timestamp, random number
	 */
	make_header_guid(ve->guid);
	ve->unit = cpu_to_be16(info->md_minor);
	ve->pad0 = 0xFFFF;
	/* NOTE(review): crc32() returns unsigned long but _v16 is 16-bit,
	 * so only the low 16 bits are stored — confirm intentional. */
	ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
				  DDF_GUID_LEN);
	ve->type = cpu_to_be16(0);
	ve->state = DDF_state_degraded; /* Will be modified as devices are added */
	if (info->state & 1) /* clean */
		ve->init_state = DDF_init_full;
	else
		ve->init_state = DDF_init_not;

	memset(ve->pad1, 0xff, 14);
	/* Name field is fixed-width, space-padded, not NUL-terminated. */
	memset(ve->name, ' ', 16);
	if (name)
		strncpy(ve->name, name, 16);
	ddf->virt->populated_vdes =
		cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);

	/* Now create a new vd_config */
	if (posix_memalign((void**)&vcl, 512,
		           (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
		pr_err("%s could not allocate vd_config\n", __func__);
		return 0;
	}
	vcl->vcnum = venum;
	vcl->block_sizes = NULL; /* FIXME not for CONCAT */
	vc = &vcl->conf;

	vc->magic = DDF_VD_CONF_MAGIC;
	memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
	vc->timestamp = cpu_to_be32(time(0)-DECADE);
	vc->seqnum = cpu_to_be32(1);
	memset(vc->pad0, 0xff, 24);
	vc->chunk_shift = chunk_to_shift(info->chunk_size);
	/* layout_md2ddf translates the md level/layout into the DDF
	 * primary/secondary RAID level fields; it fails for layouts
	 * DDF cannot express. */
	if (layout_md2ddf(info, vc) == -1 ||
		be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
		pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
		       __func__, info->level, info->layout, info->raid_disks);
		free(vcl);
		return 0;
	}
	vc->sec_elmnt_seq = 0;
	if (alloc_other_bvds(ddf, vcl) != 0) {
		pr_err("%s could not allocate other bvds\n",
		       __func__);
		free(vcl);
		return 0;
	}
	/* Sizes are stored in 512-byte sectors (info->size is KiB). */
	vc->blocks = cpu_to_be64(info->size * 2);
	vc->array_blocks = cpu_to_be64(
		calc_array_size(info->level, info->raid_disks, info->layout,
				info->chunk_size, info->size*2));
	memset(vc->pad1, 0xff, 8);
	vc->spare_refs[0] = cpu_to_be32(0xffffffff);
	vc->spare_refs[1] = cpu_to_be32(0xffffffff);
	vc->spare_refs[2] = cpu_to_be32(0xffffffff);
	vc->spare_refs[3] = cpu_to_be32(0xffffffff);
	vc->spare_refs[4] = cpu_to_be32(0xffffffff);
	vc->spare_refs[5] = cpu_to_be32(0xffffffff);
	vc->spare_refs[6] = cpu_to_be32(0xffffffff);
	vc->spare_refs[7] = cpu_to_be32(0xffffffff);
	memset(vc->cache_pol, 0, 8);
	vc->bg_rate = 0x80;
	memset(vc->pad2, 0xff, 3);
	memset(vc->pad3, 0xff, 52);
	memset(vc->pad4, 0xff, 192);
	memset(vc->v0, 0xff, 32);
	memset(vc->v1, 0xff, 32);
	memset(vc->v2, 0xff, 16);
	memset(vc->v3, 0xff, 16);
	memset(vc->vendor, 0xff, 32);

	/* refnums start empty (0xff); LBA offsets zeroed. */
	memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
	memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);

	/* Secondary BVDs are copies distinguished by sec_elmnt_seq. */
	for (i = 1; i < vc->sec_elmnt_count; i++) {
		memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
		vcl->other_bvds[i-1]->sec_elmnt_seq = i;
	}

	vcl->next = ddf->conflist;
	ddf->conflist = vcl;
	ddf->currentconf = vcl;
	ddf_set_updates_pending(ddf, NULL);
	return 1;
}
2670
2671 #ifndef MDASSEMBLE
static void add_to_super_ddf_bvd(struct supertype *st,
				 mdu_disk_info_t *dk, int fd, char *devname)
{
	/* fd and devname identify a device within the ddf container (st).
	 * dk identifies a location in the new BVD.
	 * We need to find suitable free space in that device and update
	 * the phys_refnum and lba_offset for the newly created vd_config.
	 * We might also want to update the type in the phys_disk
	 * section.
	 *
	 * Alternately: fd == -1 and we have already chosen which device to
	 * use and recorded in dlist->raid_disk;
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	struct vd_config *vc;
	unsigned int i;
	unsigned long long blocks, pos, esize;
	struct extent *ex;
	unsigned int raid_disk = dk->raid_disk;

	/* Locate the member device, either by pre-chosen raid slot or
	 * by major/minor. */
	if (fd == -1) {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->raiddisk == dk->raid_disk)
				break;
	} else {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->major == dk->major &&
			    dl->minor == dk->minor)
				break;
	}
	if (!dl || dl->pdnum < 0 || ! (dk->state & (1<<MD_DISK_SYNC)))
		return;

	vc = &ddf->currentconf->conf;
	if (vc->sec_elmnt_count > 1) {
		/* SECONDARY raid level: pick the BVD this raid_disk
		 * belongs to and reduce raid_disk to its index there. */
		unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
		if (raid_disk >= n)
			vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
		raid_disk %= n;
	}

	ex = get_extents(ddf, dl);
	if (!ex)
		return;

	i = 0; pos = 0;
	blocks = be64_to_cpu(vc->blocks);
	if (ddf->currentconf->block_sizes)
		blocks = ddf->currentconf->block_sizes[dk->raid_disk];

	/* First-fit: walk the gaps between used extents until one is
	 * large enough.  The sentinel extent ends the scan. */
	do {
		esize = ex[i].start - pos;
		if (esize >= blocks)
			break;
		pos = ex[i].start + ex[i].size;
		i++;
	} while (ex[i-1].size);

	free(ex);
	if (esize < blocks)
		return;	/* no gap big enough; silently skip this disk */

	ddf->currentdev = dk->raid_disk;
	vc->phys_refnum[raid_disk] = dl->disk.refnum;
	LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);

	/* Record this VD in the first free slot of the disk's vlist. */
	for (i = 0; i < ddf->max_part ; i++)
		if (dl->vlist[i] == NULL)
			break;
	if (i == ddf->max_part)
		return;
	dl->vlist[i] = ddf->currentconf;

	if (fd >= 0)
		dl->fd = fd;
	if (devname)
		dl->devname = devname;

	/* Check if we can mark array as optimal yet */
	i = ddf->currentconf->vcnum;
	ddf->virt->entries[i].state =
		(ddf->virt->entries[i].state & ~DDF_state_mask)
		| get_svd_state(ddf, ddf->currentconf);
	/* The disk is now an active VD member, no longer a global spare. */
	be16_clear(ddf->phys->entries[dl->pdnum].type,
		   cpu_to_be16(DDF_Global_Spare));
	be16_set(ddf->phys->entries[dl->pdnum].type,
		 cpu_to_be16(DDF_Active_in_VD));
	dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
		__func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
		ddf->currentconf->vcnum, guid_str(vc->guid),
		dk->raid_disk);
	ddf_set_updates_pending(ddf, vc);
}
2767
2768 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2769 {
2770 unsigned int i;
2771 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2772 if (all_ff(ddf->phys->entries[i].guid))
2773 return i;
2774 }
2775 return DDF_NOTFOUND;
2776 }
2777
static void _set_config_size(struct phys_disk_entry *pde, const struct dl *dl)
{
	/* Compute and store config_size: the number of sectors of this
	 * physical disk usable for data, i.e. everything below the DDF
	 * metadata and workspace areas. */
	__u64 cfs, t;
	/* Data must end at the primary header, and in any case leave at
	 * least 32MiB (32*1024*2 sectors) at the end of the device. */
	cfs = min(dl->size - 32*1024*2ULL, be64_to_cpu(dl->primary_lba));
	t = be64_to_cpu(dl->secondary_lba);
	if (t != ~(__u64)0)
		cfs = min(cfs, t);
	/*
	 * Some vendor DDF structures interpret workspace_lba
	 * very differently than we do: Make a sanity check on the value.
	 */
	t = be64_to_cpu(dl->workspace_lba);
	if (t < cfs) {
		__u64 wsp = cfs - t;
		/* Reject a workspace bigger than 1GiB that is also more
		 * than 1/16 of the disk; otherwise trust it. */
		if (wsp > 1024*1024*2ULL && wsp > dl->size / 16) {
			pr_err("%s: %x:%x: workspace size 0x%llx too big, ignoring\n",
			       __func__, dl->major, dl->minor, wsp);
		} else
			cfs = t;
	}
	pde->config_size = cpu_to_be64(cfs);
	dprintf("%s: %x:%x config_size %llx, DDF structure is %llx blocks\n",
		__func__, dl->major, dl->minor, cfs, dl->size-cfs);
}
2802
/* Add a device to a container, either while creating it or while
 * expanding a pre-existing container
 */
static int add_to_super_ddf(struct supertype *st,
			    mdu_disk_info_t *dk, int fd, char *devname,
			    unsigned long long data_offset)
{
	/* Create a phys_disk entry and a detailed disk_data entry for
	 * the device numbered dk->number.  Returns 0 on success,
	 * 1 on failure. */
	struct ddf_super *ddf = st->sb;
	struct dl *dd;
	time_t now;
	struct tm *tm;
	unsigned long long size;
	struct phys_disk_entry *pde;
	unsigned int n, i;
	struct stat stb;
	__u32 *tptr;

	/* If a BVD is being created, delegate: the device is added to
	 * that VD rather than to the container itself. */
	if (ddf->currentconf) {
		add_to_super_ddf_bvd(st, dk, fd, devname);
		return 0;
	}

	/* This is device numbered dk->number.  We need to create
	 * a phys_disk entry and a more detailed disk_data entry.
	 */
	fstat(fd, &stb);
	n = find_unused_pde(ddf);
	if (n == DDF_NOTFOUND) {
		pr_err("%s: No free slot in array, cannot add disk\n",
		       __func__);
		return 1;
	}
	pde = &ddf->phys->entries[n];
	get_dev_size(fd, NULL, &size);
	if (size <= 32*1024*1024) {
		pr_err("%s: device size must be at least 32MB\n",
		       __func__);
		return 1;
	}
	size >>= 9;	/* bytes -> 512-byte sectors */

	if (posix_memalign((void**)&dd, 512,
		           sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
		pr_err("%s could allocate buffer for new disk, aborting\n",
		       __func__);
		return 1;
	}
	dd->major = major(stb.st_rdev);
	dd->minor = minor(stb.st_rdev);
	dd->devname = devname;
	dd->fd = fd;
	dd->spare = NULL;

	dd->disk.magic = DDF_PHYS_DATA_MAGIC;
	now = time(0);
	tm = localtime(&now);
	/* guid = "Linux-MD" + creation date + 8 random bytes */
	sprintf(dd->disk.guid, "%8s%04d%02d%02d",
		T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
	tptr = (__u32 *)(dd->disk.guid + 16);
	*tptr++ = random32();
	*tptr = random32();

	do {
		/* Cannot be bothered finding a CRC of some irrelevant details*/
		dd->disk.refnum._v32 = random32();
		/* Re-roll until the refnum collides with no existing entry. */
		for (i = be16_to_cpu(ddf->active->max_pd_entries);
		     i > 0; i--)
			if (be32_eq(ddf->phys->entries[i-1].refnum,
				    dd->disk.refnum))
				break;
	} while (i > 0);

	dd->disk.forced_ref = 1;
	dd->disk.forced_guid = 1;
	memset(dd->disk.vendor, ' ', 32);
	memcpy(dd->disk.vendor, "Linux", 5);
	memset(dd->disk.pad, 0xff, 442);
	for (i = 0; i < ddf->max_part ; i++)
		dd->vlist[i] = NULL;

	dd->pdnum = n;

	if (st->update_tail) {
		/* mdmon mode: queue a one-entry phys_disk record as a
		 * metadata update instead of editing in place. */
		int len = (sizeof(struct phys_disk) +
			   sizeof(struct phys_disk_entry));
		struct phys_disk *pd;

		pd = xmalloc(len);
		pd->magic = DDF_PHYS_RECORDS_MAGIC;
		pd->used_pdes = cpu_to_be16(n);
		pde = &pd->entries[0];
		dd->mdupdate = pd;
	} else
		ddf->phys->used_pdes = cpu_to_be16(
			1 + be16_to_cpu(ddf->phys->used_pdes));

	memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
	pde->refnum = dd->disk.refnum;
	pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
	pde->state = cpu_to_be16(DDF_Online);
	dd->size = size;
	/*
	 * If there is already a device in dlist, try to reserve the same
	 * amount of workspace. Otherwise, use 32MB.
	 * We checked disk size above already.
	 */
#define __calc_lba(new, old, lba, mb) do { \
		unsigned long long dif; \
		if ((old) != NULL) \
			dif = (old)->size - be64_to_cpu((old)->lba); \
		else \
			dif = (new)->size; \
		if ((new)->size > dif) \
			(new)->lba = cpu_to_be64((new)->size - dif); \
		else \
			(new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
	} while (0)
	__calc_lba(dd, ddf->dlist, workspace_lba, 32);
	__calc_lba(dd, ddf->dlist, primary_lba, 16);
	/* Only place a secondary header if the reference disk has one. */
	if (ddf->dlist == NULL ||
	    be64_to_cpu(ddf->dlist->secondary_lba) != ~(__u64)0)
		__calc_lba(dd, ddf->dlist, secondary_lba, 32);
	_set_config_size(pde, dd);

	sprintf(pde->path, "%17.17s","Information: nil") ;
	memset(pde->pad, 0xff, 6);

	if (st->update_tail) {
		/* defer: picked up by write_init_super_ddf */
		dd->next = ddf->add_list;
		ddf->add_list = dd;
	} else {
		dd->next = ddf->dlist;
		ddf->dlist = dd;
		ddf_set_updates_pending(ddf, NULL);
	}

	return 0;
}
2941
static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
{
	struct ddf_super *ddf = st->sb;
	struct dl *dl;

	/* mdmon has noticed that this disk (dk->major/dk->minor) has
	 * disappeared from the container.
	 * We need to arrange that it disappears from the metadata and
	 * internal data structures too.
	 * Most of the work is done by ddf_process_update which edits
	 * the metadata and closes the file handle and attaches the memory
	 * where free_updates will free it.
	 * Returns 0 on success, -1 if the disk is unknown.
	 */
	for (dl = ddf->dlist; dl ; dl = dl->next)
		if (dl->major == dk->major &&
		    dl->minor == dk->minor)
			break;
	if (!dl || dl->pdnum < 0)
		return -1;

	if (st->update_tail) {
		/* Queue a one-entry phys_disk record marking the disk
		 * Missing; used_pdes carries the pdnum to change.
		 * NOTE(review): only entries[0].state is initialized in
		 * the xmalloc'd record — presumably ddf_process_update
		 * reads nothing else; confirm. */
		int len = (sizeof(struct phys_disk) +
			   sizeof(struct phys_disk_entry));
		struct phys_disk *pd;

		pd = xmalloc(len);
		pd->magic = DDF_PHYS_RECORDS_MAGIC;
		pd->used_pdes = cpu_to_be16(dl->pdnum);
		pd->entries[0].state = cpu_to_be16(DDF_Missing);
		append_metadata_update(st, pd, len);
	}
	return 0;
}
2975 #endif
2976
2977 /*
2978 * This is the write_init_super method for a ddf container. It is
2979 * called when creating a container or adding another device to a
2980 * container.
2981 */
2982
2983 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
2984 {
2985 unsigned long long sector;
2986 struct ddf_header *header;
2987 int fd, i, n_config, conf_size, buf_size;
2988 int ret = 0;
2989 char *conf;
2990
2991 fd = d->fd;
2992
2993 switch (type) {
2994 case DDF_HEADER_PRIMARY:
2995 header = &ddf->primary;
2996 sector = be64_to_cpu(header->primary_lba);
2997 break;
2998 case DDF_HEADER_SECONDARY:
2999 header = &ddf->secondary;
3000 sector = be64_to_cpu(header->secondary_lba);
3001 break;
3002 default:
3003 return 0;
3004 }
3005 if (sector == ~(__u64)0)
3006 return 0;
3007
3008 header->type = type;
3009 header->openflag = 1;
3010 header->crc = calc_crc(header, 512);
3011
3012 lseek64(fd, sector<<9, 0);
3013 if (write(fd, header, 512) < 0)
3014 goto out;
3015
3016 ddf->controller.crc = calc_crc(&ddf->controller, 512);
3017 if (write(fd, &ddf->controller, 512) < 0)
3018 goto out;
3019
3020 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
3021 if (write(fd, ddf->phys, ddf->pdsize) < 0)
3022 goto out;
3023 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
3024 if (write(fd, ddf->virt, ddf->vdsize) < 0)
3025 goto out;
3026
3027 /* Now write lots of config records. */
3028 n_config = ddf->max_part;
3029 conf_size = ddf->conf_rec_len * 512;
3030 conf = ddf->conf;
3031 buf_size = conf_size * (n_config + 1);
3032 if (!conf) {
3033 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
3034 goto out;
3035 ddf->conf = conf;
3036 }
3037 for (i = 0 ; i <= n_config ; i++) {
3038 struct vcl *c;
3039 struct vd_config *vdc = NULL;
3040 if (i == n_config) {
3041 c = (struct vcl *)d->spare;
3042 if (c)
3043 vdc = &c->conf;
3044 } else {
3045 unsigned int dummy;
3046 c = d->vlist[i];
3047 if (c)
3048 get_pd_index_from_refnum(
3049 c, d->disk.refnum,
3050 ddf->mppe,
3051 (const struct vd_config **)&vdc,
3052 &dummy);
3053 }
3054 if (vdc) {
3055 dprintf("writing conf record %i on disk %08x for %s/%u\n",
3056 i, be32_to_cpu(d->disk.refnum),
3057 guid_str(vdc->guid),
3058 vdc->sec_elmnt_seq);
3059 vdc->crc = calc_crc(vdc, conf_size);
3060 memcpy(conf + i*conf_size, vdc, conf_size);
3061 } else
3062 memset(conf + i*conf_size, 0xff, conf_size);
3063 }
3064 if (write(fd, conf, buf_size) != buf_size)
3065 goto out;
3066
3067 d->disk.crc = calc_crc(&d->disk, 512);
3068 if (write(fd, &d->disk, 512) < 0)
3069 goto out;
3070
3071 ret = 1;
3072 out:
3073 header->openflag = 0;
3074 header->crc = calc_crc(header, 512);
3075
3076 lseek64(fd, sector<<9, 0);
3077 if (write(fd, header, 512) < 0)
3078 ret = 0;
3079
3080 return ret;
3081 }
3082
3083 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
3084 {
3085 unsigned long long size;
3086 int fd = d->fd;
3087 if (fd < 0)
3088 return 0;
3089
3090 /* We need to fill in the primary, (secondary) and workspace
3091 * lba's in the headers, set their checksums,
3092 * Also checksum phys, virt....
3093 *
3094 * Then write everything out, finally the anchor is written.
3095 */
3096 get_dev_size(fd, NULL, &size);
3097 size /= 512;
3098 memcpy(&ddf->anchor, ddf->active, 512);
3099 if (be64_to_cpu(d->workspace_lba) != 0ULL)
3100 ddf->anchor.workspace_lba = d->workspace_lba;
3101 else
3102 ddf->anchor.workspace_lba =
3103 cpu_to_be64(size - 32*1024*2);
3104 if (be64_to_cpu(d->primary_lba) != 0ULL)
3105 ddf->anchor.primary_lba = d->primary_lba;
3106 else
3107 ddf->anchor.primary_lba =
3108 cpu_to_be64(size - 16*1024*2);
3109 if (be64_to_cpu(d->secondary_lba) != 0ULL)
3110 ddf->anchor.secondary_lba = d->secondary_lba;
3111 else
3112 ddf->anchor.secondary_lba =
3113 cpu_to_be64(size - 32*1024*2);
3114 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
3115 memcpy(&ddf->primary, &ddf->anchor, 512);
3116 memcpy(&ddf->secondary, &ddf->anchor, 512);
3117
3118 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
3119 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
3120 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
3121
3122 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
3123 return 0;
3124
3125 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
3126 return 0;
3127
3128 lseek64(fd, (size-1)*512, SEEK_SET);
3129 if (write(fd, &ddf->anchor, 512) < 0)
3130 return 0;
3131
3132 return 1;
3133 }
3134
3135 #ifndef MDASSEMBLE
3136 static int __write_init_super_ddf(struct supertype *st)
3137 {
3138 struct ddf_super *ddf = st->sb;
3139 struct dl *d;
3140 int attempts = 0;
3141 int successes = 0;
3142
3143 pr_state(ddf, __func__);
3144
3145 /* try to write updated metadata,
3146 * if we catch a failure move on to the next disk
3147 */
3148 for (d = ddf->dlist; d; d=d->next) {
3149 attempts++;
3150 successes += _write_super_to_disk(ddf, d);
3151 }
3152
3153 return attempts != successes;
3154 }
3155
static int write_init_super_ddf(struct supertype *st)
{
	/* Write out freshly-created metadata: either queue it as updates
	 * through mdmon (when st->update_tail is set) or write directly
	 * to the member disks.  Returns 0 on success.
	 */
	struct ddf_super *ddf = st->sb;
	struct vcl *currentconf = ddf->currentconf;

	/* We are done with currentconf - reset it so st refers to the container */
	ddf->currentconf = NULL;

	if (st->update_tail) {
		/* queue the virtual_disk and vd_config as metadata updates */
		struct virtual_disk *vd;
		struct vd_config *vc;
		int len, tlen;
		unsigned int i;

		if (!currentconf) {
			/* Must be adding a physical disk to the container */
			int len = (sizeof(struct phys_disk) +
				   sizeof(struct phys_disk_entry));

			/* adding a disk to the container. */
			if (!ddf->add_list)
				return 0;

			append_metadata_update(st, ddf->add_list->mdupdate, len);
			ddf->add_list->mdupdate = NULL;
			return 0;
		}

		/* Newly created VD */

		/* First the virtual disk. We have a slightly fake header */
		len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
		vd = xmalloc(len);
		*vd = *ddf->virt;
		vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
		/* NOTE(review): populated_vdes carries the VD's index here,
		 * not a count - presumably the update handler relies on
		 * that; confirm against process_update. */
		vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
		append_metadata_update(st, vd, len);

		/* Then the vd_config: the primary BVD followed by any
		 * other BVDs of a secondary configuration. */
		len = ddf->conf_rec_len * 512;
		tlen = len * currentconf->conf.sec_elmnt_count;
		vc = xmalloc(tlen);
		memcpy(vc, &currentconf->conf, len);
		for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
			memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
			       len);
		append_metadata_update(st, vc, tlen);

		/* FIXME I need to close the fds! */
		return 0;
	} else {
		struct dl *d;
		/* Creating a new container: wipe any stale metadata of
		 * other formats from the member devices first. */
		if (!currentconf)
			for (d = ddf->dlist; d; d=d->next)
				while (Kill(d->devname, NULL, 0, -1, 1) == 0);
		return __write_init_super_ddf(st);
	}
}
3215
3216 #endif
3217
3218 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3219 unsigned long long data_offset)
3220 {
3221 /* We must reserve the last 32Meg */
3222 if (devsize <= 32*1024*2)
3223 return 0;
3224 return devsize - 32*1024*2;
3225 }
3226
3227 #ifndef MDASSEMBLE
3228
static int reserve_space(struct supertype *st, int raiddisks,
			 unsigned long long size, int chunk,
			 unsigned long long *freesize)
{
	/* Find 'raiddisks' spare extents at least 'size' big (but
	 * only caring about multiples of 'chunk') and remember
	 * them.  If size==0, find the largest size possible.
	 * Report available size in *freesize
	 * If space cannot be found, fail.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	int cnt = 0;

	/* Reset any stale per-device selection from a previous attempt. */
	for (dl = ddf->dlist; dl ; dl=dl->next) {
		dl->raiddisk = -1;
		dl->esize = 0;
	}
	/* Now find largest extent on each device */
	for (dl = ddf->dlist ; dl ; dl=dl->next) {
		struct extent *e = get_extents(ddf, dl);
		unsigned long long pos = 0;
		int i = 0;
		int found = 0;
		unsigned long long minsize = size;

		if (size == 0)
			minsize = chunk;

		if (!e)
			continue;
		/* The extent list is sorted and terminated by a
		 * zero-size entry; the gaps between used extents are
		 * the free regions.  'minsize' ratchets upward, so it
		 * ends up holding the largest qualifying gap. */
		do {
			unsigned long long esize;
			esize = e[i].start - pos;
			if (esize >= minsize) {
				found = 1;
				minsize = esize;
			}
			pos = e[i].start + e[i].size;
			i++;
		} while (e[i-1].size);
		if (found) {
			cnt++;
			dl->esize = minsize;
		}
		free(e);
	}
	if (cnt < raiddisks) {
		pr_err("not enough devices with space to create array.\n");
		return 0; /* not enough free extents of sufficient size */
	}
	if (size == 0) {
		/* choose the largest size of which there are at least 'raiddisk' */
		for (dl = ddf->dlist ; dl ; dl=dl->next) {
			struct dl *dl2;
			if (dl->esize <= size)
				continue;
			/* This is bigger than 'size', see if there are enough */
			cnt = 0;
			for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
				if (dl2->esize >= dl->esize)
					cnt++;
			if (cnt >= raiddisks)
				size = dl->esize;
		}
		/* Round down to a whole number of chunks. */
		if (chunk) {
			size = size / chunk;
			size *= chunk;
		}
		*freesize = size;
		if (size < 32) {
			pr_err("not enough spare devices to create array.\n");
			return 0;
		}
	}
	/* We have a 'size' of which there are enough spaces.
	 * We simply do a first-fit */
	cnt = 0;
	for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
		if (dl->esize < size)
			continue;

		dl->raiddisk = cnt;
		cnt++;
	}
	return 1;
}
3316
static int validate_geometry_ddf(struct supertype *st,
				 int level, int layout, int raiddisks,
				 int *chunk, unsigned long long size,
				 unsigned long long data_offset,
				 char *dev, unsigned long long *freesize,
				 int verbose)
{
	/* Decide whether the requested array geometry can be created
	 * with DDF metadata, dispatching to the container or BVD
	 * validators as appropriate.  Returns 1 if acceptable.
	 */
	int fd;
	struct mdinfo *sra;
	int cfd;

	/* ddf potentially supports lots of things, but it depends on
	 * what devices are offered (and maybe kernel version?)
	 * If given unused devices, we will make a container.
	 * If given devices in a container, we will make a BVD.
	 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
	 */

	if (*chunk == UnSet)
		*chunk = DEFAULT_CHUNK;

	if (level == LEVEL_NONE)
		level = LEVEL_CONTAINER;
	if (level == LEVEL_CONTAINER) {
		/* Must be a fresh device to add to a container */
		return validate_geometry_ddf_container(st, level, layout,
						       raiddisks, *chunk,
						       size, data_offset, dev,
						       freesize,
						       verbose);
	}

	if (!dev) {
		/* No device given: check that DDF can express this
		 * level/layout at all, and - when creating inside an
		 * existing container - that enough space exists. */
		mdu_array_info_t array = {
			.level = level,
			.layout = layout,
			.raid_disks = raiddisks
		};
		struct vd_config conf;
		if (layout_md2ddf(&array, &conf) == -1) {
			if (verbose)
				pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
				       level, layout, raiddisks);
			return 0;
		}
		/* Should check layout? etc */

		if (st->sb && freesize) {
			/* --create was given a container to create in.
			 * So we need to check that there are enough
			 * free spaces and return the amount of space.
			 * We may as well remember which drives were
			 * chosen so that add_to_super/getinfo_super
			 * can return them.
			 */
			return reserve_space(st, raiddisks, size, *chunk, freesize);
		}
		return 1;
	}

	if (st->sb) {
		/* A container has already been opened, so we are
		 * creating in there.  Maybe a BVD, maybe an SVD.
		 * Should make a distinction one day.
		 */
		return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
						 chunk, size, data_offset, dev,
						 freesize,
						 verbose);
	}
	/* This is the first device for the array.
	 * If it is a container, we read it in and do automagic allocations,
	 * no other devices should be given.
	 * Otherwise it must be a member device of a container, and we
	 * do manual allocation.
	 * Later we should check for a BVD and make an SVD.
	 */
	fd = open(dev, O_RDONLY|O_EXCL, 0);
	if (fd >= 0) {
		/* The exclusive open succeeded, so the device is unused:
		 * a bare device can only become a container, not a BVD. */
		sra = sysfs_read(fd, NULL, GET_VERSION);
		close(fd);
		if (sra && sra->array.major_version == -1 &&
		    strcmp(sra->text_version, "ddf") == 0) {
			/* load super */
			/* find space for 'n' devices. */
			/* remember the devices */
			/* Somehow return the fact that we have enough */
		}

		if (verbose)
			pr_err("ddf: Cannot create this array "
			       "on device %s - a container is required.\n",
			       dev);
		return 0;
	}
	if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
		if (verbose)
			pr_err("ddf: Cannot open %s: %s\n",
			       dev, strerror(errno));
		return 0;
	}
	/* Well, it is in use by someone, maybe a 'ddf' container. */
	cfd = open_container(fd);
	if (cfd < 0) {
		close(fd);
		if (verbose)
			pr_err("ddf: Cannot use %s: %s\n",
			       dev, strerror(EBUSY));
		return 0;
	}
	sra = sysfs_read(cfd, NULL, GET_VERSION);
	close(fd);
	if (sra && sra->array.major_version == -1 &&
	    strcmp(sra->text_version, "ddf") == 0) {
		/* This is a member of a ddf container.  Load the container
		 * and try to create a bvd
		 */
		struct ddf_super *ddf;
		if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
			st->sb = ddf;
			strcpy(st->container_devnm, fd2devnm(cfd));
			close(cfd);
			return validate_geometry_ddf_bvd(st, level, layout,
							 raiddisks, chunk, size,
							 data_offset,
							 dev, freesize,
							 verbose);
		}
		close(cfd);
	} else /* device may belong to a different container */
		return 0;
	/* NOTE(review): reaching here means load_super_ddf_all() failed,
	 * yet 1 (success) is returned; also 'sra' from sysfs_read() is
	 * never freed in this function - confirm both are intentional. */

	return 1;
}
3451
3452 static int
3453 validate_geometry_ddf_container(struct supertype *st,
3454 int level, int layout, int raiddisks,
3455 int chunk, unsigned long long size,
3456 unsigned long long data_offset,
3457 char *dev, unsigned long long *freesize,
3458 int verbose)
3459 {
3460 int fd;
3461 unsigned long long ldsize;
3462
3463 if (level != LEVEL_CONTAINER)
3464 return 0;
3465 if (!dev)
3466 return 1;
3467
3468 fd = open(dev, O_RDONLY|O_EXCL, 0);
3469 if (fd < 0) {
3470 if (verbose)
3471 pr_err("ddf: Cannot open %s: %s\n",
3472 dev, strerror(errno));
3473 return 0;
3474 }
3475 if (!get_dev_size(fd, dev, &ldsize)) {
3476 close(fd);
3477 return 0;
3478 }
3479 close(fd);
3480
3481 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3482 if (*freesize == 0)
3483 return 0;
3484
3485 return 1;
3486 }
3487
3488 static int validate_geometry_ddf_bvd(struct supertype *st,
3489 int level, int layout, int raiddisks,
3490 int *chunk, unsigned long long size,
3491 unsigned long long data_offset,
3492 char *dev, unsigned long long *freesize,
3493 int verbose)
3494 {
3495 struct stat stb;
3496 struct ddf_super *ddf = st->sb;
3497 struct dl *dl;
3498 unsigned long long pos = 0;
3499 unsigned long long maxsize;
3500 struct extent *e;
3501 int i;
3502 /* ddf/bvd supports lots of things, but not containers */
3503 if (level == LEVEL_CONTAINER) {
3504 if (verbose)
3505 pr_err("DDF cannot create a container within an container\n");
3506 return 0;
3507 }
3508 /* We must have the container info already read in. */
3509 if (!ddf)
3510 return 0;
3511
3512 if (!dev) {
3513 /* General test: make sure there is space for
3514 * 'raiddisks' device extents of size 'size'.
3515 */
3516 unsigned long long minsize = size;
3517 int dcnt = 0;
3518 if (minsize == 0)
3519 minsize = 8;
3520 for (dl = ddf->dlist; dl ; dl = dl->next) {
3521 int found = 0;
3522 pos = 0;
3523
3524 i = 0;
3525 e = get_extents(ddf, dl);
3526 if (!e) continue;
3527 do {
3528 unsigned long long esize;
3529 esize = e[i].start - pos;
3530 if (esize >= minsize)
3531 found = 1;
3532 pos = e[i].start + e[i].size;
3533 i++;
3534 } while (e[i-1].size);
3535 if (found)
3536 dcnt++;
3537 free(e);
3538 }
3539 if (dcnt < raiddisks) {
3540 if (verbose)
3541 pr_err("ddf: Not enough devices with "
3542 "space for this array (%d < %d)\n",
3543 dcnt, raiddisks);
3544 return 0;
3545 }
3546 return 1;
3547 }
3548 /* This device must be a member of the set */
3549 if (stat(dev, &stb) < 0)
3550 return 0;
3551 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3552 return 0;
3553 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3554 if (dl->major == (int)major(stb.st_rdev) &&
3555 dl->minor == (int)minor(stb.st_rdev))
3556 break;
3557 }
3558 if (!dl) {
3559 if (verbose)
3560 pr_err("ddf: %s is not in the "
3561 "same DDF set\n",
3562 dev);
3563 return 0;
3564 }
3565 e = get_extents(ddf, dl);
3566 maxsize = 0;
3567 i = 0;
3568 if (e)
3569 do {
3570 unsigned long long esize;
3571 esize = e[i].start - pos;
3572 if (esize >= maxsize)
3573 maxsize = esize;
3574 pos = e[i].start + e[i].size;
3575 i++;
3576 } while (e[i-1].size);
3577 *freesize = maxsize;
3578 // FIXME here I am
3579
3580 return 1;
3581 }
3582
static int load_super_ddf_all(struct supertype *st, int fd,
			      void **sbp, char *devname)
{
	/* Load a complete DDF container from the md array open on 'fd'.
	 * The member disk with the highest header sequence number is
	 * taken as the authoritative copy of the global metadata, then
	 * the device-local sections are read from every member.
	 * On success *sbp points at the new ddf_super and 0 is
	 * returned; non-zero indicates failure.
	 */
	struct mdinfo *sra;
	struct ddf_super *super;
	struct mdinfo *sd, *best = NULL;
	int bestseq = 0;
	int seq;
	char nm[20];
	int dfd;

	sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
	if (!sra)
		return 1;
	/* Only a ddf container (text_version "ddf", versions -1/-2)
	 * qualifies. */
	if (sra->array.major_version != -1 ||
	    sra->array.minor_version != -2 ||
	    strcmp(sra->text_version, "ddf") != 0)
		return 1;

	if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
		return 1;
	memset(super, 0, sizeof(*super));

	/* first, try each device, and choose the best ddf */
	for (sd = sra->devs ; sd ; sd = sd->next) {
		int rv;
		sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
		dfd = dev_open(nm, O_RDONLY);
		if (dfd < 0)
			/* NOTE(review): 'super' and 'sra' leak on this and
			 * the later error paths - tolerable in a short-
			 * lived process, but worth confirming. */
			return 2;
		rv = load_ddf_headers(dfd, super, NULL);
		close(dfd);
		if (rv == 0) {
			/* A header left "open" was written mid-update;
			 * treat it as one generation older. */
			seq = be32_to_cpu(super->active->seq);
			if (super->active->openflag)
				seq--;
			if (!best || seq > bestseq) {
				bestseq = seq;
				best = sd;
			}
		}
	}
	if (!best)
		return 1;
	/* OK, load this ddf */
	sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
	dfd = dev_open(nm, O_RDONLY);
	if (dfd < 0)
		return 1;
	load_ddf_headers(dfd, super, NULL);
	load_ddf_global(dfd, super, NULL);
	close(dfd);
	/* Now we need the device-local bits */
	for (sd = sra->devs ; sd ; sd = sd->next) {
		int rv;

		sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
		dfd = dev_open(nm, O_RDWR);
		if (dfd < 0)
			return 2;
		rv = load_ddf_headers(dfd, super, NULL);
		if (rv == 0)
			/* keep==1: dfd is deliberately not closed here -
			 * presumably load_ddf_local() stores it in the dl
			 * entry for later writes; confirm against that
			 * function. */
			rv = load_ddf_local(dfd, super, NULL, 1);
		if (rv)
			return 1;
	}

	*sbp = super;
	if (st->ss == NULL) {
		st->ss = &super_ddf;
		st->minor_version = 0;
		st->max_devs = 512;
	}
	strcpy(st->container_devnm, fd2devnm(fd));
	return 0;
}
3659
static int load_container_ddf(struct supertype *st, int fd,
			      char *devname)
{
	/* Thin wrapper: load the whole container directly into st->sb. */
	return load_super_ddf_all(st, fd, &st->sb, devname);
}
3665
3666 #endif /* MDASSEMBLE */
3667
3668 static int check_secondary(const struct vcl *vc)
3669 {
3670 const struct vd_config *conf = &vc->conf;
3671 int i;
3672
3673 /* The only DDF secondary RAID level md can support is
3674 * RAID 10, if the stripe sizes and Basic volume sizes
3675 * are all equal.
3676 * Other configurations could in theory be supported by exposing
3677 * the BVDs to user space and using device mapper for the secondary
3678 * mapping. So far we don't support that.
3679 */
3680
3681 __u64 sec_elements[4] = {0, 0, 0, 0};
3682 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1<<((n)&63)))
3683 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1<<((n)&63))) != 0)
3684
3685 if (vc->other_bvds == NULL) {
3686 pr_err("No BVDs for secondary RAID found\n");
3687 return -1;
3688 }
3689 if (conf->prl != DDF_RAID1) {
3690 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3691 return -1;
3692 }
3693 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3694 pr_err("Secondary RAID level %d is unsupported\n",
3695 conf->srl);
3696 return -1;
3697 }
3698 __set_sec_seen(conf->sec_elmnt_seq);
3699 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3700 const struct vd_config *bvd = vc->other_bvds[i];
3701 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3702 continue;
3703 if (bvd->srl != conf->srl) {
3704 pr_err("Inconsistent secondary RAID level across BVDs\n");
3705 return -1;
3706 }
3707 if (bvd->prl != conf->prl) {
3708 pr_err("Different RAID levels for BVDs are unsupported\n");
3709 return -1;
3710 }
3711 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3712 pr_err("All BVDs must have the same number of primary elements\n");
3713 return -1;
3714 }
3715 if (bvd->chunk_shift != conf->chunk_shift) {
3716 pr_err("Different strip sizes for BVDs are unsupported\n");
3717 return -1;
3718 }
3719 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3720 pr_err("Different BVD sizes are unsupported\n");
3721 return -1;
3722 }
3723 __set_sec_seen(bvd->sec_elmnt_seq);
3724 }
3725 for (i = 0; i < conf->sec_elmnt_count; i++) {
3726 if (!__was_sec_seen(i)) {
3727 pr_err("BVD %d is missing\n", i);
3728 return -1;
3729 }
3730 }
3731 return 0;
3732 }
3733
static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
					     be32 refnum, unsigned int nmax,
					     const struct vd_config **bvd,
					     unsigned int *idx)
{
	/* Locate the physical disk with reference number 'refnum' in
	 * configuration 'vc', searching the primary BVD first and then
	 * any secondary BVDs.  On success, *bvd and *idx identify the
	 * BVD and slot, and the return value is the overall raid-disk
	 * number: sec * prim_elmnt_count + position (counting only
	 * used slots).  Returns DDF_NOTFOUND (and NULL in *bvd) when
	 * the refnum is not part of 'vc'.
	 */
	unsigned int i, j, n, sec, cnt;

	cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
	sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);

	for (i = 0, j = 0 ; i < nmax ; i++) {
		/* j counts valid entries for this BVD */
		if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
			*bvd = &vc->conf;
			*idx = i;
			return sec * cnt + j;
		}
		/* 0xffffffff marks an unused slot - don't count it. */
		if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
			j++;
	}
	if (vc->other_bvds == NULL)
		goto bad;

	/* Repeat the search in each secondary BVD. */
	for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
		struct vd_config *vd = vc->other_bvds[n-1];
		sec = vd->sec_elmnt_seq;
		if (sec == DDF_UNUSED_BVD)
			continue;
		for (i = 0, j = 0 ; i < nmax ; i++) {
			if (be32_eq(vd->phys_refnum[i], refnum)) {
				*bvd = vd;
				*idx = i;
				return sec * cnt + j;
			}
			if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
				j++;
		}
	}
bad:
	*bvd = NULL;
	return DDF_NOTFOUND;
}
3776
static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
{
	/* Given a container loaded by load_super_ddf_all,
	 * extract information about all the arrays into
	 * an mdinfo tree.
	 * If 'subarray' is non-NULL, only the subarray whose decimal
	 * vcnum matches it is reported.
	 *
	 * For each vcl in conflist: create an mdinfo, fill it in,
	 * then look for matching devices (phys_refnum) in dlist
	 * and create appropriate device mdinfo.
	 */
	struct ddf_super *ddf = st->sb;
	struct mdinfo *rest = NULL;
	struct vcl *vc;

	for (vc = ddf->conflist ; vc ; vc=vc->next) {
		unsigned int i;
		struct mdinfo *this;
		char *ep;
		__u32 *cptr;
		unsigned int pd;

		/* Skip entries not matching the requested subarray. */
		if (subarray &&
		    (strtoul(subarray, &ep, 10) != vc->vcnum ||
		     *ep != '\0'))
			continue;

		/* Secondary (multi-BVD) configurations are only usable
		 * when they map onto md RAID10. */
		if (vc->conf.sec_elmnt_count > 1) {
			if (check_secondary(vc) != 0)
				continue;
		}

		this = xcalloc(1, sizeof(*this));
		this->next = rest;
		rest = this;

		/* An unsupported layout leaves a mostly-empty entry. */
		if (layout_ddf2md(&vc->conf, &this->array))
			continue;
		this->array.md_minor = -1;
		this->array.major_version = -1;
		this->array.minor_version = -2;
		this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
		/* The creation time lives in bytes 16.. of the GUID;
		 * DDF times are 1980-based, hence the DECADE offset. */
		cptr = (__u32 *)(vc->conf.guid + 16);
		this->array.ctime = DECADE + __be32_to_cpu(*cptr);
		this->array.utime = DECADE +
			be32_to_cpu(vc->conf.timestamp);
		this->array.chunk_size = 512 << vc->conf.chunk_shift;

		/* Derive clean/dirty from the virtual-disk entry. */
		i = vc->vcnum;
		if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
		    (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
		    DDF_init_full) {
			this->array.state = 0;
			this->resync_start = 0;
		} else {
			this->array.state = 1;
			this->resync_start = MaxSector;
		}
		_ddf_array_name(this->name, ddf, i);
		memset(this->uuid, 0, sizeof(this->uuid));
		this->component_size = be64_to_cpu(vc->conf.blocks);
		this->array.size = this->component_size / 2;
		this->container_member = i;

		/* uuid_from_super_ddf() reports the uuid of the current
		 * subarray, so point currentconf at it temporarily. */
		ddf->currentconf = vc;
		uuid_from_super_ddf(st, this->uuid);
		if (!subarray)
			ddf->currentconf = NULL;

		sprintf(this->text_version, "/%s/%d",
			st->container_devnm, this->container_member);

		/* Attach an mdinfo for each online member device. */
		for (pd = 0; pd < be16_to_cpu(ddf->phys->max_pdes); pd++) {
			struct mdinfo *dev;
			struct dl *d;
			const struct vd_config *bvd;
			unsigned int iphys;
			int stt;

			/* 0xFFFFFFFF marks an unused phys_disk slot. */
			if (be32_to_cpu(ddf->phys->entries[pd].refnum)
			    == 0xFFFFFFFF)
				continue;

			stt = be16_to_cpu(ddf->phys->entries[pd].state);
			if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
			    != DDF_Online)
				continue;

			i = get_pd_index_from_refnum(
				vc, ddf->phys->entries[pd].refnum,
				ddf->mppe, &bvd, &iphys);
			if (i == DDF_NOTFOUND)
				continue;

			this->array.working_disks++;

			for (d = ddf->dlist; d ; d=d->next)
				if (be32_eq(d->disk.refnum,
					    ddf->phys->entries[pd].refnum))
					break;
			if (d == NULL)
				/* Haven't found that one yet, maybe there are others */
				continue;

			dev = xcalloc(1, sizeof(*dev));
			dev->next = this->devs;
			this->devs = dev;

			dev->disk.number = be32_to_cpu(d->disk.refnum);
			dev->disk.major = d->major;
			dev->disk.minor = d->minor;
			dev->disk.raid_disk = i;
			dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
			dev->recovery_start = MaxSector;

			dev->events = be32_to_cpu(ddf->active->seq);
			/* Start of this member's data within the BVD. */
			dev->data_offset =
				be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
			dev->component_size = be64_to_cpu(bvd->blocks);
			if (d->devname)
				strcpy(dev->name, d->devname);
		}
	}
	return rest;
}
3901
3902 static int store_super_ddf(struct supertype *st, int fd)
3903 {
3904 struct ddf_super *ddf = st->sb;
3905 unsigned long long dsize;
3906 void *buf;
3907 int rc;
3908
3909 if (!ddf)
3910 return 1;
3911
3912 if (!get_dev_size(fd, NULL, &dsize))
3913 return 1;
3914
3915 if (ddf->dlist || ddf->conflist) {
3916 struct stat sta;
3917 struct dl *dl;
3918 int ofd, ret;
3919
3920 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3921 pr_err("%s: file descriptor for invalid device\n",
3922 __func__);
3923 return 1;
3924 }
3925 for (dl = ddf->dlist; dl; dl = dl->next)
3926 if (dl->major == (int)major(sta.st_rdev) &&
3927 dl->minor == (int)minor(sta.st_rdev))
3928 break;
3929 if (!dl) {
3930 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3931 (int)major(sta.st_rdev),
3932 (int)minor(sta.st_rdev));
3933 return 1;
3934 }
3935 ofd = dl->fd;
3936 dl->fd = fd;
3937 ret = (_write_super_to_disk(ddf, dl) != 1);
3938 dl->fd = ofd;
3939 return ret;
3940 }
3941
3942 if (posix_memalign(&buf, 512, 512) != 0)
3943 return 1;
3944 memset(buf, 0, 512);
3945
3946 lseek64(fd, dsize-512, 0);
3947 rc = write(fd, buf, 512);
3948 free(buf);
3949 if (rc < 0)
3950 return 1;
3951 return 0;
3952 }
3953
3954 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3955 {
3956 /*
3957 * return:
3958 * 0 same, or first was empty, and second was copied
3959 * 1 second had wrong magic number - but that isn't possible
3960 * 2 wrong uuid
3961 * 3 wrong other info
3962 */
3963 struct ddf_super *first = st->sb;
3964 struct ddf_super *second = tst->sb;
3965 struct dl *dl1, *dl2;
3966 struct vcl *vl1, *vl2;
3967 unsigned int max_vds, max_pds, pd, vd;
3968
3969 if (!first) {
3970 st->sb = tst->sb;
3971 tst->sb = NULL;
3972 return 0;
3973 }
3974
3975 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3976 return 2;
3977
3978 /* It is only OK to compare info in the anchor. Anything else
3979 * could be changing due to a reconfig so must be ignored.
3980 * guid really should be enough anyway.
3981 */
3982
3983 if (!be32_eq(first->active->seq, second->active->seq)) {
3984 dprintf("%s: sequence number mismatch %u<->%u\n", __func__,
3985 be32_to_cpu(first->active->seq),
3986 be32_to_cpu(second->active->seq));
3987 return 0;
3988 }
3989
3990 /*
3991 * At this point we are fairly sure that the meta data matches.
3992 * But the new disk may contain additional local data.
3993 * Add it to the super block.
3994 */
3995 max_vds = be16_to_cpu(first->active->max_vd_entries);
3996 max_pds = be16_to_cpu(first->phys->max_pdes);
3997 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3998 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3999 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
4000 DDF_GUID_LEN))
4001 break;
4002 if (vl1) {
4003 if (vl1->other_bvds != NULL &&
4004 vl1->conf.sec_elmnt_seq !=
4005 vl2->conf.sec_elmnt_seq) {
4006 dprintf("%s: adding BVD %u\n", __func__,
4007 vl2->conf.sec_elmnt_seq);
4008 add_other_bvd(vl1, &vl2->conf,
4009 first->conf_rec_len*512);
4010 }
4011 continue;
4012 }
4013
4014 if (posix_memalign((void **)&vl1, 512,
4015 (first->conf_rec_len*512 +
4016 offsetof(struct vcl, conf))) != 0) {
4017 pr_err("%s could not allocate vcl buf\n",
4018 __func__);
4019 return 3;
4020 }
4021
4022 vl1->next = first->conflist;
4023 vl1->block_sizes = NULL;
4024 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
4025 if (alloc_other_bvds(first, vl1) != 0) {
4026 pr_err("%s could not allocate other bvds\n",
4027 __func__);
4028 free(vl1);
4029 return 3;
4030 }
4031 for (vd = 0; vd < max_vds; vd++)
4032 if (!memcmp(first->virt->entries[vd].guid,
4033 vl1->conf.guid, DDF_GUID_LEN))
4034 break;
4035 vl1->vcnum = vd;
4036 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
4037 first->conflist = vl1;
4038 }
4039
4040 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
4041 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
4042 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
4043 break;
4044 if (dl1)
4045 continue;
4046
4047 if (posix_memalign((void **)&dl1, 512,
4048 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
4049 != 0) {
4050 pr_err("%s could not allocate disk info buffer\n",
4051 __func__);
4052 return 3;
4053 }
4054 memcpy(dl1, dl2, sizeof(*dl1));
4055 dl1->mdupdate = NULL;
4056 dl1->next = first->dlist;
4057 dl1->fd = -1;
4058 for (pd = 0; pd < max_pds; pd++)
4059 if (be32_eq(first->phys->entries[pd].refnum,
4060 dl1->disk.refnum))
4061 break;
4062 dl1->pdnum = pd < max_pds ? (int)pd : -1;
4063 if (dl2->spare) {
4064 if (posix_memalign((void **)&dl1->spare, 512,
4065 first->conf_rec_len*512) != 0) {
4066 pr_err("%s could not allocate spare info buf\n",
4067 __func__);
4068 return 3;
4069 }
4070 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
4071 }
4072 for (vd = 0 ; vd < first->max_part ; vd++) {
4073 if (!dl2->vlist[vd]) {
4074 dl1->vlist[vd] = NULL;
4075 continue;
4076 }
4077 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
4078 if (!memcmp(vl1->conf.guid,
4079 dl2->vlist[vd]->conf.guid,
4080 DDF_GUID_LEN))
4081 break;
4082 dl1->vlist[vd] = vl1;
4083 }
4084 }
4085 first->dlist = dl1;
4086 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
4087 be32_to_cpu(dl1->disk.refnum));
4088 }
4089
4090 return 0;
4091 }
4092
4093 #ifndef MDASSEMBLE
4094 /*
4095 * A new array 'a' has been started which claims to be instance 'inst'
4096 * within container 'c'.
4097 * We need to confirm that the array matches the metadata in 'c' so
4098 * that we don't corrupt any metadata.
4099 */
static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
{
	/* Confirm that array 'a' (instance 'inst') matches the DDF
	 * metadata in container 'c'.  Any member device that the
	 * metadata regards as broken is written as "faulty" to sysfs
	 * immediately.  Returns 0 when the subarray may be managed.
	 */
	struct ddf_super *ddf = c->sb;
	int n = atoi(inst);
	struct mdinfo *dev;
	struct dl *dl;
	static const char faulty[] = "faulty";

	/* An all-0xff GUID marks an unused virtual-disk slot. */
	if (all_ff(ddf->virt->entries[n].guid)) {
		pr_err("%s: subarray %d doesn't exist\n", __func__, n);
		return -ENODEV;
	}
	dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
		guid_str(ddf->virt->entries[n].guid));
	for (dev = a->info.devs; dev; dev = dev->next) {
		/* Every member device must be present in our dlist... */
		for (dl = ddf->dlist; dl; dl = dl->next)
			if (dl->major == dev->disk.major &&
			    dl->minor == dev->disk.minor)
				break;
		if (!dl || dl->pdnum < 0) {
			pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
			       __func__, dev->disk.major, dev->disk.minor, n);
			return -1;
		}
		/* ...and must be fully online according to the metadata. */
		if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
		     (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
			pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
			       __func__, n, dl->major, dl->minor,
			       be16_to_cpu(
				       ddf->phys->entries[dl->pdnum].state));
			if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
			    sizeof(faulty) - 1)
				pr_err("Write to state_fd failed\n");
			dev->curr_state = DS_FAULTY;
		}
	}
	a->info.container_member = n;
	return 0;
}
4139
static void handle_missing(struct ddf_super *ddf, struct active_array *a, int inst)
{
	/* This member array is being activated. If any devices
	 * are missing they must now be marked as failed.
	 */
	struct vd_config *vc;
	unsigned int n_bvd;
	struct vcl *vcl;
	struct dl *dl;
	int pd;
	int n;
	int state;

	/* Walk every (BVD, slot) of subarray 'inst' in turn. */
	for (n = 0; ; n++) {
		vc = find_vdcr(ddf, inst, n, &n_bvd, &vcl);
		if (!vc)
			break;
		for (dl = ddf->dlist; dl; dl = dl->next)
			if (be32_eq(dl->disk.refnum, vc->phys_refnum[n_bvd]))
				break;
		if (dl)
			/* Found this disk, so not missing */
			continue;

		/* Mark the device as failed/missing. */
		pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
		if (pd >= 0 && be16_and(ddf->phys->entries[pd].state,
					cpu_to_be16(DDF_Online))) {
			be16_clear(ddf->phys->entries[pd].state,
				   cpu_to_be16(DDF_Online));
			be16_set(ddf->phys->entries[pd].state,
				 cpu_to_be16(DDF_Failed|DDF_Missing));
			/* Detach the slot from the vanished device. */
			vc->phys_refnum[n_bvd] = cpu_to_be32(0);
			ddf_set_updates_pending(ddf, vc);
		}

		/* Mark the array as Degraded */
		state = get_svd_state(ddf, vcl);
		if (ddf->virt->entries[inst].state !=
		    ((ddf->virt->entries[inst].state & ~DDF_state_mask)
		     | state)) {
			ddf->virt->entries[inst].state =
				(ddf->virt->entries[inst].state & ~DDF_state_mask)
				| state;
			/* Ask mdmon to consider starting a recovery. */
			a->check_degraded = 1;
			ddf_set_updates_pending(ddf, vc);
		}
	}
}
4189
4190 /*
4191 * The array 'a' is to be marked clean in the metadata.
4192 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4193 * clean up to the point (in sectors). If that cannot be recorded in the
4194 * metadata, then leave it as dirty.
4195 *
4196 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4197 * !global! virtual_disk.virtual_entry structure.
4198 */
static int ddf_set_array_state(struct active_array *a, int consistent)
{
	/* Record the clean/dirty state of subarray 'a' in the global
	 * virtual_disk entry, and keep the initialisation state
	 * (full / quick / none) in sync with resync progress.
	 * Returns the consistency value actually recorded.
	 */
	struct ddf_super *ddf = a->container->sb;
	int inst = a->info.container_member;
	int old = ddf->virt->entries[inst].state;
	if (consistent == 2) {
		/* consistent==2 means "activation time": fail any
		 * missing devices first, then derive cleanliness from
		 * the resync position. */
		handle_missing(ddf, a, inst);
		/* Should check if a recovery should be started FIXME */
		consistent = 1;
		if (!is_resync_complete(&a->info))
			consistent = 0;
	}
	if (consistent)
		ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
	else
		ddf->virt->entries[inst].state |= DDF_state_inconsistent;
	/* Only schedule a metadata write when something changed. */
	if (old != ddf->virt->entries[inst].state)
		ddf_set_updates_pending(ddf, NULL);

	/* Track initialisation progress: full, none, or partial. */
	old = ddf->virt->entries[inst].init_state;
	ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
	if (is_resync_complete(&a->info))
		ddf->virt->entries[inst].init_state |= DDF_init_full;
	else if (a->info.resync_start == 0)
		ddf->virt->entries[inst].init_state |= DDF_init_not;
	else
		ddf->virt->entries[inst].init_state |= DDF_init_quick;
	if (old != ddf->virt->entries[inst].init_state)
		ddf_set_updates_pending(ddf, NULL);

	dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
		guid_str(ddf->virt->entries[inst].guid), a->curr_state,
		consistent?"clean":"dirty",
		a->info.resync_start);
	return consistent;
}
4235
4236 static int get_bvd_state(const struct ddf_super *ddf,
4237 const struct vd_config *vc)
4238 {
4239 unsigned int i, n_bvd, working = 0;
4240 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4241 int pd, st, state;
4242 for (i = 0; i < n_prim; i++) {
4243 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4244 continue;
4245 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4246 if (pd < 0)
4247 continue;
4248 st = be16_to_cpu(ddf->phys->entries[pd].state);
4249 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4250 == DDF_Online)
4251 working++;
4252 }
4253
4254 state = DDF_state_degraded;
4255 if (working == n_prim)
4256 state = DDF_state_optimal;
4257 else
4258 switch (vc->prl) {
4259 case DDF_RAID0:
4260 case DDF_CONCAT:
4261 case DDF_JBOD:
4262 state = DDF_state_failed;
4263 break;
4264 case DDF_RAID1:
4265 if (working == 0)
4266 state = DDF_state_failed;
4267 else if (working >= 2)
4268 state = DDF_state_part_optimal;
4269 break;
4270 case DDF_RAID4:
4271 case DDF_RAID5:
4272 if (working < n_prim - 1)
4273 state = DDF_state_failed;
4274 break;
4275 case DDF_RAID6:
4276 if (working < n_prim - 2)
4277 state = DDF_state_failed;
4278 else if (working == n_prim - 1)
4279 state = DDF_state_part_optimal;
4280 break;
4281 }
4282 return state;
4283 }
4284
4285 static int secondary_state(int state, int other, int seclevel)
4286 {
4287 if (state == DDF_state_optimal && other == DDF_state_optimal)
4288 return DDF_state_optimal;
4289 if (seclevel == DDF_2MIRRORED) {
4290 if (state == DDF_state_optimal || other == DDF_state_optimal)
4291 return DDF_state_part_optimal;
4292 if (state == DDF_state_failed && other == DDF_state_failed)
4293 return DDF_state_failed;
4294 return DDF_state_degraded;
4295 } else {
4296 if (state == DDF_state_failed || other == DDF_state_failed)
4297 return DDF_state_failed;
4298 if (state == DDF_state_degraded || other == DDF_state_degraded)
4299 return DDF_state_degraded;
4300 return DDF_state_part_optimal;
4301 }
4302 }
4303
4304 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4305 {
4306 int state = get_bvd_state(ddf, &vcl->conf);
4307 unsigned int i;
4308 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4309 state = secondary_state(
4310 state,
4311 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4312 vcl->conf.srl);
4313 }
4314 return state;
4315 }
4316
4317 /*
4318 * The state of each disk is stored in the global phys_disk structure
4319 * in phys_disk.entries[n].state.
4320 * This makes various combinations awkward.
4321 * - When a device fails in any array, it must be failed in all arrays
4322 * that include a part of this device.
4323 * - When a component is rebuilding, we cannot include it officially in the
4324 * array unless this is the only array that uses the device.
4325 *
4326 * So: when transitioning:
4327 * Online -> failed, just set failed flag. monitor will propagate
4328 * spare -> online, the device might need to be added to the array.
4329 * spare -> failed, just set failed. Don't worry if in array or not.
4330 */
static void ddf_set_disk(struct active_array *a, int n, int state)
{
	/* Reflect a state change (DS_FAULTY / DS_INSYNC bits in 'state')
	 * of raid-disk 'n' of array 'a' into the DDF phys-disk table,
	 * then recompute the virtual disk's overall state and flag a
	 * metadata write if anything changed.
	 */
	struct ddf_super *ddf = a->container->sb;
	unsigned int inst = a->info.container_member, n_bvd;
	struct vcl *vcl;
	struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
					 &n_bvd, &vcl);
	int pd;
	struct mdinfo *mdi;
	struct dl *dl;
	int update = 0;	/* set when metadata was modified */

	dprintf("%s: %d to %x\n", __func__, n, state);
	if (vc == NULL) {
		dprintf("ddf: cannot find instance %d!!\n", inst);
		return;
	}
	/* Find the matching slot in 'info'. */
	for (mdi = a->info.devs; mdi; mdi = mdi->next)
		if (mdi->disk.raid_disk == n)
			break;
	if (!mdi) {
		pr_err("%s: cannot find raid disk %d\n",
		       __func__, n);
		return;
	}

	/* and find the 'dl' entry corresponding to that. */
	for (dl = ddf->dlist; dl; dl = dl->next)
		if (mdi->state_fd >= 0 &&
		    mdi->disk.major == dl->major &&
		    mdi->disk.minor == dl->minor)
			break;
	if (!dl) {
		pr_err("%s: cannot find raid disk %d (%d/%d)\n",
		       __func__, n,
		       mdi->disk.major, mdi->disk.minor);
		return;
	}

	pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
	if (pd < 0 || pd != dl->pdnum) {
		/* disk doesn't currently exist or has changed.
		 * If it is now in_sync, insert it. */
		dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
			__func__, dl->pdnum, dl->major, dl->minor,
			be32_to_cpu(dl->disk.refnum));
		dprintf("%s: array %u disk %u ref %08x pd %d\n",
			__func__, inst, n_bvd,
			be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
		if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
			pd = dl->pdnum; /* FIXME: is this really correct ? */
			vc->phys_refnum[n_bvd] = dl->disk.refnum;
			LBA_OFFSET(ddf, vc)[n_bvd] =
				cpu_to_be64(mdi->data_offset);
			/* Disk is now part of this VD, so it is no
			 * longer a global spare. */
			be16_clear(ddf->phys->entries[pd].type,
				   cpu_to_be16(DDF_Global_Spare));
			be16_set(ddf->phys->entries[pd].type,
				 cpu_to_be16(DDF_Active_in_VD));
			update = 1;
		}
	} else {
		/* Known disk: update its Failed/Online/Rebuilding bits. */
		be16 old = ddf->phys->entries[pd].state;
		if (state & DS_FAULTY)
			be16_set(ddf->phys->entries[pd].state,
				 cpu_to_be16(DDF_Failed));
		if (state & DS_INSYNC) {
			be16_set(ddf->phys->entries[pd].state,
				 cpu_to_be16(DDF_Online));
			be16_clear(ddf->phys->entries[pd].state,
				   cpu_to_be16(DDF_Rebuilding));
		}
		if (!be16_eq(old, ddf->phys->entries[pd].state))
			update = 1;
	}

	dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
		be32_to_cpu(dl->disk.refnum), state,
		be16_to_cpu(ddf->phys->entries[pd].state));

	/* Now we need to check the state of the array and update
	 * virtual_disk.entries[n].state.
	 * It needs to be one of "optimal", "degraded", "failed".
	 * I don't understand 'deleted' or 'missing'.
	 */
	state = get_svd_state(ddf, vcl);

	if (ddf->virt->entries[inst].state !=
	    ((ddf->virt->entries[inst].state & ~DDF_state_mask)
	     | state)) {
		ddf->virt->entries[inst].state =
			(ddf->virt->entries[inst].state & ~DDF_state_mask)
			| state;
		update = 1;
	}
	if (update)
		ddf_set_updates_pending(ddf, vc);
}
4429
4430 static void ddf_sync_metadata(struct supertype *st)
4431 {
4432 /*
4433 * Write all data to all devices.
4434 * Later, we might be able to track whether only local changes
4435 * have been made, or whether any global data has been changed,
4436 * but ddf is sufficiently weird that it probably always
4437 * changes global data ....
4438 */
4439 struct ddf_super *ddf = st->sb;
4440 if (!ddf->updates_pending)
4441 return;
4442 ddf->updates_pending = 0;
4443 __write_init_super_ddf(st);
4444 dprintf("ddf: sync_metadata\n");
4445 }
4446
4447 static int del_from_conflist(struct vcl **list, const char *guid)
4448 {
4449 struct vcl **p;
4450 int found = 0;
4451 for (p = list; p && *p; p = &((*p)->next))
4452 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4453 found = 1;
4454 *p = (*p)->next;
4455 }
4456 return found;
4457 }
4458
4459 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4460 {
4461 struct dl *dl;
4462 unsigned int vdnum, i;
4463 vdnum = find_vde_by_guid(ddf, guid);
4464 if (vdnum == DDF_NOTFOUND) {
4465 pr_err("%s: could not find VD %s\n", __func__,
4466 guid_str(guid));
4467 return -1;
4468 }
4469 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4470 pr_err("%s: could not find conf %s\n", __func__,
4471 guid_str(guid));
4472 return -1;
4473 }
4474 for (dl = ddf->dlist; dl; dl = dl->next)
4475 for (i = 0; i < ddf->max_part; i++)
4476 if (dl->vlist[i] != NULL &&
4477 !memcmp(dl->vlist[i]->conf.guid, guid,
4478 DDF_GUID_LEN))
4479 dl->vlist[i] = NULL;
4480 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4481 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4482 return 0;
4483 }
4484
4485 static int kill_subarray_ddf(struct supertype *st)
4486 {
4487 struct ddf_super *ddf = st->sb;
4488 /*
4489 * currentconf is set in container_content_ddf,
4490 * called with subarray arg
4491 */
4492 struct vcl *victim = ddf->currentconf;
4493 struct vd_config *conf;
4494 unsigned int vdnum;
4495
4496 ddf->currentconf = NULL;
4497 if (!victim) {
4498 pr_err("%s: nothing to kill\n", __func__);
4499 return -1;
4500 }
4501 conf = &victim->conf;
4502 vdnum = find_vde_by_guid(ddf, conf->guid);
4503 if (vdnum == DDF_NOTFOUND) {
4504 pr_err("%s: could not find VD %s\n", __func__,
4505 guid_str(conf->guid));
4506 return -1;
4507 }
4508 if (st->update_tail) {
4509 struct virtual_disk *vd;
4510 int len = sizeof(struct virtual_disk)
4511 + sizeof(struct virtual_entry);
4512 vd = xmalloc(len);
4513 if (vd == NULL) {
4514 pr_err("%s: failed to allocate %d bytes\n", __func__,
4515 len);
4516 return -1;
4517 }
4518 memset(vd, 0 , len);
4519 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4520 vd->populated_vdes = cpu_to_be16(0);
4521 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4522 /* we use DDF_state_deleted as marker */
4523 vd->entries[0].state = DDF_state_deleted;
4524 append_metadata_update(st, vd, len);
4525 } else {
4526 _kill_subarray_ddf(ddf, conf->guid);
4527 ddf_set_updates_pending(ddf, NULL);
4528 ddf_sync_metadata(st);
4529 }
4530 return 0;
4531 }
4532
4533 static void copy_matching_bvd(struct ddf_super *ddf,
4534 struct vd_config *conf,
4535 const struct metadata_update *update)
4536 {
4537 unsigned int mppe =
4538 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4539 unsigned int len = ddf->conf_rec_len * 512;
4540 char *p;
4541 struct vd_config *vc;
4542 for (p = update->buf; p < update->buf + update->len; p += len) {
4543 vc = (struct vd_config *) p;
4544 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4545 memcpy(conf->phys_refnum, vc->phys_refnum,
4546 mppe * (sizeof(__u32) + sizeof(__u64)));
4547 return;
4548 }
4549 }
4550 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4551 conf->sec_elmnt_seq, guid_str(conf->guid));
4552 }
4553
static void ddf_process_update(struct supertype *st,
			       struct metadata_update *update)
{
	/* Apply this update to the metadata.
	 * The first 4 bytes are a DDF_*_MAGIC which guides
	 * our actions.
	 * Possible updates are:
	 *  DDF_PHYS_RECORDS_MAGIC
	 *    Add a new physical device or remove an old one.
	 *    Changes to this record only happen implicitly.
	 *    used_pdes is the device number.
	 *  DDF_VIRT_RECORDS_MAGIC
	 *    Add a new VD.  Possibly also change the 'access' bits.
	 *    populated_vdes is the entry number.
	 *  DDF_VD_CONF_MAGIC
	 *    New or updated VD.  the VIRT_RECORD must already
	 *    exist.  For an update, phys_refnum and lba_offset
	 *    (at least) are updated, and the VD_CONF must
	 *    be written to precisely those devices listed with
	 *    a phys_refnum.
	 *  DDF_SPARE_ASSIGN_MAGIC
	 *    replacement Spare Assignment Record... but for which device?
	 *
	 * So, e.g.:
	 *  - to create a new array, we send a VIRT_RECORD and
	 *    a VD_CONF.  Then assemble and start the array.
	 *  - to activate a spare we send a VD_CONF to add the phys_refnum
	 *    and offset.  This will also mark the spare as active with
	 *    a spare-assignment record.
	 *
	 * Runs in mdmon's monitor thread, so no memory may be allocated
	 * here; any needed buffers are pre-allocated into update->space
	 * by ddf_prepare_update().
	 */
	struct ddf_super *ddf = st->sb;
	be32 *magic = (be32 *)update->buf;
	struct phys_disk *pd;
	struct virtual_disk *vd;
	struct vd_config *vc;
	struct vcl *vcl;
	struct dl *dl;
	unsigned int ent;
	unsigned int pdnum, pd2, len;

	dprintf("Process update %x\n", be32_to_cpu(*magic));

	if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
		/* Exactly one phys_disk_entry is expected. */
		if (update->len != (sizeof(struct phys_disk) +
				    sizeof(struct phys_disk_entry)))
			return;
		pd = (struct phys_disk*)update->buf;

		ent = be16_to_cpu(pd->used_pdes);
		if (ent >= be16_to_cpu(ddf->phys->max_pdes))
			return;
		if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
			struct dl **dlp;
			/* removing this disk. */
			be16_set(ddf->phys->entries[ent].state,
				 cpu_to_be16(DDF_Missing));
			for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
				struct dl *dl = *dlp;
				if (dl->pdnum == (signed)ent) {
					close(dl->fd);
					dl->fd = -1;
					/* FIXME this doesn't free
					 * dl->devname */
					/* hand the unlinked dl to the
					 * update framework for freeing */
					update->space = dl;
					*dlp = dl->next;
					break;
				}
			}
			ddf_set_updates_pending(ddf, NULL);
			return;
		}
		/* Adding a disk: the slot must still be unused
		 * (all-0xff GUID). */
		if (!all_ff(ddf->phys->entries[ent].guid))
			return;
		ddf->phys->entries[ent] = pd->entries[0];
		ddf->phys->used_pdes = cpu_to_be16
			(1 + be16_to_cpu(ddf->phys->used_pdes));
		ddf_set_updates_pending(ddf, NULL);
		if (ddf->add_list) {
			struct active_array *a;
			struct dl *al = ddf->add_list;
			ddf->add_list = al->next;

			al->next = ddf->dlist;
			ddf->dlist = al;

			/* As a device has been added, we should check
			 * for any degraded devices that might make
			 * use of this spare */
			for (a = st->arrays ; a; a=a->next)
				a->check_degraded = 1;
		}
	} else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
		if (update->len != (sizeof(struct virtual_disk) +
				    sizeof(struct virtual_entry)))
			return;
		vd = (struct virtual_disk*)update->buf;

		if (vd->entries[0].state == DDF_state_deleted) {
			/* DDF_state_deleted is the "kill this subarray"
			 * marker set by kill_subarray_ddf(). */
			if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
				return;
		} else {
			ent = find_vde_by_guid(ddf, vd->entries[0].guid);
			if (ent != DDF_NOTFOUND) {
				dprintf("%s: VD %s exists already in slot %d\n",
					__func__, guid_str(vd->entries[0].guid),
					ent);
				return;
			}
			ent = find_unused_vde(ddf);
			if (ent == DDF_NOTFOUND)
				return;
			ddf->virt->entries[ent] = vd->entries[0];
			ddf->virt->populated_vdes =
				cpu_to_be16(
					1 + be16_to_cpu(
						ddf->virt->populated_vdes));
			dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
				__func__, guid_str(vd->entries[0].guid), ent,
				ddf->virt->entries[ent].state,
				ddf->virt->entries[ent].init_state);
		}
		ddf_set_updates_pending(ddf, NULL);
	}

	else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
		vc = (struct vd_config*)update->buf;
		len = ddf->conf_rec_len * 512;
		if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
			pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
			       __func__, guid_str(vc->guid), update->len,
			       vc->sec_elmnt_count);
			return;
		}
		for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
			if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
				break;
		dprintf("%s: conf update for %s (%s)\n", __func__,
			guid_str(vc->guid), (vcl ? "old" : "new"));
		if (vcl) {
			/* An update, just copy the phys_refnum and lba_offset
			 * fields
			 */
			unsigned int i;
			unsigned int k;
			copy_matching_bvd(ddf, &vcl->conf, update);
			for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
				dprintf("BVD %u has %08x at %llu\n", 0,
					be32_to_cpu(vcl->conf.phys_refnum[k]),
					be64_to_cpu(LBA_OFFSET(ddf,
							       &vcl->conf)[k]));
			for (i = 1; i < vc->sec_elmnt_count; i++) {
				copy_matching_bvd(ddf, vcl->other_bvds[i-1],
						  update);
				for (k = 0; k < be16_to_cpu(
					     vc->prim_elmnt_count); k++)
					dprintf("BVD %u has %08x at %llu\n", i,
						be32_to_cpu
						(vcl->other_bvds[i-1]->
						 phys_refnum[k]),
						be64_to_cpu
						(LBA_OFFSET
						 (ddf,
						  vcl->other_bvds[i-1])[k]));
			}
		} else {
			/* A new VD_CONF */
			unsigned int i;
			/* update->space was pre-allocated by
			 * ddf_prepare_update(); we must not malloc here. */
			if (!update->space)
				return;
			vcl = update->space;
			update->space = NULL;
			vcl->next = ddf->conflist;
			memcpy(&vcl->conf, vc, len);
			ent = find_vde_by_guid(ddf, vc->guid);
			if (ent == DDF_NOTFOUND)
				return;
			vcl->vcnum = ent;
			ddf->conflist = vcl;
			for (i = 1; i < vc->sec_elmnt_count; i++)
				memcpy(vcl->other_bvds[i-1],
				       update->buf + len * i, len);
		}
		/* Set DDF_Transition on all Failed devices - to help
		 * us detect those that are no longer in use
		 */
		for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
		     pdnum++)
			if (be16_and(ddf->phys->entries[pdnum].state,
				     cpu_to_be16(DDF_Failed)))
				be16_set(ddf->phys->entries[pdnum].state,
					 cpu_to_be16(DDF_Transition));
		/* Now make sure vlist is correct for each dl. */
		for (dl = ddf->dlist; dl; dl = dl->next) {
			unsigned int vn = 0;
			int in_degraded = 0;

			if (dl->pdnum < 0)
				continue;
			for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
				unsigned int dn, ibvd;
				const struct vd_config *conf;
				int vstate;
				dn = get_pd_index_from_refnum(vcl,
							      dl->disk.refnum,
							      ddf->mppe,
							      &conf, &ibvd);
				if (dn == DDF_NOTFOUND)
					continue;
				dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
					dl->pdnum,
					be32_to_cpu(dl->disk.refnum),
					guid_str(conf->guid),
					conf->sec_elmnt_seq, vn);
				/* Clear the Transition flag */
				if (be16_and
				    (ddf->phys->entries[dl->pdnum].state,
				     cpu_to_be16(DDF_Failed)))
					be16_clear(ddf->phys
						   ->entries[dl->pdnum].state,
						   cpu_to_be16(DDF_Transition));
				dl->vlist[vn++] = vcl;
				vstate = ddf->virt->entries[vcl->vcnum].state
					& DDF_state_mask;
				if (vstate == DDF_state_degraded ||
				    vstate == DDF_state_part_optimal)
					in_degraded = 1;
			}
			while (vn < ddf->max_part)
				dl->vlist[vn++] = NULL;
			if (dl->vlist[0]) {
				/* disk is used by at least one VD */
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Global_Spare));
				if (!be16_and(ddf->phys
					      ->entries[dl->pdnum].type,
					      cpu_to_be16(DDF_Active_in_VD))) {
					be16_set(ddf->phys
						 ->entries[dl->pdnum].type,
						 cpu_to_be16(DDF_Active_in_VD));
					if (in_degraded)
						be16_set(ddf->phys
							 ->entries[dl->pdnum]
							 .state,
							 cpu_to_be16
							 (DDF_Rebuilding));
				}
			}
			if (dl->spare) {
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Global_Spare));
				be16_set(ddf->phys->entries[dl->pdnum].type,
					 cpu_to_be16(DDF_Spare));
			}
			if (!dl->vlist[0] && !dl->spare) {
				/* unused and unassigned: global spare */
				be16_set(ddf->phys->entries[dl->pdnum].type,
					 cpu_to_be16(DDF_Global_Spare));
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Spare));
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Active_in_VD));
			}
		}

		/* Now remove any 'Failed' devices that are not part
		 * of any VD.  They will have the Transition flag set.
		 * Once done, we need to update all dl->pdnum numbers.
		 */
		pd2 = 0;
		for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
		     pdnum++) {
			if (be32_to_cpu(ddf->phys->entries[pdnum].refnum) ==
			    0xFFFFFFFF)
				continue;
			if (be16_and(ddf->phys->entries[pdnum].state,
				     cpu_to_be16(DDF_Failed))
			    && be16_and(ddf->phys->entries[pdnum].state,
					cpu_to_be16(DDF_Transition))) {
				/* skip this one unless in dlist*/
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						break;
				if (!dl)
					continue;
			}
			/* Compact surviving entries towards the front,
			 * keeping dl->pdnum in step. */
			if (pdnum == pd2)
				pd2++;
			else {
				ddf->phys->entries[pd2] =
					ddf->phys->entries[pdnum];
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						dl->pdnum = pd2;
				pd2++;
			}
		}
		ddf->phys->used_pdes = cpu_to_be16(pd2);
		/* Mark the now-unused tail entries as free. */
		while (pd2 < pdnum) {
			memset(ddf->phys->entries[pd2].guid, 0xff,
			       DDF_GUID_LEN);
			pd2++;
		}

		ddf_set_updates_pending(ddf, vc);
	}
	/* case DDF_SPARE_ASSIGN_MAGIC */
}
4859
4860 static void ddf_prepare_update(struct supertype *st,
4861 struct metadata_update *update)
4862 {
4863 /* This update arrived at managemon.
4864 * We are about to pass it to monitor.
4865 * If a malloc is needed, do it here.
4866 */
4867 struct ddf_super *ddf = st->sb;
4868 be32 *magic = (be32 *)update->buf;
4869 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4870 struct vcl *vcl;
4871 struct vd_config *conf = (struct vd_config *) update->buf;
4872 if (posix_memalign(&update->space, 512,
4873 offsetof(struct vcl, conf)
4874 + ddf->conf_rec_len * 512) != 0) {
4875 update->space = NULL;
4876 return;
4877 }
4878 vcl = update->space;
4879 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4880 if (alloc_other_bvds(ddf, vcl) != 0) {
4881 free(update->space);
4882 update->space = NULL;
4883 }
4884 }
4885 }
4886
4887 /*
4888 * Check degraded state of a RAID10.
4889 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4890 */
4891 static int raid10_degraded(struct mdinfo *info)
4892 {
4893 int n_prim, n_bvds;
4894 int i;
4895 struct mdinfo *d;
4896 char *found;
4897 int ret = -1;
4898
4899 n_prim = info->array.layout & ~0x100;
4900 n_bvds = info->array.raid_disks / n_prim;
4901 found = xmalloc(n_bvds);
4902 if (found == NULL)
4903 return ret;
4904 memset(found, 0, n_bvds);
4905 for (d = info->devs; d; d = d->next) {
4906 i = d->disk.raid_disk / n_prim;
4907 if (i >= n_bvds) {
4908 pr_err("%s: BUG: invalid raid disk\n", __func__);
4909 goto out;
4910 }
4911 if (d->state_fd > 0)
4912 found[i]++;
4913 }
4914 ret = 2;
4915 for (i = 0; i < n_bvds; i++)
4916 if (!found[i]) {
4917 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4918 ret = 0;
4919 goto out;
4920 } else if (found[i] < n_prim) {
4921 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4922 n_bvds);
4923 ret = 1;
4924 }
4925 out:
4926 free(found);
4927 return ret;
4928 }
4929
4930 /*
4931 * Check if the array 'a' is degraded but not failed.
4932 * If it is, find as many spares as are available and needed and
4933 * arrange for their inclusion.
4934 * We only choose devices which are not already in the array,
4935 * and prefer those with a spare-assignment to this array.
4936 * Otherwise we choose global spares - assuming always that
4937 * there is enough room.
4938 * For each spare that we assign, we return an 'mdinfo' which
4939 * describes the position for the device in the array.
4940 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4941 * the new phys_refnum and lba_offset values.
4942 *
4943 * Only worry about BVDs at the moment.
4944 */
4945 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4946 struct metadata_update **updates)
4947 {
4948 int working = 0;
4949 struct mdinfo *d;
4950 struct ddf_super *ddf = a->container->sb;
4951 int global_ok = 0;
4952 struct mdinfo *rv = NULL;
4953 struct mdinfo *di;
4954 struct metadata_update *mu;
4955 struct dl *dl;
4956 int i;
4957 unsigned int j;
4958 struct vcl *vcl;
4959 struct vd_config *vc;
4960 unsigned int n_bvd;
4961
4962 for (d = a->info.devs ; d ; d = d->next) {
4963 if ((d->curr_state & DS_FAULTY) &&
4964 d->state_fd >= 0)
4965 /* wait for Removal to happen */
4966 return NULL;
4967 if (d->state_fd >= 0)
4968 working ++;
4969 }
4970
4971 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4972 a->info.array.raid_disks,
4973 a->info.array.level);
4974 if (working == a->info.array.raid_disks)
4975 return NULL; /* array not degraded */
4976 switch (a->info.array.level) {
4977 case 1:
4978 if (working == 0)
4979 return NULL; /* failed */
4980 break;
4981 case 4:
4982 case 5:
4983 if (working < a->info.array.raid_disks - 1)
4984 return NULL; /* failed */
4985 break;
4986 case 6:
4987 if (working < a->info.array.raid_disks - 2)
4988 return NULL; /* failed */
4989 break;
4990 case 10:
4991 if (raid10_degraded(&a->info) < 1)
4992 return NULL;
4993 break;
4994 default: /* concat or stripe */
4995 return NULL; /* failed */
4996 }
4997
4998 /* For each slot, if it is not working, find a spare */
4999 dl = ddf->dlist;
5000 for (i = 0; i < a->info.array.raid_disks; i++) {
5001 for (d = a->info.devs ; d ; d = d->next)
5002 if (d->disk.raid_disk == i)
5003 break;
5004 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
5005 if (d && (d->state_fd >= 0))
5006 continue;
5007
5008 /* OK, this device needs recovery. Find a spare */
5009 again:
5010 for ( ; dl ; dl = dl->next) {
5011 unsigned long long esize;
5012 unsigned long long pos;
5013 struct mdinfo *d2;
5014 int is_global = 0;
5015 int is_dedicated = 0;
5016 struct extent *ex;
5017 unsigned int j;
5018 be16 state;
5019
5020 if (dl->pdnum < 0)
5021 continue;
5022 state = ddf->phys->entries[dl->pdnum].state;
5023 if (be16_and(state,
5024 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
5025 !be16_and(state,
5026 cpu_to_be16(DDF_Online)))
5027 continue;
5028
5029 /* If in this array, skip */
5030 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
5031 if (d2->state_fd >= 0 &&
5032 d2->disk.major == dl->major &&
5033 d2->disk.minor == dl->minor) {
5034 dprintf("%x:%x (%08x) already in array\n",
5035 dl->major, dl->minor,
5036 be32_to_cpu(dl->disk.refnum));
5037 break;
5038 }
5039 if (d2)
5040 continue;
5041 if (be16_and(ddf->phys->entries[dl->pdnum].type,
5042 cpu_to_be16(DDF_Spare))) {
5043 /* Check spare assign record */
5044 if (dl->spare) {
5045 if (dl->spare->type & DDF_spare_dedicated) {
5046 /* check spare_ents for guid */
5047 for (j = 0 ;
5048 j < be16_to_cpu
5049 (dl->spare
5050 ->populated);
5051 j++) {
5052 if (memcmp(dl->spare->spare_ents[j].guid,
5053 ddf->virt->entries[a->info.container_member].guid,
5054 DDF_GUID_LEN) == 0)
5055 is_dedicated = 1;
5056 }
5057 } else
5058 is_global = 1;
5059 }
5060 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
5061 cpu_to_be16(DDF_Global_Spare))) {
5062 is_global = 1;
5063 } else if (!be16_and(ddf->phys
5064 ->entries[dl->pdnum].state,
5065 cpu_to_be16(DDF_Failed))) {
5066 /* we can possibly use some of this */
5067 is_global = 1;
5068 }
5069 if ( ! (is_dedicated ||
5070 (is_global && global_ok))) {
5071 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
5072 is_dedicated, is_global);
5073 continue;
5074 }
5075
5076 /* We are allowed to use this device - is there space?
5077 * We need a->info.component_size sectors */
5078 ex = get_extents(ddf, dl);
5079 if (!ex) {
5080 dprintf("cannot get extents\n");
5081 continue;
5082 }
5083 j = 0; pos = 0;
5084 esize = 0;
5085
5086 do {
5087 esize = ex[j].start - pos;
5088 if (esize >= a->info.component_size)
5089 break;
5090 pos = ex[j].start + ex[j].size;
5091 j++;
5092 } while (ex[j-1].size);
5093
5094 free(ex);
5095 if (esize < a->info.component_size) {
5096 dprintf("%x:%x has no room: %llu %llu\n",
5097 dl->major, dl->minor,
5098 esize, a->info.component_size);
5099 /* No room */
5100 continue;
5101 }
5102
5103 /* Cool, we have a device with some space at pos */
5104 di = xcalloc(1, sizeof(*di));
5105 di->disk.number = i;
5106 di->disk.raid_disk = i;
5107 di->disk.major = dl->major;
5108 di->disk.minor = dl->minor;
5109 di->disk.state = 0;
5110 di->recovery_start = 0;
5111 di->data_offset = pos;
5112 di->component_size = a->info.component_size;
5113 di->next = rv;
5114 rv = di;
5115 dprintf("%x:%x (%08x) to be %d at %llu\n",
5116 dl->major, dl->minor,
5117 be32_to_cpu(dl->disk.refnum), i, pos);
5118
5119 break;
5120 }
5121 if (!dl && ! global_ok) {
5122 /* not enough dedicated spares, try global */
5123 global_ok = 1;
5124 dl = ddf->dlist;
5125 goto again;
5126 }
5127 }
5128
5129 if (!rv)
5130 /* No spares found */
5131 return rv;
5132 /* Now 'rv' has a list of devices to return.
5133 * Create a metadata_update record to update the
5134 * phys_refnum and lba_offset values
5135 */
5136 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5137 &n_bvd, &vcl);
5138 if (vc == NULL)
5139 return NULL;
5140
5141 mu = xmalloc(sizeof(*mu));
5142 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5143 free(mu);
5144 mu = NULL;
5145 }
5146
5147 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5148 mu->buf = xmalloc(mu->len);
5149 mu->space = NULL;
5150 mu->space_list = NULL;
5151 mu->next = *updates;
5152 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5153 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5154 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5155 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5156
5157 vc = (struct vd_config*)mu->buf;
5158 for (di = rv ; di ; di = di->next) {
5159 unsigned int i_sec, i_prim;
5160 i_sec = di->disk.raid_disk
5161 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5162 i_prim = di->disk.raid_disk
5163 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5164 vc = (struct vd_config *)(mu->buf
5165 + i_sec * ddf->conf_rec_len * 512);
5166 for (dl = ddf->dlist; dl; dl = dl->next)
5167 if (dl->major == di->disk.major
5168 && dl->minor == di->disk.minor)
5169 break;
5170 if (!dl || dl->pdnum < 0) {
5171 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
5172 __func__, di->disk.raid_disk,
5173 di->disk.major, di->disk.minor);
5174 return NULL;
5175 }
5176 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5177 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5178 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5179 be32_to_cpu(vc->phys_refnum[i_prim]),
5180 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5181 }
5182 *updates = mu;
5183 return rv;
5184 }
5185 #endif /* MDASSEMBLE */
5186
5187 static int ddf_level_to_layout(int level)
5188 {
5189 switch(level) {
5190 case 0:
5191 case 1:
5192 return 0;
5193 case 5:
5194 return ALGORITHM_LEFT_SYMMETRIC;
5195 case 6:
5196 return ALGORITHM_ROTATING_N_CONTINUE;
5197 case 10:
5198 return 0x102;
5199 default:
5200 return UnSet;
5201 }
5202 }
5203
5204 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5205 {
5206 if (level && *level == UnSet)
5207 *level = LEVEL_CONTAINER;
5208
5209 if (level && layout && *layout == UnSet)
5210 *layout = ddf_level_to_layout(*level);
5211 }
5212
/* Method table registering the DDF metadata format with mdadm/mdmon. */
struct superswitch super_ddf = {
#ifndef MDASSEMBLE
	/* Management/creation operations, omitted from the stripped-down
	 * mdassemble build. */
	.examine_super	= examine_super_ddf,
	.brief_examine_super = brief_examine_super_ddf,
	.brief_examine_subarrays = brief_examine_subarrays_ddf,
	.export_examine_super = export_examine_super_ddf,
	.detail_super	= detail_super_ddf,
	.brief_detail_super = brief_detail_super_ddf,
	.validate_geometry = validate_geometry_ddf,
	.write_init_super = write_init_super_ddf,
	.add_to_super	= add_to_super_ddf,
	.remove_from_super = remove_from_super_ddf,
	.load_container	= load_container_ddf,
	.copy_metadata = copy_metadata_ddf,
	.kill_subarray  = kill_subarray_ddf,
#endif
	/* Core metadata handling, always built. */
	.match_home	= match_home_ddf,
	.uuid_from_super= uuid_from_super_ddf,
	.getinfo_super  = getinfo_super_ddf,
	.update_super	= update_super_ddf,

	.avail_size	= avail_size_ddf,

	.compare_super	= compare_super_ddf,

	.load_super	= load_super_ddf,
	.init_super	= init_super_ddf,
	.store_super	= store_super_ddf,
	.free_super	= free_super_ddf,
	.match_metadata_desc = match_metadata_desc_ddf,
	.container_content = container_content_ddf,
	.default_geometry = default_geometry_ddf,

	/* DDF is an "external" (userspace-managed) metadata format. */
	.external	= 1,

#ifndef MDASSEMBLE
/* for mdmon */
	.open_new       = ddf_open_new,
	.set_array_state= ddf_set_array_state,
	.set_disk       = ddf_set_disk,
	.sync_metadata  = ddf_sync_metadata,
	.process_update	= ddf_process_update,
	.prepare_update	= ddf_prepare_update,
	.activate_spare = ddf_activate_spare,
#endif
	.name = "ddf",
};