1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2014 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
  37 /* DDF timestamps are based on 1980-01-01, so we need to add the
  38  * number of seconds in the decade of the seventies to convert them
  39  * to Linux (1970-based) timestamps: 10 years with 2 leap years.
  40  */
41 #define DECADE (3600*24*(365*10+2))
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* Default for safe_mode_delay. Same value as for IMSM.
51 */
52 static const int DDF_SAFE_MODE_DELAY = 4000;
53
54 /* The DDF metadata handling.
55 * DDF metadata lives at the end of the device.
56 * The last 512 byte block provides an 'anchor' which is used to locate
57 * the rest of the metadata which usually lives immediately behind the anchor.
58 *
59 * Note:
60 * - all multibyte numeric fields are bigendian.
61 * - all strings are space padded.
62 *
63 */
64
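/*
 * Illustrative sketch, not part of the original mdadm source: given the
 * layout described above, locating the anchor is just a seek to the last
 * 512-byte block of the device followed by a single sector read.  The
 * helper name is invented for this example; load_ddf_headers() below does
 * the real work.
 */
static inline int read_ddf_anchor_example(int fd, unsigned long long devsize,
					  void *buf512)
{
	/* devsize is the device size in bytes; the anchor is the final sector */
	if (lseek64(fd, devsize - 512, 0) < 0)
		return -1;
	return read(fd, buf512, 512) == 512 ? 0 : -1;
}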
65 typedef struct __be16 {
66 __u16 _v16;
67 } be16;
68 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
69 #define be16_and(x, y) ((x)._v16 & (y)._v16)
70 #define be16_or(x, y) ((x)._v16 | (y)._v16)
71 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
72 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
73
74 typedef struct __be32 {
75 __u32 _v32;
76 } be32;
77 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
78
79 typedef struct __be64 {
80 __u64 _v64;
81 } be64;
82 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
83
84 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
85 static inline be16 cpu_to_be16(__u16 x)
86 {
87 be16 be = { ._v16 = __cpu_to_be16(x) };
88 return be;
89 }
90
91 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
92 static inline be32 cpu_to_be32(__u32 x)
93 {
94 be32 be = { ._v32 = __cpu_to_be32(x) };
95 return be;
96 }
97
98 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
99 static inline be64 cpu_to_be64(__u64 x)
100 {
101 be64 be = { ._v64 = __cpu_to_be64(x) };
102 return be;
103 }
104
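/*
 * Illustrative sketch, not part of the original mdadm source: the be16/be32/
 * be64 wrapper structs above exist so that on-disk big-endian fields cannot
 * be mixed with native-endian integers by accident - every access has to go
 * through the cpu_to_beXX()/beXX_to_cpu() helpers.  For example, a DDF
 * timestamp (seconds since 1980-01-01) converts to Unix time like this
 * (helper name invented for the example):
 */
static inline time_t ddf_time_to_unix_example(be32 ddf_timestamp)
{
	/* unwrap the big-endian value, then shift the 1980 epoch back to 1970 */
	return (time_t)be32_to_cpu(ddf_timestamp) + DECADE;
}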
105 /* Primary Raid Level (PRL) */
106 #define DDF_RAID0 0x00
107 #define DDF_RAID1 0x01
108 #define DDF_RAID3 0x03
109 #define DDF_RAID4 0x04
110 #define DDF_RAID5 0x05
111 #define DDF_RAID1E 0x11
112 #define DDF_JBOD 0x0f
113 #define DDF_CONCAT 0x1f
114 #define DDF_RAID5E 0x15
115 #define DDF_RAID5EE 0x25
116 #define DDF_RAID6 0x06
117
118 /* Raid Level Qualifier (RLQ) */
119 #define DDF_RAID0_SIMPLE 0x00
120 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
121 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
122 #define DDF_RAID3_0 0x00 /* parity in first extent */
123 #define DDF_RAID3_N 0x01 /* parity in last extent */
124 #define DDF_RAID4_0 0x00 /* parity in first extent */
125 #define DDF_RAID4_N 0x01 /* parity in last extent */
126 /* these apply to raid5e and raid5ee as well */
127 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
128 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
129 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
130 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
131
132 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
133 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
134
135 /* Secondary RAID Level (SRL) */
136 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
137 #define DDF_2MIRRORED 0x01
138 #define DDF_2CONCAT 0x02
139 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
140
141 /* Magic numbers */
142 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
143 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
144 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
145 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
146 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
147 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
148 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
149 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
150 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
151 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
152
153 #define DDF_GUID_LEN 24
154 #define DDF_REVISION_0 "01.00.00"
155 #define DDF_REVISION_2 "01.02.00"
156
157 struct ddf_header {
158 be32 magic; /* DDF_HEADER_MAGIC */
159 be32 crc;
160 char guid[DDF_GUID_LEN];
161 char revision[8]; /* 01.02.00 */
162 be32 seq; /* starts at '1' */
163 be32 timestamp;
164 __u8 openflag;
165 __u8 foreignflag;
166 __u8 enforcegroups;
167 __u8 pad0; /* 0xff */
168 __u8 pad1[12]; /* 12 * 0xff */
169 /* 64 bytes so far */
170 __u8 header_ext[32]; /* reserved: fill with 0xff */
171 be64 primary_lba;
172 be64 secondary_lba;
173 __u8 type;
174 __u8 pad2[3]; /* 0xff */
175 be32 workspace_len; /* sectors for vendor space -
176 * at least 32768(sectors) */
177 be64 workspace_lba;
178 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
179 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
180 be16 max_partitions; /* i.e. max num of configuration
181 record entries per disk */
182 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
183 *12/512) */
184 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
185 __u8 pad3[54]; /* 0xff */
186 /* 192 bytes so far */
187 be32 controller_section_offset;
188 be32 controller_section_length;
189 be32 phys_section_offset;
190 be32 phys_section_length;
191 be32 virt_section_offset;
192 be32 virt_section_length;
193 be32 config_section_offset;
194 be32 config_section_length;
195 be32 data_section_offset;
196 be32 data_section_length;
197 be32 bbm_section_offset;
198 be32 bbm_section_length;
199 be32 diag_space_offset;
200 be32 diag_space_length;
201 be32 vendor_offset;
202 be32 vendor_length;
203 /* 256 bytes so far */
204 __u8 pad4[256]; /* 0xff */
205 };
206
207 /* type field */
208 #define DDF_HEADER_ANCHOR 0x00
209 #define DDF_HEADER_PRIMARY 0x01
210 #define DDF_HEADER_SECONDARY 0x02
211
212 /* The content of the 'controller section' - global scope */
213 struct ddf_controller_data {
214 be32 magic; /* DDF_CONTROLLER_MAGIC */
215 be32 crc;
216 char guid[DDF_GUID_LEN];
217 struct controller_type {
218 be16 vendor_id;
219 be16 device_id;
220 be16 sub_vendor_id;
221 be16 sub_device_id;
222 } type;
223 char product_id[16];
224 __u8 pad[8]; /* 0xff */
225 __u8 vendor_data[448];
226 };
227
228 /* The content of phys_section - global scope */
229 struct phys_disk {
230 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
231 be32 crc;
232 be16 used_pdes; /* This is a counter, not a max - the list
233 * of used entries may not be dense */
234 be16 max_pdes;
235 __u8 pad[52];
236 struct phys_disk_entry {
237 char guid[DDF_GUID_LEN];
238 be32 refnum;
239 be16 type;
240 be16 state;
241 be64 config_size; /* DDF structures must be after here */
242 char path[18]; /* Another horrible structure really
243 * but is "used for information
244 * purposes only" */
245 __u8 pad[6];
246 } entries[0];
247 };
248
249 /* phys_disk_entry.type is a bitmap - bigendian remember */
250 #define DDF_Forced_PD_GUID 1
251 #define DDF_Active_in_VD 2
252 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
253 #define DDF_Spare 8 /* overrides Global_spare */
254 #define DDF_Foreign 16
255 #define DDF_Legacy 32 /* no DDF on this device */
256
257 #define DDF_Interface_mask 0xf00
258 #define DDF_Interface_SCSI 0x100
259 #define DDF_Interface_SAS 0x200
260 #define DDF_Interface_SATA 0x300
261 #define DDF_Interface_FC 0x400
262
263 /* phys_disk_entry.state is a bigendian bitmap */
264 #define DDF_Online 1
265 #define DDF_Failed 2 /* overrides 1,4,8 */
266 #define DDF_Rebuilding 4
267 #define DDF_Transition 8
268 #define DDF_SMART 16
269 #define DDF_ReadErrors 32
270 #define DDF_Missing 64
271
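/*
 * Illustrative sketch, not part of the original mdadm source: pd->state is
 * stored big-endian, so it is converted with be16_to_cpu() before the bits
 * above are tested; DDF_Failed overrides Online/Rebuilding/Transition.
 * The helper name is invented for this example.
 */
static inline int pd_state_usable_example(be16 state)
{
	__u16 s = be16_to_cpu(state);

	return (s & DDF_Online) && !(s & (DDF_Failed | DDF_Missing));
}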
272 /* The content of the virt_section global scope */
273 struct virtual_disk {
274 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
275 be32 crc;
276 be16 populated_vdes;
277 be16 max_vdes;
278 __u8 pad[52];
279 struct virtual_entry {
280 char guid[DDF_GUID_LEN];
281 be16 unit;
282 __u16 pad0; /* 0xffff */
283 be16 guid_crc;
284 be16 type;
285 __u8 state;
286 __u8 init_state;
287 __u8 pad1[14];
288 char name[16];
289 } entries[0];
290 };
291
292 /* virtual_entry.type is a bitmap - bigendian */
293 #define DDF_Shared 1
294 #define DDF_Enforce_Groups 2
295 #define DDF_Unicode 4
296 #define DDF_Owner_Valid 8
297
298 /* virtual_entry.state is a bigendian bitmap */
299 #define DDF_state_mask 0x7
300 #define DDF_state_optimal 0x0
301 #define DDF_state_degraded 0x1
302 #define DDF_state_deleted 0x2
303 #define DDF_state_missing 0x3
304 #define DDF_state_failed 0x4
305 #define DDF_state_part_optimal 0x5
306
307 #define DDF_state_morphing 0x8
308 #define DDF_state_inconsistent 0x10
309
310 /* virtual_entry.init_state is a bigendian bitmap */
311 #define DDF_initstate_mask 0x03
312 #define DDF_init_not 0x00
 313 #define DDF_init_quick 0x01 /* initialisation in progress,
 314                              * i.e. 'state_inconsistent' */
315 #define DDF_init_full 0x02
316
317 #define DDF_access_mask 0xc0
318 #define DDF_access_rw 0x00
319 #define DDF_access_ro 0x80
320 #define DDF_access_blocked 0xc0
321
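/*
 * Illustrative sketch, not part of the original mdadm source: the access
 * bits share the init_state byte, so they are masked and shifted down to a
 * small index (0..3) before being mapped to a name (see examine_vds()).
 * The helper name is invented for this example.
 */
static inline __u8 vd_access_bits_example(const struct virtual_entry *ve)
{
	return (ve->init_state & DDF_access_mask) >> 6;
}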
322 /* The content of the config_section - local scope
323 * It has multiple records each config_record_len sectors
324 * They can be vd_config or spare_assign
325 */
326
327 struct vd_config {
328 be32 magic; /* DDF_VD_CONF_MAGIC */
329 be32 crc;
330 char guid[DDF_GUID_LEN];
331 be32 timestamp;
332 be32 seqnum;
333 __u8 pad0[24];
334 be16 prim_elmnt_count;
335 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
336 __u8 prl;
337 __u8 rlq;
338 __u8 sec_elmnt_count;
339 __u8 sec_elmnt_seq;
340 __u8 srl;
341 be64 blocks; /* blocks per component could be different
342 * on different component devices...(only
343 * for concat I hope) */
344 be64 array_blocks; /* blocks in array */
345 __u8 pad1[8];
346 be32 spare_refs[8]; /* This is used to detect missing spares.
347 * As we don't have an interface for that
348 * the values are ignored.
349 */
350 __u8 cache_pol[8];
351 __u8 bg_rate;
352 __u8 pad2[3];
353 __u8 pad3[52];
354 __u8 pad4[192];
355 __u8 v0[32]; /* reserved- 0xff */
356 __u8 v1[32]; /* reserved- 0xff */
357 __u8 v2[16]; /* reserved- 0xff */
358 __u8 v3[16]; /* reserved- 0xff */
359 __u8 vendor[32];
360 be32 phys_refnum[0]; /* refnum of each disk in sequence */
361 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
362 bvd are always the same size */
363 };
364 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
365
366 /* vd_config.cache_pol[7] is a bitmap */
367 #define DDF_cache_writeback 1 /* else writethrough */
368 #define DDF_cache_wadaptive 2 /* only applies if writeback */
369 #define DDF_cache_readahead 4
370 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
371 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
372 #define DDF_cache_wallowed 32 /* enable write caching */
373 #define DDF_cache_rallowed 64 /* enable read caching */
374
375 struct spare_assign {
376 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
377 be32 crc;
378 be32 timestamp;
379 __u8 reserved[7];
380 __u8 type;
381 be16 populated; /* SAEs used */
382 be16 max; /* max SAEs */
383 __u8 pad[8];
384 struct spare_assign_entry {
385 char guid[DDF_GUID_LEN];
386 be16 secondary_element;
387 __u8 pad[6];
388 } spare_ents[0];
389 };
390 /* spare_assign.type is a bitmap */
391 #define DDF_spare_dedicated 0x1 /* else global */
392 #define DDF_spare_revertible 0x2 /* else committable */
393 #define DDF_spare_active 0x4 /* else not active */
394 #define DDF_spare_affinity 0x8 /* enclosure affinity */
395
396 /* The data_section contents - local scope */
397 struct disk_data {
398 be32 magic; /* DDF_PHYS_DATA_MAGIC */
399 be32 crc;
400 char guid[DDF_GUID_LEN];
401 be32 refnum; /* crc of some magic drive data ... */
402 __u8 forced_ref; /* set when above was not result of magic */
403 __u8 forced_guid; /* set if guid was forced rather than magic */
404 __u8 vendor[32];
405 __u8 pad[442];
406 };
407
408 /* bbm_section content */
409 struct bad_block_log {
410 be32 magic;
411 be32 crc;
412 be16 entry_count;
413 be32 spare_count;
414 __u8 pad[10];
415 be64 first_spare;
416 struct mapped_block {
417 be64 defective_start;
418 be32 replacement_start;
419 be16 remap_count;
420 __u8 pad[2];
421 } entries[0];
422 };
423
424 /* Struct for internally holding ddf structures */
425 /* The DDF structure stored on each device is potentially
426 * quite different, as some data is global and some is local.
427 * The global data is:
428 * - ddf header
429 * - controller_data
430 * - Physical disk records
431 * - Virtual disk records
432 * The local data is:
433 * - Configuration records
434 * - Physical Disk data section
435 * ( and Bad block and vendor which I don't care about yet).
436 *
437 * The local data is parsed into separate lists as it is read
438 * and reconstructed for writing. This means that we only need
439 * to make config changes once and they are automatically
440 * propagated to all devices.
441 * The global (config and disk data) records are each in a list
442 * of separate data structures. When writing we find the entry
443 * or entries applicable to the particular device.
444 */
445 struct ddf_super {
446 struct ddf_header anchor, primary, secondary;
447 struct ddf_controller_data controller;
448 struct ddf_header *active;
449 struct phys_disk *phys;
450 struct virtual_disk *virt;
451 char *conf;
452 int pdsize, vdsize;
453 unsigned int max_part, mppe, conf_rec_len;
454 int currentdev;
455 int updates_pending;
456 struct vcl {
457 union {
458 char space[512];
459 struct {
460 struct vcl *next;
461 unsigned int vcnum; /* index into ->virt */
462 /* For an array with a secondary level there are
463 * multiple vd_config structures, all with the same
464 * guid but with different sec_elmnt_seq.
465 * One of these structures is in 'conf' below.
466 * The others are in other_bvds, not in any
467 * particular order.
468 */
469 struct vd_config **other_bvds;
470 __u64 *block_sizes; /* NULL if all the same */
471 };
472 };
473 struct vd_config conf;
474 } *conflist, *currentconf;
475 struct dl {
476 union {
477 char space[512];
478 struct {
479 struct dl *next;
480 int major, minor;
481 char *devname;
482 int fd;
483 unsigned long long size; /* sectors */
484 be64 primary_lba; /* sectors */
485 be64 secondary_lba; /* sectors */
486 be64 workspace_lba; /* sectors */
487 int pdnum; /* index in ->phys */
488 struct spare_assign *spare;
489 void *mdupdate; /* hold metadata update */
490
491 /* These fields used by auto-layout */
492 int raiddisk; /* slot to fill in autolayout */
493 __u64 esize;
494 int displayed;
495 };
496 };
497 struct disk_data disk;
498 struct vcl *vlist[0]; /* max_part in size */
499 } *dlist, *add_list;
500 };
501
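/*
 * Illustrative sketch, not part of the original mdadm source: with
 * struct ddf_super defined, the start LBA (in sectors) of member 'i' of a
 * BVD can be read from the hidden lba_offset[] array that follows
 * phys_refnum[] (see the LBA_OFFSET() macro above).  The helper name is
 * invented for this example.
 */
static inline unsigned long long bvd_member_start_example(struct ddf_super *ddf,
							  struct vd_config *vd,
							  unsigned int i)
{
	/* LBA_OFFSET() skips the ddf->mppe phys_refnum entries */
	return be64_to_cpu(LBA_OFFSET(ddf, vd)[i]);
}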
502 #ifndef MDASSEMBLE
503 static int load_super_ddf_all(struct supertype *st, int fd,
504 void **sbp, char *devname);
505 static int get_svd_state(const struct ddf_super *, const struct vcl *);
506 static int
507 validate_geometry_ddf_container(struct supertype *st,
508 int level, int layout, int raiddisks,
509 int chunk, unsigned long long size,
510 unsigned long long data_offset,
511 char *dev, unsigned long long *freesize,
512 int verbose);
513
514 static int validate_geometry_ddf_bvd(struct supertype *st,
515 int level, int layout, int raiddisks,
516 int *chunk, unsigned long long size,
517 unsigned long long data_offset,
518 char *dev, unsigned long long *freesize,
519 int verbose);
520 #endif
521
522 static void free_super_ddf(struct supertype *st);
523 static int all_ff(const char *guid);
524 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
525 be32 refnum, unsigned int nmax,
526 const struct vd_config **bvd,
527 unsigned int *idx);
528 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
529 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
530 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
531 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i);
532 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
533 static int init_super_ddf_bvd(struct supertype *st,
534 mdu_array_info_t *info,
535 unsigned long long size,
536 char *name, char *homehost,
537 int *uuid, unsigned long long data_offset);
538
539 #ifndef offsetof
540 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
541 #endif
542
543 #if DEBUG
544 static void pr_state(struct ddf_super *ddf, const char *msg)
545 {
546 unsigned int i;
547 dprintf("%s/%s: ", __func__, msg);
548 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
549 if (all_ff(ddf->virt->entries[i].guid))
550 continue;
551 dprintf("%u(s=%02x i=%02x) ", i,
552 ddf->virt->entries[i].state,
553 ddf->virt->entries[i].init_state);
554 }
555 dprintf("\n");
556 }
557 #else
558 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
559 #endif
560
561 static void _ddf_set_updates_pending(struct ddf_super *ddf, struct vd_config *vc,
562 const char *func)
563 {
564 if (vc) {
565 vc->timestamp = cpu_to_be32(time(0)-DECADE);
566 vc->seqnum = cpu_to_be32(be32_to_cpu(vc->seqnum) + 1);
567 }
568 if (ddf->updates_pending)
569 return;
570 ddf->updates_pending = 1;
571 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
572 pr_state(ddf, func);
573 }
574
575 #define ddf_set_updates_pending(x,v) _ddf_set_updates_pending((x), (v), __func__)
576
577 static be32 calc_crc(void *buf, int len)
578 {
579 /* crcs are always at the same place as in the ddf_header */
580 struct ddf_header *ddf = buf;
581 be32 oldcrc = ddf->crc;
582 __u32 newcrc;
583 ddf->crc = cpu_to_be32(0xffffffff);
584
585 newcrc = crc32(0, buf, len);
586 ddf->crc = oldcrc;
587 /* The crc is stored (like everything) bigendian, so convert
588 * here for simplicity
589 */
590 return cpu_to_be32(newcrc);
591 }
592
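/*
 * Illustrative sketch, not part of the original mdadm source: because every
 * DDF section starts with (magic, crc) at the same offsets, a freshly read
 * 512-byte header block can be sanity-checked the same way throughout this
 * file.  The helper name is invented for this example.
 */
static inline int ddf_block_ok_example(void *block, be32 expected_magic)
{
	struct ddf_header *h = block;	/* only the shared magic/crc are used */

	return be32_eq(h->magic, expected_magic) &&
		be32_eq(calc_crc(block, 512), h->crc);
}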
593 #define DDF_INVALID_LEVEL 0xff
594 #define DDF_NO_SECONDARY 0xff
595 static int err_bad_md_layout(const mdu_array_info_t *array)
596 {
597 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
598 array->level, array->layout, array->raid_disks);
599 return -1;
600 }
601
602 static int layout_md2ddf(const mdu_array_info_t *array,
603 struct vd_config *conf)
604 {
605 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
606 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
607 __u8 sec_elmnt_count = 1;
608 __u8 srl = DDF_NO_SECONDARY;
609
610 switch (array->level) {
611 case LEVEL_LINEAR:
612 prl = DDF_CONCAT;
613 break;
614 case 0:
615 rlq = DDF_RAID0_SIMPLE;
616 prl = DDF_RAID0;
617 break;
618 case 1:
619 switch (array->raid_disks) {
620 case 2:
621 rlq = DDF_RAID1_SIMPLE;
622 break;
623 case 3:
624 rlq = DDF_RAID1_MULTI;
625 break;
626 default:
627 return err_bad_md_layout(array);
628 }
629 prl = DDF_RAID1;
630 break;
631 case 4:
632 if (array->layout != 0)
633 return err_bad_md_layout(array);
634 rlq = DDF_RAID4_N;
635 prl = DDF_RAID4;
636 break;
637 case 5:
638 switch (array->layout) {
639 case ALGORITHM_LEFT_ASYMMETRIC:
640 rlq = DDF_RAID5_N_RESTART;
641 break;
642 case ALGORITHM_RIGHT_ASYMMETRIC:
643 rlq = DDF_RAID5_0_RESTART;
644 break;
645 case ALGORITHM_LEFT_SYMMETRIC:
646 rlq = DDF_RAID5_N_CONTINUE;
647 break;
648 case ALGORITHM_RIGHT_SYMMETRIC:
649 /* not mentioned in standard */
650 default:
651 return err_bad_md_layout(array);
652 }
653 prl = DDF_RAID5;
654 break;
655 case 6:
656 switch (array->layout) {
657 case ALGORITHM_ROTATING_N_RESTART:
658 rlq = DDF_RAID5_N_RESTART;
659 break;
660 case ALGORITHM_ROTATING_ZERO_RESTART:
661 rlq = DDF_RAID6_0_RESTART;
662 break;
663 case ALGORITHM_ROTATING_N_CONTINUE:
664 rlq = DDF_RAID5_N_CONTINUE;
665 break;
666 default:
667 return err_bad_md_layout(array);
668 }
669 prl = DDF_RAID6;
670 break;
671 case 10:
672 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
673 rlq = DDF_RAID1_SIMPLE;
674 prim_elmnt_count = cpu_to_be16(2);
675 sec_elmnt_count = array->raid_disks / 2;
676 srl = DDF_2SPANNED;
677 prl = DDF_RAID1;
678 } else if (array->raid_disks % 3 == 0
679 && array->layout == 0x103) {
680 rlq = DDF_RAID1_MULTI;
681 prim_elmnt_count = cpu_to_be16(3);
682 sec_elmnt_count = array->raid_disks / 3;
683 srl = DDF_2SPANNED;
684 prl = DDF_RAID1;
685 } else if (array->layout == 0x201) {
686 prl = DDF_RAID1E;
687 rlq = DDF_RAID1E_OFFSET;
688 } else if (array->layout == 0x102) {
689 prl = DDF_RAID1E;
690 rlq = DDF_RAID1E_ADJACENT;
691 } else
692 return err_bad_md_layout(array);
693 break;
694 default:
695 return err_bad_md_layout(array);
696 }
697 conf->prl = prl;
698 conf->prim_elmnt_count = prim_elmnt_count;
699 conf->rlq = rlq;
700 conf->srl = srl;
701 conf->sec_elmnt_count = sec_elmnt_count;
702 return 0;
703 }
704
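/*
 * Illustrative sketch, not part of the original mdadm source: mapping an md
 * RAID5 array (left-symmetric layout, 4 disks - values chosen purely for
 * this example) onto the DDF PRL/RLQ fields with layout_md2ddf().
 */
static inline int layout_md2ddf_example(struct vd_config *conf)
{
	mdu_array_info_t array = {
		.level = 5,
		.layout = ALGORITHM_LEFT_SYMMETRIC,
		.raid_disks = 4,
	};

	/* on success conf->prl == DDF_RAID5 and conf->rlq == DDF_RAID5_N_CONTINUE */
	return layout_md2ddf(&array, conf);
}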
705 static int err_bad_ddf_layout(const struct vd_config *conf)
706 {
707 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
708 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
709 return -1;
710 }
711
712 static int layout_ddf2md(const struct vd_config *conf,
713 mdu_array_info_t *array)
714 {
715 int level = LEVEL_UNSUPPORTED;
716 int layout = 0;
717 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
718
719 if (conf->sec_elmnt_count > 1) {
720 /* see also check_secondary() */
721 if (conf->prl != DDF_RAID1 ||
722 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
723 pr_err("Unsupported secondary RAID level %u/%u\n",
724 conf->prl, conf->srl);
725 return -1;
726 }
727 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
728 layout = 0x102;
729 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
730 layout = 0x103;
731 else
732 return err_bad_ddf_layout(conf);
733 raiddisks *= conf->sec_elmnt_count;
734 level = 10;
735 goto good;
736 }
737
738 switch (conf->prl) {
739 case DDF_CONCAT:
740 level = LEVEL_LINEAR;
741 break;
742 case DDF_RAID0:
743 if (conf->rlq != DDF_RAID0_SIMPLE)
744 return err_bad_ddf_layout(conf);
745 level = 0;
746 break;
747 case DDF_RAID1:
748 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
749 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
750 return err_bad_ddf_layout(conf);
751 level = 1;
752 break;
753 case DDF_RAID1E:
754 if (conf->rlq == DDF_RAID1E_ADJACENT)
755 layout = 0x102;
756 else if (conf->rlq == DDF_RAID1E_OFFSET)
757 layout = 0x201;
758 else
759 return err_bad_ddf_layout(conf);
760 level = 10;
761 break;
762 case DDF_RAID4:
763 if (conf->rlq != DDF_RAID4_N)
764 return err_bad_ddf_layout(conf);
765 level = 4;
766 break;
767 case DDF_RAID5:
768 switch (conf->rlq) {
769 case DDF_RAID5_N_RESTART:
770 layout = ALGORITHM_LEFT_ASYMMETRIC;
771 break;
772 case DDF_RAID5_0_RESTART:
773 layout = ALGORITHM_RIGHT_ASYMMETRIC;
774 break;
775 case DDF_RAID5_N_CONTINUE:
776 layout = ALGORITHM_LEFT_SYMMETRIC;
777 break;
778 default:
779 return err_bad_ddf_layout(conf);
780 }
781 level = 5;
782 break;
783 case DDF_RAID6:
784 switch (conf->rlq) {
785 case DDF_RAID5_N_RESTART:
786 layout = ALGORITHM_ROTATING_N_RESTART;
787 break;
788 case DDF_RAID6_0_RESTART:
789 layout = ALGORITHM_ROTATING_ZERO_RESTART;
790 break;
791 case DDF_RAID5_N_CONTINUE:
792 layout = ALGORITHM_ROTATING_N_CONTINUE;
793 break;
794 default:
795 return err_bad_ddf_layout(conf);
796 }
797 level = 6;
798 break;
799 default:
800 return err_bad_ddf_layout(conf);
801 };
802
803 good:
804 array->level = level;
805 array->layout = layout;
806 array->raid_disks = raiddisks;
807 return 0;
808 }
809
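/*
 * Illustrative sketch, not part of the original mdadm source: the reverse
 * mapping.  A DDF config with PRL=RAID1, SRL=Spanned, two disks per mirror
 * and three mirrors comes back as Linux RAID10, layout 0x102 ('n2'), with
 * six raid disks.  The values are chosen purely for this example.
 */
static inline int layout_ddf2md_example(mdu_array_info_t *array)
{
	struct vd_config conf = {
		.prim_elmnt_count = cpu_to_be16(2),
		.prl = DDF_RAID1,
		.rlq = DDF_RAID1_SIMPLE,
		.sec_elmnt_count = 3,
		.srl = DDF_2SPANNED,
	};

	/* expect array->level == 10, layout == 0x102, raid_disks == 6 */
	return layout_ddf2md(&conf, array);
}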
810 static int load_ddf_header(int fd, unsigned long long lba,
811 unsigned long long size,
812 int type,
813 struct ddf_header *hdr, struct ddf_header *anchor)
814 {
815 /* read a ddf header (primary or secondary) from fd/lba
816 * and check that it is consistent with anchor
817 * Need to check:
 818  *  magic, crc, guid, rev, and the LBAs; the header_type must be as expected,
 819  *  and everything after header_type must be the same
820 */
821 if (lba >= size-1)
822 return 0;
823
824 if (lseek64(fd, lba<<9, 0) < 0)
825 return 0;
826
827 if (read(fd, hdr, 512) != 512)
828 return 0;
829
830 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
831 pr_err("%s: bad header magic\n", __func__);
832 return 0;
833 }
834 if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
835 pr_err("%s: bad CRC\n", __func__);
836 return 0;
837 }
838 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
839 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
840 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
841 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
842 hdr->type != type ||
843 memcmp(anchor->pad2, hdr->pad2, 512 -
844 offsetof(struct ddf_header, pad2)) != 0) {
845 pr_err("%s: header mismatch\n", __func__);
846 return 0;
847 }
848
849 /* Looks good enough to me... */
850 return 1;
851 }
852
853 static void *load_section(int fd, struct ddf_super *super, void *buf,
854 be32 offset_be, be32 len_be, int check)
855 {
856 unsigned long long offset = be32_to_cpu(offset_be);
857 unsigned long long len = be32_to_cpu(len_be);
858 int dofree = (buf == NULL);
859
860 if (check)
861 if (len != 2 && len != 8 && len != 32
862 && len != 128 && len != 512)
863 return NULL;
864
865 if (len > 1024)
866 return NULL;
867 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
868 buf = NULL;
869
870 if (!buf)
871 return NULL;
872
873 if (super->active->type == 1)
874 offset += be64_to_cpu(super->active->primary_lba);
875 else
876 offset += be64_to_cpu(super->active->secondary_lba);
877
878 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
879 if (dofree)
880 free(buf);
881 return NULL;
882 }
883 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
884 if (dofree)
885 free(buf);
886 return NULL;
887 }
888 return buf;
889 }
890
891 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
892 {
893 unsigned long long dsize;
894
895 get_dev_size(fd, NULL, &dsize);
896
897 if (lseek64(fd, dsize-512, 0) < 0) {
898 if (devname)
899 pr_err("Cannot seek to anchor block on %s: %s\n",
900 devname, strerror(errno));
901 return 1;
902 }
903 if (read(fd, &super->anchor, 512) != 512) {
904 if (devname)
905 pr_err("Cannot read anchor block on %s: %s\n",
906 devname, strerror(errno));
907 return 1;
908 }
909 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
910 if (devname)
911 pr_err("no DDF anchor found on %s\n",
912 devname);
913 return 2;
914 }
915 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
916 if (devname)
917 pr_err("bad CRC on anchor on %s\n",
918 devname);
919 return 2;
920 }
921 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
922 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
923 if (devname)
924 pr_err("can only support super revision"
925 " %.8s and earlier, not %.8s on %s\n",
926 DDF_REVISION_2, super->anchor.revision,devname);
927 return 2;
928 }
929 super->active = NULL;
930 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
931 dsize >> 9, 1,
932 &super->primary, &super->anchor) == 0) {
933 if (devname)
934 pr_err("Failed to load primary DDF header "
935 "on %s\n", devname);
936 } else
937 super->active = &super->primary;
938
939 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
940 dsize >> 9, 2,
941 &super->secondary, &super->anchor)) {
942 if (super->active == NULL
943 || (be32_to_cpu(super->primary.seq)
944 < be32_to_cpu(super->secondary.seq) &&
945 !super->secondary.openflag)
946 || (be32_to_cpu(super->primary.seq)
947 == be32_to_cpu(super->secondary.seq) &&
948 super->primary.openflag && !super->secondary.openflag)
949 )
950 super->active = &super->secondary;
951 } else if (devname &&
952 be64_to_cpu(super->anchor.secondary_lba) != ~(__u64)0)
953 pr_err("Failed to load secondary DDF header on %s\n",
954 devname);
955 if (super->active == NULL)
956 return 2;
957 return 0;
958 }
959
960 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
961 {
962 void *ok;
963 ok = load_section(fd, super, &super->controller,
964 super->active->controller_section_offset,
965 super->active->controller_section_length,
966 0);
967 super->phys = load_section(fd, super, NULL,
968 super->active->phys_section_offset,
969 super->active->phys_section_length,
970 1);
971 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
972
973 super->virt = load_section(fd, super, NULL,
974 super->active->virt_section_offset,
975 super->active->virt_section_length,
976 1);
977 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
978 if (!ok ||
979 !super->phys ||
980 !super->virt) {
981 free(super->phys);
982 free(super->virt);
983 super->phys = NULL;
984 super->virt = NULL;
985 return 2;
986 }
987 super->conflist = NULL;
988 super->dlist = NULL;
989
990 super->max_part = be16_to_cpu(super->active->max_partitions);
991 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
992 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
993 return 0;
994 }
995
996 #define DDF_UNUSED_BVD 0xff
997 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
998 {
999 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
1000 unsigned int i, vdsize;
1001 void *p;
1002 if (n_vds == 0) {
1003 vcl->other_bvds = NULL;
1004 return 0;
1005 }
1006 vdsize = ddf->conf_rec_len * 512;
1007 if (posix_memalign(&p, 512, n_vds *
1008 (vdsize + sizeof(struct vd_config *))) != 0)
1009 return -1;
1010 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
1011 for (i = 0; i < n_vds; i++) {
1012 vcl->other_bvds[i] = p + i * vdsize;
1013 memset(vcl->other_bvds[i], 0, vdsize);
1014 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
1015 }
1016 return 0;
1017 }
1018
1019 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
1020 unsigned int len)
1021 {
1022 int i;
1023 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
1024 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
1025 break;
1026
1027 if (i < vcl->conf.sec_elmnt_count-1) {
1028 if (be32_to_cpu(vd->seqnum) <=
1029 be32_to_cpu(vcl->other_bvds[i]->seqnum))
1030 return;
1031 } else {
1032 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
1033 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
1034 break;
1035 if (i == vcl->conf.sec_elmnt_count-1) {
1036 pr_err("no space for sec level config %u, count is %u\n",
1037 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
1038 return;
1039 }
1040 }
1041 memcpy(vcl->other_bvds[i], vd, len);
1042 }
1043
1044 static int load_ddf_local(int fd, struct ddf_super *super,
1045 char *devname, int keep)
1046 {
1047 struct dl *dl;
1048 struct stat stb;
1049 char *conf;
1050 unsigned int i;
1051 unsigned int confsec;
1052 int vnum;
1053 unsigned int max_virt_disks =
1054 be16_to_cpu(super->active->max_vd_entries);
1055 unsigned long long dsize;
1056
1057 /* First the local disk info */
1058 if (posix_memalign((void**)&dl, 512,
1059 sizeof(*dl) +
1060 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
1061 pr_err("%s could not allocate disk info buffer\n",
1062 __func__);
1063 return 1;
1064 }
1065
1066 load_section(fd, super, &dl->disk,
1067 super->active->data_section_offset,
1068 super->active->data_section_length,
1069 0);
1070 dl->devname = devname ? xstrdup(devname) : NULL;
1071
1072 fstat(fd, &stb);
1073 dl->major = major(stb.st_rdev);
1074 dl->minor = minor(stb.st_rdev);
1075 dl->next = super->dlist;
1076 dl->fd = keep ? fd : -1;
1077
1078 dl->size = 0;
1079 if (get_dev_size(fd, devname, &dsize))
1080 dl->size = dsize >> 9;
1081 /* If the disks have different sizes, the LBAs will differ
1082 * between phys disks.
1083  	 * At this point, the values in super->active must be valid
1084 * for this phys disk. */
1085 dl->primary_lba = super->active->primary_lba;
1086 dl->secondary_lba = super->active->secondary_lba;
1087 dl->workspace_lba = super->active->workspace_lba;
1088 dl->spare = NULL;
1089 for (i = 0 ; i < super->max_part ; i++)
1090 dl->vlist[i] = NULL;
1091 super->dlist = dl;
1092 dl->pdnum = -1;
1093 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1094 if (memcmp(super->phys->entries[i].guid,
1095 dl->disk.guid, DDF_GUID_LEN) == 0)
1096 dl->pdnum = i;
1097
1098 /* Now the config list. */
1099 /* 'conf' is an array of config entries, some of which are
1100 * probably invalid. Those which are good need to be copied into
1101 * the conflist
1102 */
1103
1104 conf = load_section(fd, super, super->conf,
1105 super->active->config_section_offset,
1106 super->active->config_section_length,
1107 0);
1108 super->conf = conf;
1109 vnum = 0;
1110 for (confsec = 0;
1111 confsec < be32_to_cpu(super->active->config_section_length);
1112 confsec += super->conf_rec_len) {
1113 struct vd_config *vd =
1114 (struct vd_config *)((char*)conf + confsec*512);
1115 struct vcl *vcl;
1116
1117 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1118 if (dl->spare)
1119 continue;
1120 if (posix_memalign((void**)&dl->spare, 512,
1121 super->conf_rec_len*512) != 0) {
1122 pr_err("%s could not allocate spare info buf\n",
1123 __func__);
1124 return 1;
1125 }
1126
1127 memcpy(dl->spare, vd, super->conf_rec_len*512);
1128 continue;
1129 }
1130 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1131 /* Must be vendor-unique - I cannot handle those */
1132 continue;
1133
1134 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1135 if (memcmp(vcl->conf.guid,
1136 vd->guid, DDF_GUID_LEN) == 0)
1137 break;
1138 }
1139
1140 if (vcl) {
1141 dl->vlist[vnum++] = vcl;
1142 if (vcl->other_bvds != NULL &&
1143 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1144 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1145 continue;
1146 }
1147 if (be32_to_cpu(vd->seqnum) <=
1148 be32_to_cpu(vcl->conf.seqnum))
1149 continue;
1150 } else {
1151 if (posix_memalign((void**)&vcl, 512,
1152 (super->conf_rec_len*512 +
1153 offsetof(struct vcl, conf))) != 0) {
1154 pr_err("%s could not allocate vcl buf\n",
1155 __func__);
1156 return 1;
1157 }
1158 vcl->next = super->conflist;
1159 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1160 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1161 if (alloc_other_bvds(super, vcl) != 0) {
1162 pr_err("%s could not allocate other bvds\n",
1163 __func__);
1164 free(vcl);
1165 return 1;
1166 };
1167 super->conflist = vcl;
1168 dl->vlist[vnum++] = vcl;
1169 }
1170 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1171 for (i=0; i < max_virt_disks ; i++)
1172 if (memcmp(super->virt->entries[i].guid,
1173 vcl->conf.guid, DDF_GUID_LEN)==0)
1174 break;
1175 if (i < max_virt_disks)
1176 vcl->vcnum = i;
1177 }
1178
1179 return 0;
1180 }
1181
1182 static int load_super_ddf(struct supertype *st, int fd,
1183 char *devname)
1184 {
1185 unsigned long long dsize;
1186 struct ddf_super *super;
1187 int rv;
1188
1189 if (get_dev_size(fd, devname, &dsize) == 0)
1190 return 1;
1191
1192 if (test_partition(fd))
1193 /* DDF is not allowed on partitions */
1194 return 1;
1195
1196 /* 32M is a lower bound */
1197 if (dsize <= 32*1024*1024) {
1198 if (devname)
1199 pr_err("%s is too small for ddf: "
1200 "size is %llu sectors.\n",
1201 devname, dsize>>9);
1202 return 1;
1203 }
1204 if (dsize & 511) {
1205 if (devname)
1206 pr_err("%s is an odd size for ddf: "
1207 "size is %llu bytes.\n",
1208 devname, dsize);
1209 return 1;
1210 }
1211
1212 free_super_ddf(st);
1213
1214 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1215 pr_err("malloc of %zu failed.\n",
1216 sizeof(*super));
1217 return 1;
1218 }
1219 memset(super, 0, sizeof(*super));
1220
1221 rv = load_ddf_headers(fd, super, devname);
1222 if (rv) {
1223 free(super);
1224 return rv;
1225 }
1226
1227  	/* Have valid headers and have chosen the best. Let's read in the rest */
1228
1229 rv = load_ddf_global(fd, super, devname);
1230
1231 if (rv) {
1232 if (devname)
1233 pr_err("Failed to load all information "
1234 "sections on %s\n", devname);
1235 free(super);
1236 return rv;
1237 }
1238
1239 rv = load_ddf_local(fd, super, devname, 0);
1240
1241 if (rv) {
1242 if (devname)
1243 pr_err("Failed to load all information "
1244 "sections on %s\n", devname);
1245 free(super);
1246 return rv;
1247 }
1248
1249 /* Should possibly check the sections .... */
1250
1251 st->sb = super;
1252 if (st->ss == NULL) {
1253 st->ss = &super_ddf;
1254 st->minor_version = 0;
1255 st->max_devs = 512;
1256 }
1257 return 0;
1258
1259 }
1260
1261 static void free_super_ddf(struct supertype *st)
1262 {
1263 struct ddf_super *ddf = st->sb;
1264 if (ddf == NULL)
1265 return;
1266 free(ddf->phys);
1267 free(ddf->virt);
1268 free(ddf->conf);
1269 while (ddf->conflist) {
1270 struct vcl *v = ddf->conflist;
1271 ddf->conflist = v->next;
1272 if (v->block_sizes)
1273 free(v->block_sizes);
1274 if (v->other_bvds)
1275 /*
1276 v->other_bvds[0] points to beginning of buffer,
1277 see alloc_other_bvds()
1278 */
1279 free(v->other_bvds[0]);
1280 free(v);
1281 }
1282 while (ddf->dlist) {
1283 struct dl *d = ddf->dlist;
1284 ddf->dlist = d->next;
1285 if (d->fd >= 0)
1286 close(d->fd);
1287 if (d->spare)
1288 free(d->spare);
1289 free(d);
1290 }
1291 while (ddf->add_list) {
1292 struct dl *d = ddf->add_list;
1293 ddf->add_list = d->next;
1294 if (d->fd >= 0)
1295 close(d->fd);
1296 if (d->spare)
1297 free(d->spare);
1298 free(d);
1299 }
1300 free(ddf);
1301 st->sb = NULL;
1302 }
1303
1304 static struct supertype *match_metadata_desc_ddf(char *arg)
1305 {
1306 /* 'ddf' only supports containers */
1307 struct supertype *st;
1308 if (strcmp(arg, "ddf") != 0 &&
1309 strcmp(arg, "default") != 0
1310 )
1311 return NULL;
1312
1313 st = xcalloc(1, sizeof(*st));
1314 st->ss = &super_ddf;
1315 st->max_devs = 512;
1316 st->minor_version = 0;
1317 st->sb = NULL;
1318 return st;
1319 }
1320
1321 #ifndef MDASSEMBLE
1322
1323 static mapping_t ddf_state[] = {
1324 { "Optimal", 0},
1325 { "Degraded", 1},
1326 { "Deleted", 2},
1327 { "Missing", 3},
1328 { "Failed", 4},
1329 { "Partially Optimal", 5},
1330 { "-reserved-", 6},
1331 { "-reserved-", 7},
1332 { NULL, 0}
1333 };
1334
1335 static mapping_t ddf_init_state[] = {
1336 { "Not Initialised", 0},
1337 { "QuickInit in Progress", 1},
1338 { "Fully Initialised", 2},
1339 { "*UNKNOWN*", 3},
1340 { NULL, 0}
1341 };
1342 static mapping_t ddf_access[] = {
1343 { "Read/Write", 0},
1344 { "Reserved", 1},
1345 { "Read Only", 2},
1346 { "Blocked (no access)", 3},
1347 { NULL ,0}
1348 };
1349
1350 static mapping_t ddf_level[] = {
1351 { "RAID0", DDF_RAID0},
1352 { "RAID1", DDF_RAID1},
1353 { "RAID3", DDF_RAID3},
1354 { "RAID4", DDF_RAID4},
1355 { "RAID5", DDF_RAID5},
1356 { "RAID1E",DDF_RAID1E},
1357 { "JBOD", DDF_JBOD},
1358 { "CONCAT",DDF_CONCAT},
1359 { "RAID5E",DDF_RAID5E},
1360 { "RAID5EE",DDF_RAID5EE},
1361 { "RAID6", DDF_RAID6},
1362 { NULL, 0}
1363 };
1364 static mapping_t ddf_sec_level[] = {
1365 { "Striped", DDF_2STRIPED},
1366 { "Mirrored", DDF_2MIRRORED},
1367 { "Concat", DDF_2CONCAT},
1368 { "Spanned", DDF_2SPANNED},
1369 { NULL, 0}
1370 };
1371 #endif
1372
1373 static int all_ff(const char *guid)
1374 {
1375 int i;
1376 for (i = 0; i < DDF_GUID_LEN; i++)
1377 if (guid[i] != (char)0xff)
1378 return 0;
1379 return 1;
1380 }
1381
1382 static const char *guid_str(const char *guid)
1383 {
1384 static char buf[DDF_GUID_LEN*2+1];
1385 int i;
1386 char *p = buf;
1387 for (i = 0; i < DDF_GUID_LEN; i++) {
1388 unsigned char c = guid[i];
1389 if (c >= 32 && c < 127)
1390 p += sprintf(p, "%c", c);
1391 else
1392 p += sprintf(p, "%02x", c);
1393 }
1394 *p = '\0';
1395 return (const char *) buf;
1396 }
1397
1398 #ifndef MDASSEMBLE
1399 static void print_guid(char *guid, int tstamp)
1400 {
1401 	/* GUIDs are part (or all) ASCII and part binary.
1402 * They tend to be space padded.
1403 * We print the GUID in HEX, then in parentheses add
1404 * any initial ASCII sequence, and a possible
1405 * time stamp from bytes 16-19
1406 */
1407 int l = DDF_GUID_LEN;
1408 int i;
1409
1410 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1411 if ((i&3)==0 && i != 0) printf(":");
1412 printf("%02X", guid[i]&255);
1413 }
1414
1415 printf("\n (");
1416 while (l && guid[l-1] == ' ')
1417 l--;
1418 for (i=0 ; i<l ; i++) {
1419 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1420 fputc(guid[i], stdout);
1421 else
1422 break;
1423 }
1424 if (tstamp) {
1425 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1426 char tbuf[100];
1427 struct tm *tm;
1428 tm = localtime(&then);
1429 strftime(tbuf, 100, " %D %T",tm);
1430 fputs(tbuf, stdout);
1431 }
1432 printf(")");
1433 }
1434
1435 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1436 {
1437 int crl = sb->conf_rec_len;
1438 struct vcl *vcl;
1439
1440 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1441 unsigned int i;
1442 struct vd_config *vc = &vcl->conf;
1443
1444 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1445 continue;
1446 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1447 continue;
1448
1449 /* Ok, we know about this VD, let's give more details */
1450 printf(" Raid Devices[%d] : %d (", n,
1451 be16_to_cpu(vc->prim_elmnt_count));
1452 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1453 int j;
1454 int cnt = be16_to_cpu(sb->phys->max_pdes);
1455 for (j=0; j<cnt; j++)
1456 if (be32_eq(vc->phys_refnum[i],
1457 sb->phys->entries[j].refnum))
1458 break;
1459 if (i) printf(" ");
1460 if (j < cnt)
1461 printf("%d", j);
1462 else
1463 printf("--");
1464 }
1465 printf(")\n");
1466 if (vc->chunk_shift != 255)
1467 printf(" Chunk Size[%d] : %d sectors\n", n,
1468 1 << vc->chunk_shift);
1469 printf(" Raid Level[%d] : %s\n", n,
1470 map_num(ddf_level, vc->prl)?:"-unknown-");
1471 if (vc->sec_elmnt_count != 1) {
1472 printf(" Secondary Position[%d] : %d of %d\n", n,
1473 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1474 printf(" Secondary Level[%d] : %s\n", n,
1475 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1476 }
1477 printf(" Device Size[%d] : %llu\n", n,
1478 be64_to_cpu(vc->blocks)/2);
1479 printf(" Array Size[%d] : %llu\n", n,
1480 be64_to_cpu(vc->array_blocks)/2);
1481 }
1482 }
1483
1484 static void examine_vds(struct ddf_super *sb)
1485 {
1486 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1487 unsigned int i;
1488 printf(" Virtual Disks : %d\n", cnt);
1489
1490 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1491 struct virtual_entry *ve = &sb->virt->entries[i];
1492 if (all_ff(ve->guid))
1493 continue;
1494 printf("\n");
1495 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1496 printf("\n");
1497 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1498 printf(" state[%d] : %s, %s%s\n", i,
1499 map_num(ddf_state, ve->state & 7),
1500 (ve->state & DDF_state_morphing) ? "Morphing, ": "",
1501 (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
1502 printf(" init state[%d] : %s\n", i,
1503 map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
1504 printf(" access[%d] : %s\n", i,
1505 map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
1506 printf(" Name[%d] : %.16s\n", i, ve->name);
1507 examine_vd(i, sb, ve->guid);
1508 }
1509 if (cnt) printf("\n");
1510 }
1511
1512 static void examine_pds(struct ddf_super *sb)
1513 {
1514 int cnt = be16_to_cpu(sb->phys->max_pdes);
1515 int i;
1516 struct dl *dl;
1517 int unlisted = 0;
1518 printf(" Physical Disks : %d\n", cnt);
1519 printf(" Number RefNo Size Device Type/State\n");
1520
1521 for (dl = sb->dlist; dl; dl = dl->next)
1522 dl->displayed = 0;
1523
1524 for (i=0 ; i<cnt ; i++) {
1525 struct phys_disk_entry *pd = &sb->phys->entries[i];
1526 int type = be16_to_cpu(pd->type);
1527 int state = be16_to_cpu(pd->state);
1528
1529 if (be32_to_cpu(pd->refnum) == 0xffffffff)
1530 /* Not in use */
1531 continue;
1532 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1533 //printf("\n");
1534 printf(" %3d %08x ", i,
1535 be32_to_cpu(pd->refnum));
1536 printf("%8lluK ",
1537 be64_to_cpu(pd->config_size)>>1);
1538 for (dl = sb->dlist; dl ; dl = dl->next) {
1539 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1540 char *dv = map_dev(dl->major, dl->minor, 0);
1541 if (dv) {
1542 printf("%-15s", dv);
1543 break;
1544 }
1545 }
1546 }
1547 if (!dl)
1548 printf("%15s","");
1549 else
1550 dl->displayed = 1;
1551 printf(" %s%s%s%s%s",
1552 (type&2) ? "active":"",
1553 (type&4) ? "Global-Spare":"",
1554 (type&8) ? "spare" : "",
1555 (type&16)? ", foreign" : "",
1556 (type&32)? "pass-through" : "");
1557 if (state & DDF_Failed)
1558 /* This over-rides these three */
1559 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1560 printf("/%s%s%s%s%s%s%s",
1561 (state&1)? "Online": "Offline",
1562 (state&2)? ", Failed": "",
1563 (state&4)? ", Rebuilding": "",
1564 (state&8)? ", in-transition": "",
1565 (state&16)? ", SMART-errors": "",
1566 (state&32)? ", Unrecovered-Read-Errors": "",
1567 (state&64)? ", Missing" : "");
1568 printf("\n");
1569 }
1570 for (dl = sb->dlist; dl; dl = dl->next) {
1571 char *dv;
1572 if (dl->displayed)
1573 continue;
1574 if (!unlisted)
1575 printf(" Physical disks not in metadata!:\n");
1576 unlisted = 1;
1577 dv = map_dev(dl->major, dl->minor, 0);
1578 printf(" %08x %s\n", be32_to_cpu(dl->disk.refnum),
1579 dv ? dv : "-unknown-");
1580 }
1581 if (unlisted)
1582 printf("\n");
1583 }
1584
1585 static void examine_super_ddf(struct supertype *st, char *homehost)
1586 {
1587 struct ddf_super *sb = st->sb;
1588
1589 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1590 printf(" Version : %.8s\n", sb->anchor.revision);
1591 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1592 printf("\n");
1593 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1594 printf("\n");
1595 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1596 printf(" Redundant hdr : %s\n", (be32_eq(sb->secondary.magic,
1597 DDF_HEADER_MAGIC)
1598 ?"yes" : "no"));
1599 examine_vds(sb);
1600 examine_pds(sb);
1601 }
1602
1603 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1604 {
1605 /*
1606 * Figure out the VD number for this supertype.
1607 * Returns DDF_CONTAINER for the container itself,
1608 * and DDF_NOTFOUND on error.
1609 */
1610 struct ddf_super *ddf = st->sb;
1611 struct mdinfo *sra;
1612 char *sub, *end;
1613 unsigned int vcnum;
1614
1615 if (*st->container_devnm == '\0')
1616 return DDF_CONTAINER;
1617
1618 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1619 if (!sra || sra->array.major_version != -1 ||
1620 sra->array.minor_version != -2 ||
1621 !is_subarray(sra->text_version))
1622 return DDF_NOTFOUND;
1623
1624 sub = strchr(sra->text_version + 1, '/');
1625 if (sub != NULL)
1626 vcnum = strtoul(sub + 1, &end, 10);
1627 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1628 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1629 return DDF_NOTFOUND;
1630
1631 return vcnum;
1632 }
1633
1634 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1635 {
1636 /* We just write a generic DDF ARRAY entry
1637 */
1638 struct mdinfo info;
1639 char nbuf[64];
1640 getinfo_super_ddf(st, &info, NULL);
1641 fname_from_uuid(st, &info, nbuf, ':');
1642
1643 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1644 }
1645
1646 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1647 {
1648 /* We write a DDF ARRAY member entry for each vd, identifying container
1649 * by uuid and member by unit number and uuid.
1650 */
1651 struct ddf_super *ddf = st->sb;
1652 struct mdinfo info;
1653 unsigned int i;
1654 char nbuf[64];
1655 getinfo_super_ddf(st, &info, NULL);
1656 fname_from_uuid(st, &info, nbuf, ':');
1657
1658 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1659 struct virtual_entry *ve = &ddf->virt->entries[i];
1660 struct vcl vcl;
1661 char nbuf1[64];
1662 char namebuf[17];
1663 if (all_ff(ve->guid))
1664 continue;
1665 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1666 ddf->currentconf =&vcl;
1667 vcl.vcnum = i;
1668 uuid_from_super_ddf(st, info.uuid);
1669 fname_from_uuid(st, &info, nbuf1, ':');
1670 _ddf_array_name(namebuf, ddf, i);
1671 printf("ARRAY%s%s container=%s member=%d UUID=%s\n",
1672 namebuf[0] == '\0' ? "" : " /dev/md/", namebuf,
1673 nbuf+5, i, nbuf1+5);
1674 }
1675 }
1676
1677 static void export_examine_super_ddf(struct supertype *st)
1678 {
1679 struct mdinfo info;
1680 char nbuf[64];
1681 getinfo_super_ddf(st, &info, NULL);
1682 fname_from_uuid(st, &info, nbuf, ':');
1683 printf("MD_METADATA=ddf\n");
1684 printf("MD_LEVEL=container\n");
1685 printf("MD_UUID=%s\n", nbuf+5);
1686 printf("MD_DEVICES=%u\n",
1687 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1688 }
1689
1690 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1691 {
1692 void *buf;
1693 unsigned long long dsize, offset;
1694 int bytes;
1695 struct ddf_header *ddf;
1696 int written = 0;
1697
1698 	/* The metadata consists of an anchor, a primary, and a secondary header.
1699 * This all lives at the end of the device.
1700 * So it is easiest to find the earliest of primary and
1701 * secondary, and copy everything from there.
1702 *
1703 	 * Anchor is 512 bytes from the end. It contains primary_lba and secondary_lba;
1704 	 * we choose the earlier of those.
1705 */
1706
1707 if (posix_memalign(&buf, 4096, 4096) != 0)
1708 return 1;
1709
1710 if (!get_dev_size(from, NULL, &dsize))
1711 goto err;
1712
1713 if (lseek64(from, dsize-512, 0) < 0)
1714 goto err;
1715 if (read(from, buf, 512) != 512)
1716 goto err;
1717 ddf = buf;
1718 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1719 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1720 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1721 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1722 goto err;
1723
1724 offset = dsize - 512;
1725 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1726 offset = be64_to_cpu(ddf->primary_lba) << 9;
1727 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1728 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1729
1730 bytes = dsize - offset;
1731
1732 if (lseek64(from, offset, 0) < 0 ||
1733 lseek64(to, offset, 0) < 0)
1734 goto err;
1735 while (written < bytes) {
1736 int n = bytes - written;
1737 if (n > 4096)
1738 n = 4096;
1739 if (read(from, buf, n) != n)
1740 goto err;
1741 if (write(to, buf, n) != n)
1742 goto err;
1743 written += n;
1744 }
1745 free(buf);
1746 return 0;
1747 err:
1748 free(buf);
1749 return 1;
1750 }
1751
1752 static void detail_super_ddf(struct supertype *st, char *homehost)
1753 {
1754 struct ddf_super *sb = st->sb;
1755 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1756
1757 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1758 printf("\n");
1759 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1760 printf(" Virtual Disks : %d\n", cnt);
1761 printf("\n");
1762 }
1763
1764 static const char *vendors_with_variable_volume_UUID[] = {
1765 "LSI ",
1766 };
1767
1768 static int volume_id_is_reliable(const struct ddf_super *ddf)
1769 {
1770 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1771 int i;
1772 for (i = 0; i < n; i++)
1773 if (!memcmp(ddf->controller.guid,
1774 vendors_with_variable_volume_UUID[i], 8))
1775 return 0;
1776 return 1;
1777 }
1778
1779 static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
1780 unsigned int vcnum, int uuid[4])
1781 {
1782 char buf[DDF_GUID_LEN+18], sha[20], *p;
1783 struct sha1_ctx ctx;
1784 if (volume_id_is_reliable(ddf)) {
1785 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
1786 return;
1787 }
1788 /*
1789 * Some fake RAID BIOSes (in particular, LSI ones) change the
1790 * VD GUID at every boot. These GUIDs are not suitable for
1791 * identifying an array. Luckily the header GUID appears to
1792 * remain constant.
1793 * We construct a pseudo-UUID from the header GUID and those
1794 * properties of the subarray that we expect to remain constant.
1795 */
1796 memset(buf, 0, sizeof(buf));
1797 p = buf;
1798 memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
1799 p += DDF_GUID_LEN;
1800 memcpy(p, ddf->virt->entries[vcnum].name, 16);
1801 p += 16;
1802 *((__u16 *) p) = vcnum;
1803 sha1_init_ctx(&ctx);
1804 sha1_process_bytes(buf, sizeof(buf), &ctx);
1805 sha1_finish_ctx(&ctx, sha);
1806 memcpy(uuid, sha, 4*4);
1807 }
1808
1809 static void brief_detail_super_ddf(struct supertype *st)
1810 {
1811 struct mdinfo info;
1812 char nbuf[64];
1813 struct ddf_super *ddf = st->sb;
1814 unsigned int vcnum = get_vd_num_of_subarray(st);
1815 if (vcnum == DDF_CONTAINER)
1816 uuid_from_super_ddf(st, info.uuid);
1817 else if (vcnum == DDF_NOTFOUND)
1818 return;
1819 else
1820 uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
1821 fname_from_uuid(st, &info, nbuf,':');
1822 printf(" UUID=%s", nbuf + 5);
1823 }
1824 #endif
1825
1826 static int match_home_ddf(struct supertype *st, char *homehost)
1827 {
1828 /* It matches 'this' host if the controller is a
1829 * Linux-MD controller with vendor_data matching
1830 * the hostname. It would be nice if we could
1831 * test against controller found in /sys or somewhere...
1832 */
1833 struct ddf_super *ddf = st->sb;
1834 unsigned int len;
1835
1836 if (!homehost)
1837 return 0;
1838 len = strlen(homehost);
1839
1840 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1841 len < sizeof(ddf->controller.vendor_data) &&
1842 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1843 ddf->controller.vendor_data[len] == 0);
1844 }
1845
1846 #ifndef MDASSEMBLE
1847 static int find_index_in_bvd(const struct ddf_super *ddf,
1848 const struct vd_config *conf, unsigned int n,
1849 unsigned int *n_bvd)
1850 {
1851 /*
1852 * Find the index of the n-th valid physical disk in this BVD.
1853 * Unused entries can be sprinkled in with the used entries,
1854 * but don't count.
1855 */
1856 unsigned int i, j;
1857 for (i = 0, j = 0;
1858 i < ddf->mppe && j < be16_to_cpu(conf->prim_elmnt_count);
1859 i++) {
1860 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1861 if (n == j) {
1862 *n_bvd = i;
1863 return 1;
1864 }
1865 j++;
1866 }
1867 }
1868 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1869 __func__, n, be16_to_cpu(conf->prim_elmnt_count));
1870 return 0;
1871 }
1872
1873 /* Given a member array instance number, and a raid disk within that instance,
1874 * find the vd_config structure. The offset of the given disk in the phys_refnum
1875 * table is returned in n_bvd.
1876 * For two-level members with a secondary raid level the vd_config for
1877 * the appropriate BVD is returned.
1878 * The return value is always &vlc->conf, where vlc is returned in last pointer.
1879 */
1880 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1881 unsigned int n,
1882 unsigned int *n_bvd, struct vcl **vcl)
1883 {
1884 struct vcl *v;
1885
1886 for (v = ddf->conflist; v; v = v->next) {
1887 unsigned int nsec, ibvd = 0;
1888 struct vd_config *conf;
1889 if (inst != v->vcnum)
1890 continue;
1891 conf = &v->conf;
1892 if (conf->sec_elmnt_count == 1) {
1893 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1894 *vcl = v;
1895 return conf;
1896 } else
1897 goto bad;
1898 }
1899 if (v->other_bvds == NULL) {
1900 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1901 __func__, conf->sec_elmnt_count);
1902 goto bad;
1903 }
1904 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1905 if (conf->sec_elmnt_seq != nsec) {
1906 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1907 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1908 == nsec)
1909 break;
1910 }
1911 if (ibvd == conf->sec_elmnt_count)
1912 goto bad;
1913 conf = v->other_bvds[ibvd-1];
1914 }
1915 if (!find_index_in_bvd(ddf, conf,
1916 n - nsec*conf->sec_elmnt_count, n_bvd))
1917 goto bad;
1918 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1919 , __func__, n, *n_bvd, ibvd, inst);
1920 *vcl = v;
1921 return conf;
1922 }
1923 bad:
1924 	pr_err("%s: Couldn't find disk %d in array %u\n", __func__, n, inst);
1925 return NULL;
1926 }
1927 #endif
1928
1929 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1930 {
1931 /* Find the entry in phys_disk which has the given refnum
1932 * and return its index
1933 */
1934 unsigned int i;
1935 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1936 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1937 return i;
1938 return -1;
1939 }
1940
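/* Hash the 24-byte DDF GUID with SHA-1 and keep the first 16 bytes as the
 * md uuid; buf is 20 bytes because that is the size of a full SHA-1 digest.
 */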
1941 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1942 {
1943 char buf[20];
1944 struct sha1_ctx ctx;
1945 sha1_init_ctx(&ctx);
1946 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1947 sha1_finish_ctx(&ctx, buf);
1948 memcpy(uuid, buf, 4*4);
1949 }
1950
1951 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1952 {
1953 /* The uuid returned here is used for:
1954 * uuid to put into bitmap file (Create, Grow)
1955 * uuid for backup header when saving critical section (Grow)
1956 * comparing uuids when re-adding a device into an array
1957 * In these cases the uuid required is that of the data-array,
1958 * not the device-set.
1959 * uuid to recognise same set when adding a missing device back
1960 * to an array. This is a uuid for the device-set.
1961 *
1962 * For each of these we can make do with a truncated
1963 * or hashed uuid rather than the original, as long as
1964 * everyone agrees.
1965 * In the case of SVD we assume the BVD is of interest,
1966 * though that might not be the case if a bitmap were made for
1967 * a mirrored SVD - worry about that later.
1968 * So we need to find the VD configuration record for the
1969 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1970 * The first 16 bytes of the sha1 of these is used.
1971 */
1972 struct ddf_super *ddf = st->sb;
1973 struct vcl *vcl = ddf->currentconf;
1974
1975 if (vcl)
1976 uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
1977 else
1978 uuid_from_ddf_guid(ddf->anchor.guid, uuid);
1979 }
1980
1981 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1982 {
1983 struct ddf_super *ddf = st->sb;
1984 int map_disks = info->array.raid_disks;
1985 __u32 *cptr;
1986
1987 if (ddf->currentconf) {
1988 getinfo_super_ddf_bvd(st, info, map);
1989 return;
1990 }
1991 memset(info, 0, sizeof(*info));
1992
1993 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1994 info->array.level = LEVEL_CONTAINER;
1995 info->array.layout = 0;
1996 info->array.md_minor = -1;
1997 cptr = (__u32 *)(ddf->anchor.guid + 16);
1998 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1999
2000 info->array.chunk_size = 0;
2001 info->container_enough = 1;
2002
2003 info->disk.major = 0;
2004 info->disk.minor = 0;
2005 if (ddf->dlist) {
2006 struct phys_disk_entry *pde = NULL;
2007 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
2008 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
2009
2010 info->data_offset = be64_to_cpu(ddf->phys->
2011 entries[info->disk.raid_disk].
2012 config_size);
2013 info->component_size = ddf->dlist->size - info->data_offset;
2014 if (info->disk.raid_disk >= 0)
2015 pde = ddf->phys->entries + info->disk.raid_disk;
2016 if (pde &&
2017 !(be16_to_cpu(pde->state) & DDF_Failed) &&
2018 !(be16_to_cpu(pde->state) & DDF_Missing))
2019 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
2020 else
2021 info->disk.state = 1 << MD_DISK_FAULTY;
2022
2023 } else {
2024 /* There should always be a dlist, but just in case...*/
2025 info->disk.number = -1;
2026 info->disk.raid_disk = -1;
2027 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
2028 }
2029 info->events = be32_to_cpu(ddf->active->seq);
2030 info->array.utime = DECADE + be32_to_cpu(ddf->active->timestamp);
2031
2032 info->recovery_start = MaxSector;
2033 info->reshape_active = 0;
2034 info->recovery_blocked = 0;
2035 info->name[0] = 0;
2036
2037 info->array.major_version = -1;
2038 info->array.minor_version = -2;
2039 strcpy(info->text_version, "ddf");
2040 info->safe_mode_delay = 0;
2041
2042 uuid_from_super_ddf(st, info->uuid);
2043
2044 if (map) {
2045 int i, e = 0;
2046 int max = be16_to_cpu(ddf->phys->max_pdes);
2047 for (i = e = 0 ; i < map_disks ; i++, e++) {
2048 while (e < max &&
2049 be32_to_cpu(ddf->phys->entries[e].refnum) == 0xffffffff)
2050 e++;
2051 if (i < info->array.raid_disks && e < max &&
2052 !(be16_to_cpu(ddf->phys->entries[e].state)
2053 & DDF_Failed))
2054 map[i] = 1;
2055 else
2056 map[i] = 0;
2057 }
2058 }
2059 }
2060
2061 /* size of name must be at least 17 bytes! */
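/* Note that every space in the copy is turned into a NUL below, so the
 * result is effectively truncated at the first space in the DDF name.
 */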
2062 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i)
2063 {
2064 int j;
2065 memcpy(name, ddf->virt->entries[i].name, 16);
2066 name[16] = 0;
2067 for(j = 0; j < 16; j++)
2068 if (name[j] == ' ')
2069 name[j] = 0;
2070 }
2071
2072 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
2073 {
2074 struct ddf_super *ddf = st->sb;
2075 struct vcl *vc = ddf->currentconf;
2076 int cd = ddf->currentdev;
2077 int n_prim;
2078 int j;
2079 struct dl *dl = NULL; /* stays NULL unless a matching member disk is found */
2080 int map_disks = info->array.raid_disks;
2081 __u32 *cptr;
2082 struct vd_config *conf;
2083
2084 memset(info, 0, sizeof(*info));
2085 if (layout_ddf2md(&vc->conf, &info->array) == -1)
2086 return;
2087 info->array.md_minor = -1;
2088 cptr = (__u32 *)(vc->conf.guid + 16);
2089 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
2090 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
2091 info->array.chunk_size = 512 << vc->conf.chunk_shift;
2092 info->custom_array_size = be64_to_cpu(vc->conf.array_blocks);
2093
2094 conf = &vc->conf;
2095 n_prim = be16_to_cpu(conf->prim_elmnt_count);
2096 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
2097 int ibvd = cd / n_prim - 1;
2098 cd %= n_prim;
2099 conf = vc->other_bvds[ibvd];
2100 }
2101
2102 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
2103 info->data_offset =
2104 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
2105 if (vc->block_sizes)
2106 info->component_size = vc->block_sizes[cd];
2107 else
2108 info->component_size = be64_to_cpu(conf->blocks);
2109
2110 for (dl = ddf->dlist; dl ; dl = dl->next)
2111 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
2112 break;
2113 }
2114
2115 info->disk.major = 0;
2116 info->disk.minor = 0;
2117 info->disk.state = 0;
2118 if (dl && dl->pdnum >= 0) {
2119 info->disk.major = dl->major;
2120 info->disk.minor = dl->minor;
2121 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2122 * be16_to_cpu(conf->prim_elmnt_count);
2123 info->disk.number = dl->pdnum;
2124 info->disk.state = 0;
2125 if (info->disk.number >= 0 &&
2126 (be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Online) &&
2127 !(be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Failed))
2128 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2129 info->events = be32_to_cpu(ddf->active->seq);
2130 }
2131
2132 info->container_member = ddf->currentconf->vcnum;
2133
2134 info->recovery_start = MaxSector;
2135 info->resync_start = 0;
2136 info->reshape_active = 0;
2137 info->recovery_blocked = 0;
2138 if (!(ddf->virt->entries[info->container_member].state
2139 & DDF_state_inconsistent) &&
2140 (ddf->virt->entries[info->container_member].init_state
2141 & DDF_initstate_mask)
2142 == DDF_init_full)
2143 info->resync_start = MaxSector;
2144
2145 uuid_from_super_ddf(st, info->uuid);
2146
2147 info->array.major_version = -1;
2148 info->array.minor_version = -2;
2149 sprintf(info->text_version, "/%s/%d",
2150 st->container_devnm,
2151 info->container_member);
2152 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2153
2154 _ddf_array_name(info->name, ddf, info->container_member);
2155
2156 if (map)
2157 for (j = 0; j < map_disks; j++) {
2158 map[j] = 0;
2159 if (j < info->array.raid_disks) {
2160 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2161 if (i >= 0 &&
2162 (be16_to_cpu(ddf->phys->entries[i].state)
2163 & DDF_Online) &&
2164 !(be16_to_cpu(ddf->phys->entries[i].state)
2165 & DDF_Failed))
2166 map[j] = 1;
2167 }
2168 }
2169 }
2170
2171 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
2172 char *update,
2173 char *devname, int verbose,
2174 int uuid_set, char *homehost)
2175 {
2176 /* For 'assemble' and 'force' we need to return non-zero if any
2177 * change was made. For others, the return value is ignored.
2178 * Update options are:
2179 * force-one : This device looks a bit old but needs to be included,
2180 * update age info appropriately.
2181 * assemble: clear any 'faulty' flag to allow this device to
2182 * be assembled.
2183 * force-array: Array is degraded but being forced, mark it clean
2184 * if that will be needed to assemble it.
2185 *
2186 * newdev: not used ????
2187 * grow: Array has gained a new device - this is currently for
2188 * linear only
2189 * resync: mark as dirty so a resync will happen.
2190 * uuid: Change the uuid of the array to match what is given
2191 * homehost: update the recorded homehost
2192 * name: update the name - preserving the homehost
2193 * _reshape_progress: record new reshape_progress position.
2194 *
2195 * Following are not relevant for this version:
2196 * sparc2.2 : update from old dodgy metadata
2197 * super-minor: change the preferred_minor number
2198 * summaries: update redundant counters.
2199 */
2200 int rv = 0;
2201 // struct ddf_super *ddf = st->sb;
2202 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
2203 // struct virtual_entry *ve = find_ve(ddf);
2204
2205 /* we don't need to handle "force-*" or "assemble" as
2206 * there is no need to 'trick' the kernel. When the metadata is
2207 * first updated to activate the array, all the implied modifications
2208 * will just happen.
2209 */
2210
2211 if (strcmp(update, "grow") == 0) {
2212 /* FIXME */
2213 } else if (strcmp(update, "resync") == 0) {
2214 // info->resync_checkpoint = 0;
2215 } else if (strcmp(update, "homehost") == 0) {
2216 /* homehost is stored in controller->vendor_data,
2217 * or at least it is when we are the vendor
2218 */
2219 // if (info->vendor_is_local)
2220 // strcpy(ddf->controller.vendor_data, homehost);
2221 rv = -1;
2222 } else if (strcmp(update, "name") == 0) {
2223 /* name is stored in virtual_entry->name */
2224 // memset(ve->name, ' ', 16);
2225 // strncpy(ve->name, info->name, 16);
2226 rv = -1;
2227 } else if (strcmp(update, "_reshape_progress") == 0) {
2228 /* We don't support reshape yet */
2229 } else if (strcmp(update, "assemble") == 0 ) {
2230 /* Do nothing, just succeed */
2231 rv = 0;
2232 } else
2233 rv = -1;
2234
2235 // update_all_csum(ddf);
2236
2237 return rv;
2238 }
2239
2240 static void make_header_guid(char *guid)
2241 {
2242 be32 stamp;
2243 /* Create a DDF Header or Virtual Disk GUID */
2244
2245 /* 24 bytes of fiction required.
2246 * first 8 are a 'vendor-id' - "Linux-MD"
2247 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2248 * Remaining 8 random number plus timestamp
2249 */
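/* Byte layout produced below: 0-7 T10 vendor id ("Linux-MD"), 8-11
 * 0xdeadbeef, 12-15 zero, 16-19 DDF timestamp (now - DECADE), 20-23 random.
 */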
2250 memcpy(guid, T10, sizeof(T10));
2251 stamp = cpu_to_be32(0xdeadbeef);
2252 memcpy(guid+8, &stamp, 4);
2253 stamp = cpu_to_be32(0);
2254 memcpy(guid+12, &stamp, 4);
2255 stamp = cpu_to_be32(time(0) - DECADE);
2256 memcpy(guid+16, &stamp, 4);
2257 stamp._v32 = random32();
2258 memcpy(guid+20, &stamp, 4);
2259 }
2260
2261 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2262 {
2263 unsigned int i;
2264 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2265 if (all_ff(ddf->virt->entries[i].guid))
2266 return i;
2267 }
2268 return DDF_NOTFOUND;
2269 }
2270
2271 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2272 const char *name)
2273 {
2274 unsigned int i;
2275 if (name == NULL)
2276 return DDF_NOTFOUND;
2277 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2278 if (all_ff(ddf->virt->entries[i].guid))
2279 continue;
2280 if (!strncmp(name, ddf->virt->entries[i].name,
2281 sizeof(ddf->virt->entries[i].name)))
2282 return i;
2283 }
2284 return DDF_NOTFOUND;
2285 }
2286
2287 #ifndef MDASSEMBLE
2288 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2289 const char *guid)
2290 {
2291 unsigned int i;
2292 if (guid == NULL || all_ff(guid))
2293 return DDF_NOTFOUND;
2294 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2295 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2296 return i;
2297 return DDF_NOTFOUND;
2298 }
2299 #endif
2300
2301 static int init_super_ddf(struct supertype *st,
2302 mdu_array_info_t *info,
2303 unsigned long long size, char *name, char *homehost,
2304 int *uuid, unsigned long long data_offset)
2305 {
2306 /* This is primarily called by Create when creating a new array.
2307 * We will then get add_to_super called for each component, and then
2308 * write_init_super called to write it out to each device.
2309 * For DDF, Create can create on fresh devices or on a pre-existing
2310 * array.
2311 * To create on a pre-existing array a different method will be called.
2312 * This one is just for fresh drives.
2313 *
2314 * We need to create the entire 'ddf' structure which includes:
2315 * DDF headers - these are easy.
2316 * Controller data - a Sector describing this controller .. not that
2317 * this is a controller exactly.
2318 * Physical Disk Record - one entry per device, so
2319 * leave plenty of space.
2320 * Virtual Disk Records - again, just leave plenty of space.
2321 * This just lists VDs, doesn't give details.
2322 * Config records - describe the VDs that use this disk
2323 * DiskData - describes 'this' device.
2324 * BadBlockManagement - empty
2325 * Diag Space - empty
2326 * Vendor Logs - Could we put bitmaps here?
2327 *
2328 */
2329 struct ddf_super *ddf;
2330 char hostname[17];
2331 int hostlen;
2332 int max_phys_disks, max_virt_disks;
2333 unsigned long long sector;
2334 int clen;
2335 int i;
2336 int pdsize, vdsize;
2337 struct phys_disk *pd;
2338 struct virtual_disk *vd;
2339
2340 if (data_offset != INVALID_SECTORS) {
2341 pr_err("data-offset not supported by DDF\n");
2342 return 0;
2343 }
2344
2345 if (st->sb)
2346 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2347 data_offset);
2348
2349 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2350 pr_err("%s could not allocate superblock\n", __func__);
2351 return 0;
2352 }
2353 memset(ddf, 0, sizeof(*ddf));
2354 st->sb = ddf;
2355
2356 if (info == NULL) {
2357 /* zeroing superblock */
2358 return 0;
2359 }
2360
2361 /* At least 32MB *must* be reserved for the ddf. So let's just
2362 * start 32MB from the end, and put the primary header there.
2363 * Don't do secondary for now.
2364 * We don't know exactly where that will be yet as it could be
2365 * different on each device. So just set up the lengths.
2366 */
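/* Section layout set up below (offsets in sectors, relative to the header):
 * controller data at 1 (one sector), then the physical disk records, the
 * virtual disk records, conf_rec_len * (max_part + 1) sectors of config
 * records, and one sector of disk data; BBM, diagnostic and vendor sections
 * are left empty.
 */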
2367
2368 ddf->anchor.magic = DDF_HEADER_MAGIC;
2369 make_header_guid(ddf->anchor.guid);
2370
2371 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2372 ddf->anchor.seq = cpu_to_be32(1);
2373 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2374 ddf->anchor.openflag = 0xFF;
2375 ddf->anchor.foreignflag = 0;
2376 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2377 ddf->anchor.pad0 = 0xff;
2378 memset(ddf->anchor.pad1, 0xff, 12);
2379 memset(ddf->anchor.header_ext, 0xff, 32);
2380 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2381 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2382 ddf->anchor.type = DDF_HEADER_ANCHOR;
2383 memset(ddf->anchor.pad2, 0xff, 3);
2384 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2385 /* Put this at bottom of 32M reserved.. */
2386 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2387 max_phys_disks = 1023; /* Should be enough, 4095 is also allowed */
2388 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2389 max_virt_disks = 255; /* 15, 63, 255, 1024, 4095 are all allowed */
2390 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks);
2391 ddf->max_part = 64;
2392 ddf->anchor.max_partitions = cpu_to_be16(ddf->max_part);
2393 ddf->mppe = 256; /* 16, 64, 256, 1024, 4096 are all allowed */
2394 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
2395 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2396 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2397 memset(ddf->anchor.pad3, 0xff, 54);
2398 /* Controller section is one sector long immediately
2399 * after the ddf header */
2400 sector = 1;
2401 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2402 ddf->anchor.controller_section_length = cpu_to_be32(1);
2403 sector += 1;
2404
2405 /* phys is 8 sectors after that */
2406 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2407 sizeof(struct phys_disk_entry)*max_phys_disks,
2408 512);
2409 switch(pdsize/512) {
2410 case 2: case 8: case 32: case 128: case 512: break;
2411 default: abort();
2412 }
2413 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2414 ddf->anchor.phys_section_length =
2415 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2416 sector += pdsize/512;
2417
2418 /* virt is another 32 sectors */
2419 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2420 sizeof(struct virtual_entry) * max_virt_disks,
2421 512);
2422 switch(vdsize/512) {
2423 case 2: case 8: case 32: case 128: case 512: break;
2424 default: abort();
2425 }
2426 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2427 ddf->anchor.virt_section_length =
2428 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2429 sector += vdsize/512;
2430
2431 clen = ddf->conf_rec_len * (ddf->max_part+1);
2432 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2433 ddf->anchor.config_section_length = cpu_to_be32(clen);
2434 sector += clen;
2435
2436 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2437 ddf->anchor.data_section_length = cpu_to_be32(1);
2438 sector += 1;
2439
2440 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2441 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2442 ddf->anchor.diag_space_length = cpu_to_be32(0);
2443 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2444 ddf->anchor.vendor_length = cpu_to_be32(0);
2445 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2446
2447 memset(ddf->anchor.pad4, 0xff, 256);
2448
2449 memcpy(&ddf->primary, &ddf->anchor, 512);
2450 memcpy(&ddf->secondary, &ddf->anchor, 512);
2451
2452 ddf->primary.openflag = 1; /* I guess.. */
2453 ddf->primary.type = DDF_HEADER_PRIMARY;
2454
2455 ddf->secondary.openflag = 1; /* I guess.. */
2456 ddf->secondary.type = DDF_HEADER_SECONDARY;
2457
2458 ddf->active = &ddf->primary;
2459
2460 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2461
2462 /* 24 more bytes of fiction required.
2463 * first 8 are a 'vendor-id' - "Linux-MD"
2464 * Remaining 16 are serial number.... maybe a hostname would do?
2465 */
2466 memcpy(ddf->controller.guid, T10, sizeof(T10));
2467 gethostname(hostname, sizeof(hostname));
2468 hostname[sizeof(hostname) - 1] = 0;
2469 hostlen = strlen(hostname);
2470 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2471 for (i = strlen(T10) ; i+hostlen < 24; i++)
2472 ddf->controller.guid[i] = ' ';
2473
2474 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2475 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2476 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2477 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2478 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2479 memset(ddf->controller.pad, 0xff, 8);
2480 memset(ddf->controller.vendor_data, 0xff, 448);
2481 if (homehost && strlen(homehost) < 440)
2482 strcpy((char*)ddf->controller.vendor_data, homehost);
2483
2484 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2485 pr_err("%s could not allocate pd\n", __func__);
2486 return 0;
2487 }
2488 ddf->phys = pd;
2489 ddf->pdsize = pdsize;
2490
2491 memset(pd, 0xff, pdsize);
2492 memset(pd, 0, sizeof(*pd));
2493 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2494 pd->used_pdes = cpu_to_be16(0);
2495 pd->max_pdes = cpu_to_be16(max_phys_disks);
2496 memset(pd->pad, 0xff, 52);
2497 for (i = 0; i < max_phys_disks; i++)
2498 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2499
2500 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2501 pr_err("%s could not allocate vd\n", __func__);
2502 return 0;
2503 }
2504 ddf->virt = vd;
2505 ddf->vdsize = vdsize;
2506 memset(vd, 0, vdsize);
2507 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2508 vd->populated_vdes = cpu_to_be16(0);
2509 vd->max_vdes = cpu_to_be16(max_virt_disks);
2510 memset(vd->pad, 0xff, 52);
2511
2512 for (i=0; i<max_virt_disks; i++)
2513 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2514
2515 st->sb = ddf;
2516 ddf_set_updates_pending(ddf, NULL);
2517 return 1;
2518 }
2519
2520 static int chunk_to_shift(int chunksize)
2521 {
2522 return ffs(chunksize/512)-1;
2523 }
2524
2525 #ifndef MDASSEMBLE
2526 struct extent {
2527 unsigned long long start, size;
2528 };
2529 static int cmp_extent(const void *av, const void *bv)
2530 {
2531 const struct extent *a = av;
2532 const struct extent *b = bv;
2533 if (a->start < b->start)
2534 return -1;
2535 if (a->start > b->start)
2536 return 1;
2537 return 0;
2538 }
2539
2540 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2541 {
2542 /* Find a list of used extents on the given physical device
2543 * (dl) of the given ddf.
2544 * Return a malloced array of 'struct extent'
2545 */
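/* The array returned below is sorted by start and terminated by a sentinel
 * entry whose size is 0 and whose start is the disk's config_size, i.e. the
 * end of the usable area; callers scan gaps up to that sentinel.
 */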
2546 struct extent *rv;
2547 int n = 0;
2548 unsigned int i;
2549 __u16 state;
2550
2551 if (dl->pdnum < 0)
2552 return NULL;
2553 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2554
2555 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2556 return NULL;
2557
2558 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2559
2560 for (i = 0; i < ddf->max_part; i++) {
2561 const struct vd_config *bvd;
2562 unsigned int ibvd;
2563 struct vcl *v = dl->vlist[i];
2564 if (v == NULL ||
2565 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2566 &bvd, &ibvd) == DDF_NOTFOUND)
2567 continue;
2568 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2569 rv[n].size = be64_to_cpu(bvd->blocks);
2570 n++;
2571 }
2572 qsort(rv, n, sizeof(*rv), cmp_extent);
2573
2574 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2575 rv[n].size = 0;
2576 return rv;
2577 }
2578 #endif
2579
2580 static int init_super_ddf_bvd(struct supertype *st,
2581 mdu_array_info_t *info,
2582 unsigned long long size,
2583 char *name, char *homehost,
2584 int *uuid, unsigned long long data_offset)
2585 {
2586 /* We are creating a BVD inside a pre-existing container.
2587 * so st->sb is already set.
2588 * We need to create a new vd_config and a new virtual_entry
2589 */
2590 struct ddf_super *ddf = st->sb;
2591 unsigned int venum, i;
2592 struct virtual_entry *ve;
2593 struct vcl *vcl;
2594 struct vd_config *vc;
2595
2596 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2597 pr_err("This ddf already has an array called %s\n", name);
2598 return 0;
2599 }
2600 venum = find_unused_vde(ddf);
2601 if (venum == DDF_NOTFOUND) {
2602 pr_err("Cannot find spare slot for virtual disk\n");
2603 return 0;
2604 }
2605 ve = &ddf->virt->entries[venum];
2606
2607 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2608 * timestamp, random number
2609 */
2610 make_header_guid(ve->guid);
2611 ve->unit = cpu_to_be16(info->md_minor);
2612 ve->pad0 = 0xFFFF;
2613 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2614 DDF_GUID_LEN);
2615 ve->type = cpu_to_be16(0);
2616 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2617 if (info->state & 1) /* clean */
2618 ve->init_state = DDF_init_full;
2619 else
2620 ve->init_state = DDF_init_not;
2621
2622 memset(ve->pad1, 0xff, 14);
2623 memset(ve->name, ' ', 16);
2624 if (name)
2625 strncpy(ve->name, name, 16);
2626 ddf->virt->populated_vdes =
2627 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2628
2629 /* Now create a new vd_config */
2630 if (posix_memalign((void**)&vcl, 512,
2631 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2632 pr_err("%s could not allocate vd_config\n", __func__);
2633 return 0;
2634 }
2635 vcl->vcnum = venum;
2636 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2637 vc = &vcl->conf;
2638
2639 vc->magic = DDF_VD_CONF_MAGIC;
2640 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2641 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2642 vc->seqnum = cpu_to_be32(1);
2643 memset(vc->pad0, 0xff, 24);
2644 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2645 if (layout_md2ddf(info, vc) == -1 ||
2646 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2647 pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
2648 __func__, info->level, info->layout, info->raid_disks);
2649 free(vcl);
2650 return 0;
2651 }
2652 vc->sec_elmnt_seq = 0;
2653 if (alloc_other_bvds(ddf, vcl) != 0) {
2654 pr_err("%s could not allocate other bvds\n",
2655 __func__);
2656 free(vcl);
2657 return 0;
2658 }
2659 vc->blocks = cpu_to_be64(info->size * 2);
2660 vc->array_blocks = cpu_to_be64(
2661 calc_array_size(info->level, info->raid_disks, info->layout,
2662 info->chunk_size, info->size*2));
2663 memset(vc->pad1, 0xff, 8);
2664 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2665 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2666 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2667 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2668 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2669 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2670 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2671 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2672 memset(vc->cache_pol, 0, 8);
2673 vc->bg_rate = 0x80;
2674 memset(vc->pad2, 0xff, 3);
2675 memset(vc->pad3, 0xff, 52);
2676 memset(vc->pad4, 0xff, 192);
2677 memset(vc->v0, 0xff, 32);
2678 memset(vc->v1, 0xff, 32);
2679 memset(vc->v2, 0xff, 16);
2680 memset(vc->v3, 0xff, 16);
2681 memset(vc->vendor, 0xff, 32);
2682
2683 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2684 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2685
2686 for (i = 1; i < vc->sec_elmnt_count; i++) {
2687 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2688 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2689 }
2690
2691 vcl->next = ddf->conflist;
2692 ddf->conflist = vcl;
2693 ddf->currentconf = vcl;
2694 ddf_set_updates_pending(ddf, NULL);
2695 return 1;
2696 }
2697
2698 #ifndef MDASSEMBLE
2699 static void add_to_super_ddf_bvd(struct supertype *st,
2700 mdu_disk_info_t *dk, int fd, char *devname)
2701 {
2702 /* fd and devname identify a device within the ddf container (st).
2703 * dk identifies a location in the new BVD.
2704 * We need to find suitable free space in that device and update
2705 * the phys_refnum and lba_offset for the newly created vd_config.
2706 * We might also want to update the type in the phys_disk
2707 * section.
2708 *
2709 * Alternately: fd == -1 and we have already chosen which device to
2710 * use and recorded in dlist->raid_disk;
2711 */
2712 struct dl *dl;
2713 struct ddf_super *ddf = st->sb;
2714 struct vd_config *vc;
2715 unsigned int i;
2716 unsigned long long blocks, pos, esize;
2717 struct extent *ex;
2718 unsigned int raid_disk = dk->raid_disk;
2719
2720 if (fd == -1) {
2721 for (dl = ddf->dlist; dl ; dl = dl->next)
2722 if (dl->raiddisk == dk->raid_disk)
2723 break;
2724 } else {
2725 for (dl = ddf->dlist; dl ; dl = dl->next)
2726 if (dl->major == dk->major &&
2727 dl->minor == dk->minor)
2728 break;
2729 }
2730 if (!dl || dl->pdnum < 0 || ! (dk->state & (1<<MD_DISK_SYNC)))
2731 return;
2732
2733 vc = &ddf->currentconf->conf;
2734 if (vc->sec_elmnt_count > 1) {
2735 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2736 if (raid_disk >= n)
2737 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2738 raid_disk %= n;
2739 }
2740
2741 ex = get_extents(ddf, dl);
2742 if (!ex)
2743 return;
2744
2745 i = 0; pos = 0;
2746 blocks = be64_to_cpu(vc->blocks);
2747 if (ddf->currentconf->block_sizes)
2748 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2749
2750 /* First-fit */
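/* The gap in front of extent i is ex[i].start - pos; take the first gap
 * large enough for 'blocks'. The sentinel entry from get_extents() (size 0,
 * start == config_size) lets the last gap extend to the usable end of the
 * disk.
 */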
2751 do {
2752 esize = ex[i].start - pos;
2753 if (esize >= blocks)
2754 break;
2755 pos = ex[i].start + ex[i].size;
2756 i++;
2757 } while (ex[i-1].size);
2758
2759 free(ex);
2760 if (esize < blocks)
2761 return;
2762
2763 ddf->currentdev = dk->raid_disk;
2764 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2765 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2766
2767 for (i = 0; i < ddf->max_part ; i++)
2768 if (dl->vlist[i] == NULL)
2769 break;
2770 if (i == ddf->max_part)
2771 return;
2772 dl->vlist[i] = ddf->currentconf;
2773
2774 if (fd >= 0)
2775 dl->fd = fd;
2776 if (devname)
2777 dl->devname = devname;
2778
2779 /* Check if we can mark array as optimal yet */
2780 i = ddf->currentconf->vcnum;
2781 ddf->virt->entries[i].state =
2782 (ddf->virt->entries[i].state & ~DDF_state_mask)
2783 | get_svd_state(ddf, ddf->currentconf);
2784 be16_clear(ddf->phys->entries[dl->pdnum].type,
2785 cpu_to_be16(DDF_Global_Spare));
2786 be16_set(ddf->phys->entries[dl->pdnum].type,
2787 cpu_to_be16(DDF_Active_in_VD));
2788 dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
2789 __func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
2790 ddf->currentconf->vcnum, guid_str(vc->guid),
2791 dk->raid_disk);
2792 ddf_set_updates_pending(ddf, vc);
2793 }
2794
2795 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2796 {
2797 unsigned int i;
2798 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2799 if (all_ff(ddf->phys->entries[i].guid))
2800 return i;
2801 }
2802 return DDF_NOTFOUND;
2803 }
2804
2805 static void _set_config_size(struct phys_disk_entry *pde, const struct dl *dl)
2806 {
2807 __u64 cfs, t;
2808 cfs = min(dl->size - 32*1024*2ULL, be64_to_cpu(dl->primary_lba));
2809 t = be64_to_cpu(dl->secondary_lba);
2810 if (t != ~(__u64)0)
2811 cfs = min(cfs, t);
2812 /*
2813 * Some vendor DDF structures interpret workspace_lba
2814 * very differently than we do: Make a sanity check on the value.
2815 */
2816 t = be64_to_cpu(dl->workspace_lba);
2817 if (t < cfs) {
2818 __u64 wsp = cfs - t;
2819 if (wsp > 1024*1024*2ULL && wsp > dl->size / 16) {
2820 pr_err("%s: %x:%x: workspace size 0x%llx too big, ignoring\n",
2821 __func__, dl->major, dl->minor, wsp);
2822 } else
2823 cfs = t;
2824 }
2825 pde->config_size = cpu_to_be64(cfs);
2826 dprintf("%s: %x:%x config_size %llx, DDF structure is %llx blocks\n",
2827 __func__, dl->major, dl->minor, cfs, dl->size-cfs);
2828 }
2829
2830 /* Add a device to a container, either while creating it or while
2831 * expanding a pre-existing container
2832 */
2833 static int add_to_super_ddf(struct supertype *st,
2834 mdu_disk_info_t *dk, int fd, char *devname,
2835 unsigned long long data_offset)
2836 {
2837 struct ddf_super *ddf = st->sb;
2838 struct dl *dd;
2839 time_t now;
2840 struct tm *tm;
2841 unsigned long long size;
2842 struct phys_disk_entry *pde;
2843 unsigned int n, i;
2844 struct stat stb;
2845 __u32 *tptr;
2846
2847 if (ddf->currentconf) {
2848 add_to_super_ddf_bvd(st, dk, fd, devname);
2849 return 0;
2850 }
2851
2852 /* This is device numbered dk->number. We need to create
2853 * a phys_disk entry and a more detailed disk_data entry.
2854 */
2855 fstat(fd, &stb);
2856 n = find_unused_pde(ddf);
2857 if (n == DDF_NOTFOUND) {
2858 pr_err("%s: No free slot in array, cannot add disk\n",
2859 __func__);
2860 return 1;
2861 }
2862 pde = &ddf->phys->entries[n];
2863 get_dev_size(fd, NULL, &size);
2864 if (size <= 32*1024*1024) {
2865 pr_err("%s: device size must be at least 32MB\n",
2866 __func__);
2867 return 1;
2868 }
2869 size >>= 9;
2870
2871 if (posix_memalign((void**)&dd, 512,
2872 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2873 pr_err("%s could not allocate buffer for new disk, aborting\n",
2874 __func__);
2875 return 1;
2876 }
2877 dd->major = major(stb.st_rdev);
2878 dd->minor = minor(stb.st_rdev);
2879 dd->devname = devname;
2880 dd->fd = fd;
2881 dd->spare = NULL;
2882
2883 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2884 now = time(0);
2885 tm = localtime(&now);
2886 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2887 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2888 tptr = (__u32 *)(dd->disk.guid + 16);
2889 *tptr++ = random32();
2890 *tptr = random32();
2891
2892 do {
2893 /* Cannot be bothered finding a CRC of some irrelevant details*/
2894 dd->disk.refnum._v32 = random32();
2895 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2896 i > 0; i--)
2897 if (be32_eq(ddf->phys->entries[i-1].refnum,
2898 dd->disk.refnum))
2899 break;
2900 } while (i > 0);
2901
2902 dd->disk.forced_ref = 1;
2903 dd->disk.forced_guid = 1;
2904 memset(dd->disk.vendor, ' ', 32);
2905 memcpy(dd->disk.vendor, "Linux", 5);
2906 memset(dd->disk.pad, 0xff, 442);
2907 for (i = 0; i < ddf->max_part ; i++)
2908 dd->vlist[i] = NULL;
2909
2910 dd->pdnum = n;
2911
2912 if (st->update_tail) {
2913 int len = (sizeof(struct phys_disk) +
2914 sizeof(struct phys_disk_entry));
2915 struct phys_disk *pd;
2916
2917 pd = xmalloc(len);
2918 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2919 pd->used_pdes = cpu_to_be16(n);
2920 pde = &pd->entries[0];
2921 dd->mdupdate = pd;
2922 } else
2923 ddf->phys->used_pdes = cpu_to_be16(
2924 1 + be16_to_cpu(ddf->phys->used_pdes));
2925
2926 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2927 pde->refnum = dd->disk.refnum;
2928 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2929 pde->state = cpu_to_be16(DDF_Online);
2930 dd->size = size;
2931 /*
2932 * If there is already a device in dlist, try to reserve the same
2933 * amount of workspace. Otherwise, use 32MB.
2934 * We checked disk size above already.
2935 */
2936 #define __calc_lba(new, old, lba, mb) do { \
2937 unsigned long long dif; \
2938 if ((old) != NULL) \
2939 dif = (old)->size - be64_to_cpu((old)->lba); \
2940 else \
2941 dif = (new)->size; \
2942 if ((new)->size > dif) \
2943 (new)->lba = cpu_to_be64((new)->size - dif); \
2944 else \
2945 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2946 } while (0)
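/* With an empty dlist, dif equals the new disk's size, so the LBA lands
 * 'mb' MiB (mb*1024*2 sectors) before the end of the new disk. If a disk
 * already exists, its distance-from-end is reproduced on the new disk,
 * provided the new disk is large enough.
 */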
2947 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2948 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2949 if (ddf->dlist == NULL ||
2950 be64_to_cpu(ddf->dlist->secondary_lba) != ~(__u64)0)
2951 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2952 _set_config_size(pde, dd);
2953
2954 sprintf(pde->path, "%17.17s", "Information: nil");
2955 memset(pde->pad, 0xff, 6);
2956
2957 if (st->update_tail) {
2958 dd->next = ddf->add_list;
2959 ddf->add_list = dd;
2960 } else {
2961 dd->next = ddf->dlist;
2962 ddf->dlist = dd;
2963 ddf_set_updates_pending(ddf, NULL);
2964 }
2965
2966 return 0;
2967 }
2968
2969 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2970 {
2971 struct ddf_super *ddf = st->sb;
2972 struct dl *dl;
2973
2974 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2975 * disappeared from the container.
2976 * We need to arrange that it disappears from the metadata and
2977 * internal data structures too.
2978 * Most of the work is done by ddf_process_update which edits
2979 * the metadata and closes the file handle and attaches the memory
2980 * where free_updates will free it.
2981 */
2982 for (dl = ddf->dlist; dl ; dl = dl->next)
2983 if (dl->major == dk->major &&
2984 dl->minor == dk->minor)
2985 break;
2986 if (!dl || dl->pdnum < 0)
2987 return -1;
2988
2989 if (st->update_tail) {
2990 int len = (sizeof(struct phys_disk) +
2991 sizeof(struct phys_disk_entry));
2992 struct phys_disk *pd;
2993
2994 pd = xmalloc(len);
2995 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2996 pd->used_pdes = cpu_to_be16(dl->pdnum);
2997 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2998 append_metadata_update(st, pd, len);
2999 }
3000 return 0;
3001 }
3002 #endif
3003
3004 /*
3005 * This is the write_init_super method for a ddf container. It is
3006 * called when creating a container or adding another device to a
3007 * container.
3008 */
3009
3010 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
3011 {
3012 unsigned long long sector;
3013 struct ddf_header *header;
3014 int fd, i, n_config, conf_size, buf_size;
3015 int ret = 0;
3016 char *conf;
3017
3018 fd = d->fd;
3019
3020 switch (type) {
3021 case DDF_HEADER_PRIMARY:
3022 header = &ddf->primary;
3023 sector = be64_to_cpu(header->primary_lba);
3024 break;
3025 case DDF_HEADER_SECONDARY:
3026 header = &ddf->secondary;
3027 sector = be64_to_cpu(header->secondary_lba);
3028 break;
3029 default:
3030 return 0;
3031 }
3032 if (sector == ~(__u64)0)
3033 return 0;
3034
3035 header->type = type;
3036 header->openflag = 1;
3037 header->crc = calc_crc(header, 512);
3038
3039 lseek64(fd, sector<<9, 0);
3040 if (write(fd, header, 512) < 0)
3041 goto out;
3042
3043 ddf->controller.crc = calc_crc(&ddf->controller, 512);
3044 if (write(fd, &ddf->controller, 512) < 0)
3045 goto out;
3046
3047 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
3048 if (write(fd, ddf->phys, ddf->pdsize) < 0)
3049 goto out;
3050 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
3051 if (write(fd, ddf->virt, ddf->vdsize) < 0)
3052 goto out;
3053
3054 /* Now write lots of config records. */
3055 n_config = ddf->max_part;
3056 conf_size = ddf->conf_rec_len * 512;
3057 conf = ddf->conf;
3058 buf_size = conf_size * (n_config + 1);
3059 if (!conf) {
3060 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
3061 goto out;
3062 ddf->conf = conf;
3063 }
3064 for (i = 0 ; i <= n_config ; i++) {
3065 struct vcl *c;
3066 struct vd_config *vdc = NULL;
3067 if (i == n_config) {
3068 c = (struct vcl *)d->spare;
3069 if (c)
3070 vdc = &c->conf;
3071 } else {
3072 unsigned int dummy;
3073 c = d->vlist[i];
3074 if (c)
3075 get_pd_index_from_refnum(
3076 c, d->disk.refnum,
3077 ddf->mppe,
3078 (const struct vd_config **)&vdc,
3079 &dummy);
3080 }
3081 if (vdc) {
3082 dprintf("writing conf record %i on disk %08x for %s/%u\n",
3083 i, be32_to_cpu(d->disk.refnum),
3084 guid_str(vdc->guid),
3085 vdc->sec_elmnt_seq);
3086 vdc->crc = calc_crc(vdc, conf_size);
3087 memcpy(conf + i*conf_size, vdc, conf_size);
3088 } else
3089 memset(conf + i*conf_size, 0xff, conf_size);
3090 }
3091 if (write(fd, conf, buf_size) != buf_size)
3092 goto out;
3093
3094 d->disk.crc = calc_crc(&d->disk, 512);
3095 if (write(fd, &d->disk, 512) < 0)
3096 goto out;
3097
3098 ret = 1;
3099 out:
3100 header->openflag = 0;
3101 header->crc = calc_crc(header, 512);
3102
3103 lseek64(fd, sector<<9, 0);
3104 if (write(fd, header, 512) < 0)
3105 ret = 0;
3106
3107 return ret;
3108 }
3109
3110 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
3111 {
3112 unsigned long long size;
3113 int fd = d->fd;
3114 if (fd < 0)
3115 return 0;
3116
3117 /* We need to fill in the primary, (secondary) and workspace
3118 * LBAs in the headers and set their checksums,
3119 * and also checksum phys, virt, ...
3120 *
3121 * Then write everything out; the anchor is written last.
3122 */
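/* Defaults used below when the per-disk values are not set: workspace and
 * secondary header 32MiB before the end of the device, primary header 16MiB
 * before the end, and the anchor in the very last sector.
 */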
3123 get_dev_size(fd, NULL, &size);
3124 size /= 512;
3125 memcpy(&ddf->anchor, ddf->active, 512);
3126 if (be64_to_cpu(d->workspace_lba) != 0ULL)
3127 ddf->anchor.workspace_lba = d->workspace_lba;
3128 else
3129 ddf->anchor.workspace_lba =
3130 cpu_to_be64(size - 32*1024*2);
3131 if (be64_to_cpu(d->primary_lba) != 0ULL)
3132 ddf->anchor.primary_lba = d->primary_lba;
3133 else
3134 ddf->anchor.primary_lba =
3135 cpu_to_be64(size - 16*1024*2);
3136 if (be64_to_cpu(d->secondary_lba) != 0ULL)
3137 ddf->anchor.secondary_lba = d->secondary_lba;
3138 else
3139 ddf->anchor.secondary_lba =
3140 cpu_to_be64(size - 32*1024*2);
3141 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
3142 memcpy(&ddf->primary, &ddf->anchor, 512);
3143 memcpy(&ddf->secondary, &ddf->anchor, 512);
3144
3145 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
3146 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
3147 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
3148
3149 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
3150 return 0;
3151
3152 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
3153 return 0;
3154
3155 lseek64(fd, (size-1)*512, SEEK_SET);
3156 if (write(fd, &ddf->anchor, 512) < 0)
3157 return 0;
3158
3159 return 1;
3160 }
3161
3162 #ifndef MDASSEMBLE
3163 static int __write_init_super_ddf(struct supertype *st)
3164 {
3165 struct ddf_super *ddf = st->sb;
3166 struct dl *d;
3167 int attempts = 0;
3168 int successes = 0;
3169
3170 pr_state(ddf, __func__);
3171
3172 /* try to write updated metadata,
3173 * if we catch a failure move on to the next disk
3174 */
3175 for (d = ddf->dlist; d; d=d->next) {
3176 attempts++;
3177 successes += _write_super_to_disk(ddf, d);
3178 }
3179
3180 return attempts != successes;
3181 }
3182
3183 static int write_init_super_ddf(struct supertype *st)
3184 {
3185 struct ddf_super *ddf = st->sb;
3186 struct vcl *currentconf = ddf->currentconf;
3187
3188 /* We are done with currentconf - reset it so st refers to the container */
3189 ddf->currentconf = NULL;
3190
3191 if (st->update_tail) {
3192 /* queue the virtual_disk and vd_config as metadata updates */
3193 struct virtual_disk *vd;
3194 struct vd_config *vc;
3195 int len, tlen;
3196 unsigned int i;
3197
3198 if (!currentconf) {
3199 /* Must be adding a physical disk to the container */
3200 int len = (sizeof(struct phys_disk) +
3201 sizeof(struct phys_disk_entry));
3202
3203 /* adding a disk to the container. */
3204 if (!ddf->add_list)
3205 return 0;
3206
3207 append_metadata_update(st, ddf->add_list->mdupdate, len);
3208 ddf->add_list->mdupdate = NULL;
3209 return 0;
3210 }
3211
3212 /* Newly created VD */
3213
3214 /* First the virtual disk. We have a slightly fake header */
3215 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3216 vd = xmalloc(len);
3217 *vd = *ddf->virt;
3218 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3219 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3220 append_metadata_update(st, vd, len);
3221
3222 /* Then the vd_config */
3223 len = ddf->conf_rec_len * 512;
3224 tlen = len * currentconf->conf.sec_elmnt_count;
3225 vc = xmalloc(tlen);
3226 memcpy(vc, &currentconf->conf, len);
3227 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3228 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3229 len);
3230 append_metadata_update(st, vc, tlen);
3231
3232 /* FIXME I need to close the fds! */
3233 return 0;
3234 } else {
3235 struct dl *d;
3236 if (!currentconf)
3237 for (d = ddf->dlist; d; d=d->next)
3238 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3239 return __write_init_super_ddf(st);
3240 }
3241 }
3242
3243 #endif
3244
3245 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3246 unsigned long long data_offset)
3247 {
3248 /* We must reserve the last 32Meg */
3249 if (devsize <= 32*1024*2)
3250 return 0;
3251 return devsize - 32*1024*2;
3252 }
3253
3254 #ifndef MDASSEMBLE
3255
3256 static int reserve_space(struct supertype *st, int raiddisks,
3257 unsigned long long size, int chunk,
3258 unsigned long long *freesize)
3259 {
3260 /* Find 'raiddisks' spare extents at least 'size' big (but
3261 * only caring about multiples of 'chunk') and remember
3262 * them. If size==0, find the largest size possible.
3263 * Report available size in *freesize
3264 * If space cannot be found, fail.
3265 */
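/* Hypothetical example: with raiddisks == 3, size == 0 and largest free
 * extents of 120, 100 and 80 GiB on the three disks, the code below settles
 * on 80 GiB - the biggest size available on at least 'raiddisks' devices -
 * and then rounds it down to a multiple of 'chunk'.
 */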
3266 struct dl *dl;
3267 struct ddf_super *ddf = st->sb;
3268 int cnt = 0;
3269
3270 for (dl = ddf->dlist; dl ; dl=dl->next) {
3271 dl->raiddisk = -1;
3272 dl->esize = 0;
3273 }
3274 /* Now find largest extent on each device */
3275 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3276 struct extent *e = get_extents(ddf, dl);
3277 unsigned long long pos = 0;
3278 int i = 0;
3279 int found = 0;
3280 unsigned long long minsize = size;
3281
3282 if (size == 0)
3283 minsize = chunk;
3284
3285 if (!e)
3286 continue;
3287 do {
3288 unsigned long long esize;
3289 esize = e[i].start - pos;
3290 if (esize >= minsize) {
3291 found = 1;
3292 minsize = esize;
3293 }
3294 pos = e[i].start + e[i].size;
3295 i++;
3296 } while (e[i-1].size);
3297 if (found) {
3298 cnt++;
3299 dl->esize = minsize;
3300 }
3301 free(e);
3302 }
3303 if (cnt < raiddisks) {
3304 pr_err("not enough devices with space to create array.\n");
3305 return 0; /* Not enough devices with a large enough free extent */
3306 }
3307 if (size == 0) {
3308 /* choose the largest size that is available on at least 'raiddisks' devices */
3309 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3310 struct dl *dl2;
3311 if (dl->esize <= size)
3312 continue;
3313 /* This is bigger than 'size', see if there are enough */
3314 cnt = 0;
3315 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3316 if (dl2->esize >= dl->esize)
3317 cnt++;
3318 if (cnt >= raiddisks)
3319 size = dl->esize;
3320 }
3321 if (chunk) {
3322 size = size / chunk;
3323 size *= chunk;
3324 }
3325 *freesize = size;
3326 if (size < 32) {
3327 pr_err("not enough free space to create array.\n");
3328 return 0;
3329 }
3330 }
3331 /* We have a 'size' for which enough devices have free space.
3332 * Simply do a first-fit allocation. */
3333 cnt = 0;
3334 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3335 if (dl->esize < size)
3336 continue;
3337
3338 dl->raiddisk = cnt;
3339 cnt++;
3340 }
3341 return 1;
3342 }
3343
3344 static int validate_geometry_ddf(struct supertype *st,
3345 int level, int layout, int raiddisks,
3346 int *chunk, unsigned long long size,
3347 unsigned long long data_offset,
3348 char *dev, unsigned long long *freesize,
3349 int verbose)
3350 {
3351 int fd;
3352 struct mdinfo *sra;
3353 int cfd;
3354
3355 /* ddf potentially supports lots of things, but it depends on
3356 * what devices are offered (and maybe kernel version?)
3357 * If given unused devices, we will make a container.
3358 * If given devices in a container, we will make a BVD.
3359 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3360 */
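/* Rough decision flow below: LEVEL_CONTAINER is handled by
 * validate_geometry_ddf_container(); with dev == NULL we only check that the
 * level/layout maps onto DDF and, inside a container, that enough free space
 * exists; with an already-open container (st->sb) we defer to
 * validate_geometry_ddf_bvd(); otherwise the named device must turn out to
 * be a member of a DDF container.
 */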
3361
3362 if (*chunk == UnSet)
3363 *chunk = DEFAULT_CHUNK;
3364
3365 if (level == LEVEL_NONE)
3366 level = LEVEL_CONTAINER;
3367 if (level == LEVEL_CONTAINER) {
3368 /* Must be a fresh device to add to a container */
3369 return validate_geometry_ddf_container(st, level, layout,
3370 raiddisks, *chunk,
3371 size, data_offset, dev,
3372 freesize,
3373 verbose);
3374 }
3375
3376 if (!dev) {
3377 mdu_array_info_t array = {
3378 .level = level,
3379 .layout = layout,
3380 .raid_disks = raiddisks
3381 };
3382 struct vd_config conf;
3383 if (layout_md2ddf(&array, &conf) == -1) {
3384 if (verbose)
3385 pr_err("DDF does not support level %d/layout %d arrays with %d disks\n",
3386 level, layout, raiddisks);
3387 return 0;
3388 }
3389 /* Should check layout? etc */
3390
3391 if (st->sb && freesize) {
3392 /* --create was given a container to create in.
3393 * So we need to check that there are enough
3394 * free spaces and return the amount of space.
3395 * We may as well remember which drives were
3396 * chosen so that add_to_super/getinfo_super
3397 * can return them.
3398 */
3399 return reserve_space(st, raiddisks, size, *chunk, freesize);
3400 }
3401 return 1;
3402 }
3403
3404 if (st->sb) {
3405 /* A container has already been opened, so we are
3406 * creating in there. Maybe a BVD, maybe an SVD.
3407 * Should make a distinction one day.
3408 */
3409 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3410 chunk, size, data_offset, dev,
3411 freesize,
3412 verbose);
3413 }
3414 /* This is the first device for the array.
3415 * If it is a container, we read it in and do automagic allocations,
3416 * no other devices should be given.
3417 * Otherwise it must be a member device of a container, and we
3418 * do manual allocation.
3419 * Later we should check for a BVD and make an SVD.
3420 */
3421 fd = open(dev, O_RDONLY|O_EXCL, 0);
3422 if (fd >= 0) {
3423 sra = sysfs_read(fd, NULL, GET_VERSION);
3424 close(fd);
3425 if (sra && sra->array.major_version == -1 &&
3426 strcmp(sra->text_version, "ddf") == 0) {
3427 /* load super */
3428 /* find space for 'n' devices. */
3429 /* remember the devices */
3430 /* Somehow return the fact that we have enough */
3431 }
3432
3433 if (verbose)
3434 pr_err("ddf: Cannot create this array "
3435 "on device %s - a container is required.\n",
3436 dev);
3437 return 0;
3438 }
3439 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3440 if (verbose)
3441 pr_err("ddf: Cannot open %s: %s\n",
3442 dev, strerror(errno));
3443 return 0;
3444 }
3445 /* Well, it is in use by someone, maybe a 'ddf' container. */
3446 cfd = open_container(fd);
3447 if (cfd < 0) {
3448 close(fd);
3449 if (verbose)
3450 pr_err("ddf: Cannot use %s: %s\n",
3451 dev, strerror(EBUSY));
3452 return 0;
3453 }
3454 sra = sysfs_read(cfd, NULL, GET_VERSION);
3455 close(fd);
3456 if (sra && sra->array.major_version == -1 &&
3457 strcmp(sra->text_version, "ddf") == 0) {
3458 /* This is a member of a ddf container. Load the container
3459 * and try to create a bvd
3460 */
3461 struct ddf_super *ddf;
3462 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3463 st->sb = ddf;
3464 strcpy(st->container_devnm, fd2devnm(cfd));
3465 close(cfd);
3466 return validate_geometry_ddf_bvd(st, level, layout,
3467 raiddisks, chunk, size,
3468 data_offset,
3469 dev, freesize,
3470 verbose);
3471 }
3472 close(cfd);
3473 } else /* device may belong to a different container */
3474 return 0;
3475
3476 return 1;
3477 }
3478
3479 static int
3480 validate_geometry_ddf_container(struct supertype *st,
3481 int level, int layout, int raiddisks,
3482 int chunk, unsigned long long size,
3483 unsigned long long data_offset,
3484 char *dev, unsigned long long *freesize,
3485 int verbose)
3486 {
3487 int fd;
3488 unsigned long long ldsize;
3489
3490 if (level != LEVEL_CONTAINER)
3491 return 0;
3492 if (!dev)
3493 return 1;
3494
3495 fd = open(dev, O_RDONLY|O_EXCL, 0);
3496 if (fd < 0) {
3497 if (verbose)
3498 pr_err("ddf: Cannot open %s: %s\n",
3499 dev, strerror(errno));
3500 return 0;
3501 }
3502 if (!get_dev_size(fd, dev, &ldsize)) {
3503 close(fd);
3504 return 0;
3505 }
3506 close(fd);
3507
3508 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3509 if (*freesize == 0)
3510 return 0;
3511
3512 return 1;
3513 }
3514
3515 static int validate_geometry_ddf_bvd(struct supertype *st,
3516 int level, int layout, int raiddisks,
3517 int *chunk, unsigned long long size,
3518 unsigned long long data_offset,
3519 char *dev, unsigned long long *freesize,
3520 int verbose)
3521 {
3522 struct stat stb;
3523 struct ddf_super *ddf = st->sb;
3524 struct dl *dl;
3525 unsigned long long pos = 0;
3526 unsigned long long maxsize;
3527 struct extent *e;
3528 int i;
3529 /* ddf/bvd supports lots of things, but not containers */
3530 if (level == LEVEL_CONTAINER) {
3531 if (verbose)
3532 pr_err("DDF cannot create a container within a container\n");
3533 return 0;
3534 }
3535 /* We must have the container info already read in. */
3536 if (!ddf)
3537 return 0;
3538
3539 if (!dev) {
3540 /* General test: make sure there is space for
3541 * 'raiddisks' device extents of size 'size'.
3542 */
3543 unsigned long long minsize = size;
3544 int dcnt = 0;
3545 if (minsize == 0)
3546 minsize = 8;
3547 for (dl = ddf->dlist; dl ; dl = dl->next) {
3548 int found = 0;
3549 pos = 0;
3550
3551 i = 0;
3552 e = get_extents(ddf, dl);
3553 if (!e) continue;
3554 do {
3555 unsigned long long esize;
3556 esize = e[i].start - pos;
3557 if (esize >= minsize)
3558 found = 1;
3559 pos = e[i].start + e[i].size;
3560 i++;
3561 } while (e[i-1].size);
3562 if (found)
3563 dcnt++;
3564 free(e);
3565 }
3566 if (dcnt < raiddisks) {
3567 if (verbose)
3568 pr_err("ddf: Not enough devices with "
3569 "space for this array (%d < %d)\n",
3570 dcnt, raiddisks);
3571 return 0;
3572 }
3573 return 1;
3574 }
3575 /* This device must be a member of the set */
3576 if (stat(dev, &stb) < 0)
3577 return 0;
3578 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3579 return 0;
3580 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3581 if (dl->major == (int)major(stb.st_rdev) &&
3582 dl->minor == (int)minor(stb.st_rdev))
3583 break;
3584 }
3585 if (!dl) {
3586 if (verbose)
3587 pr_err("ddf: %s is not in the "
3588 "same DDF set\n",
3589 dev);
3590 return 0;
3591 }
3592 e = get_extents(ddf, dl);
3593 maxsize = 0;
3594 i = 0;
3595 if (e)
3596 do {
3597 unsigned long long esize;
3598 esize = e[i].start - pos;
3599 if (esize >= maxsize)
3600 maxsize = esize;
3601 pos = e[i].start + e[i].size;
3602 i++;
3603 } while (e[i-1].size);
3604 *freesize = maxsize;
3605 // FIXME here I am
3606
3607 return 1;
3608 }
3609
3610 static int load_super_ddf_all(struct supertype *st, int fd,
3611 void **sbp, char *devname)
3612 {
3613 struct mdinfo *sra;
3614 struct ddf_super *super;
3615 struct mdinfo *sd, *best = NULL;
3616 int bestseq = 0;
3617 int seq;
3618 char nm[20];
3619 int dfd;
3620
3621 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3622 if (!sra)
3623 return 1;
3624 if (sra->array.major_version != -1 ||
3625 sra->array.minor_version != -2 ||
3626 strcmp(sra->text_version, "ddf") != 0)
3627 return 1;
3628
3629 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3630 return 1;
3631 memset(super, 0, sizeof(*super));
3632
3633 /* first, try each device, and choose the best ddf */
3634 for (sd = sra->devs ; sd ; sd = sd->next) {
3635 int rv;
3636 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3637 dfd = dev_open(nm, O_RDONLY);
3638 if (dfd < 0)
3639 return 2;
3640 rv = load_ddf_headers(dfd, super, NULL);
3641 close(dfd);
3642 if (rv == 0) {
3643 seq = be32_to_cpu(super->active->seq);
3644 if (super->active->openflag)
3645 seq--;
3646 if (!best || seq > bestseq) {
3647 bestseq = seq;
3648 best = sd;
3649 }
3650 }
3651 }
3652 if (!best)
3653 return 1;
3654 /* OK, load this ddf */
3655 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3656 dfd = dev_open(nm, O_RDONLY);
3657 if (dfd < 0)
3658 return 1;
3659 load_ddf_headers(dfd, super, NULL);
3660 load_ddf_global(dfd, super, NULL);
3661 close(dfd);
3662 /* Now we need the device-local bits */
3663 for (sd = sra->devs ; sd ; sd = sd->next) {
3664 int rv;
3665
3666 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3667 dfd = dev_open(nm, O_RDWR);
3668 if (dfd < 0)
3669 return 2;
3670 rv = load_ddf_headers(dfd, super, NULL);
3671 if (rv == 0)
3672 rv = load_ddf_local(dfd, super, NULL, 1);
3673 if (rv)
3674 return 1;
3675 }
3676
3677 *sbp = super;
3678 if (st->ss == NULL) {
3679 st->ss = &super_ddf;
3680 st->minor_version = 0;
3681 st->max_devs = 512;
3682 }
3683 strcpy(st->container_devnm, fd2devnm(fd));
3684 return 0;
3685 }
3686
3687 static int load_container_ddf(struct supertype *st, int fd,
3688 char *devname)
3689 {
3690 return load_super_ddf_all(st, fd, &st->sb, devname);
3691 }
3692
3693 #endif /* MDASSEMBLE */
3694
3695 static int check_secondary(const struct vcl *vc)
3696 {
3697 const struct vd_config *conf = &vc->conf;
3698 int i;
3699
3700 /* The only DDF secondary RAID level md can support is
3701 * RAID 10, if the stripe sizes and Basic volume sizes
3702 * are all equal.
3703 * Other configurations could in theory be supported by exposing
3704 * the BVDs to user space and using device mapper for the secondary
3705 * mapping. So far we don't support that.
3706 */
3707
3708 __u64 sec_elements[4] = {0, 0, 0, 0};
3709 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1ULL<<((n)&63)))
3710 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1ULL<<((n)&63))) != 0)
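/* sec_elements acts as a 256-bit bitmap indexed by sec_elmnt_seq; the
 * helpers above set/test bit (n & 63) of word n >> 6, so the shift has to be
 * done in 64 bits (hence 1ULL).
 */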
3711
3712 if (vc->other_bvds == NULL) {
3713 pr_err("No BVDs for secondary RAID found\n");
3714 return -1;
3715 }
3716 if (conf->prl != DDF_RAID1) {
3717 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3718 return -1;
3719 }
3720 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3721 pr_err("Secondary RAID level %d is unsupported\n",
3722 conf->srl);
3723 return -1;
3724 }
3725 __set_sec_seen(conf->sec_elmnt_seq);
3726 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3727 const struct vd_config *bvd = vc->other_bvds[i];
3728 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3729 continue;
3730 if (bvd->srl != conf->srl) {
3731 pr_err("Inconsistent secondary RAID level across BVDs\n");
3732 return -1;
3733 }
3734 if (bvd->prl != conf->prl) {
3735 pr_err("Different RAID levels for BVDs are unsupported\n");
3736 return -1;
3737 }
3738 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3739 pr_err("All BVDs must have the same number of primary elements\n");
3740 return -1;
3741 }
3742 if (bvd->chunk_shift != conf->chunk_shift) {
3743 pr_err("Different strip sizes for BVDs are unsupported\n");
3744 return -1;
3745 }
3746 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3747 pr_err("Different BVD sizes are unsupported\n");
3748 return -1;
3749 }
3750 __set_sec_seen(bvd->sec_elmnt_seq);
3751 }
3752 for (i = 0; i < conf->sec_elmnt_count; i++) {
3753 if (!__was_sec_seen(i)) {
3754 pr_err("BVD %d is missing\n", i);
3755 return -1;
3756 }
3757 }
3758 return 0;
3759 }
3760
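/* Map a physical disk reference number to its raid disk slot within the
 * (possibly two-level) virtual disk described by 'vc'. On success the BVD
 * holding the disk and its index in that BVD's phys_refnum table are passed
 * back via 'bvd' and 'idx'; DDF_NOTFOUND means the refnum is not part of
 * this VD.
 */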
3761 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3762 be32 refnum, unsigned int nmax,
3763 const struct vd_config **bvd,
3764 unsigned int *idx)
3765 {
3766 unsigned int i, j, n, sec, cnt;
3767
3768 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3769 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3770
3771 for (i = 0, j = 0 ; i < nmax ; i++) {
3772 /* j counts valid entries for this BVD */
3773 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3774 *bvd = &vc->conf;
3775 *idx = i;
3776 return sec * cnt + j;
3777 }
3778 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3779 j++;
3780 }
3781 if (vc->other_bvds == NULL)
3782 goto bad;
3783
3784 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3785 struct vd_config *vd = vc->other_bvds[n-1];
3786 sec = vd->sec_elmnt_seq;
3787 if (sec == DDF_UNUSED_BVD)
3788 continue;
3789 for (i = 0, j = 0 ; i < nmax ; i++) {
3790 if (be32_eq(vd->phys_refnum[i], refnum)) {
3791 *bvd = vd;
3792 *idx = i;
3793 return sec * cnt + j;
3794 }
3795 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3796 j++;
3797 }
3798 }
3799 bad:
3800 *bvd = NULL;
3801 return DDF_NOTFOUND;
3802 }
3803
3804 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3805 {
3806 /* Given a container loaded by load_super_ddf_all,
3807 * extract information about all the arrays into
3808 * an mdinfo tree.
3809 *
3810 * For each vcl in conflist: create an mdinfo, fill it in,
3811 * then look for matching devices (phys_refnum) in dlist
3812 * and create appropriate device mdinfo.
3813 */
3814 struct ddf_super *ddf = st->sb;
3815 struct mdinfo *rest = NULL;
3816 struct vcl *vc;
3817
3818 for (vc = ddf->conflist ; vc ; vc=vc->next) {
3819 unsigned int i;
3820 struct mdinfo *this;
3821 char *ep;
3822 __u32 *cptr;
3823 unsigned int pd;
3824
3825 if (subarray &&
3826 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3827 *ep != '\0'))
3828 continue;
3829
3830 if (vc->conf.sec_elmnt_count > 1) {
3831 if (check_secondary(vc) != 0)
3832 continue;
3833 }
3834
3835 this = xcalloc(1, sizeof(*this));
3836 this->next = rest;
3837 rest = this;
3838
3839 if (layout_ddf2md(&vc->conf, &this->array))
3840 continue;
3841 this->array.md_minor = -1;
3842 this->array.major_version = -1;
3843 this->array.minor_version = -2;
3844 this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
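/* VD GUIDs created by this driver embed the (1980-based) creation
 * time at byte offset 16, so adding DECADE recovers a Unix ctime.
 */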
3845 cptr = (__u32 *)(vc->conf.guid + 16);
3846 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3847 this->array.utime = DECADE +
3848 be32_to_cpu(vc->conf.timestamp);
3849 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3850
3851 i = vc->vcnum;
3852 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3853 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3854 DDF_init_full) {
3855 this->array.state = 0;
3856 this->resync_start = 0;
3857 } else {
3858 this->array.state = 1;
3859 this->resync_start = MaxSector;
3860 }
3861 _ddf_array_name(this->name, ddf, i);
3862 memset(this->uuid, 0, sizeof(this->uuid));
3863 this->component_size = be64_to_cpu(vc->conf.blocks);
3864 this->array.size = this->component_size / 2;
3865 this->container_member = i;
3866
3867 ddf->currentconf = vc;
3868 uuid_from_super_ddf(st, this->uuid);
3869 if (!subarray)
3870 ddf->currentconf = NULL;
3871
3872 sprintf(this->text_version, "/%s/%d",
3873 st->container_devnm, this->container_member);
3874
3875 for (pd = 0; pd < be16_to_cpu(ddf->phys->max_pdes); pd++) {
3876 struct mdinfo *dev;
3877 struct dl *d;
3878 const struct vd_config *bvd;
3879 unsigned int iphys;
3880 int stt;
3881
3882 if (be32_to_cpu(ddf->phys->entries[pd].refnum)
3883 == 0xFFFFFFFF)
3884 continue;
3885
3886 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3887 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3888 != DDF_Online)
3889 continue;
3890
3891 i = get_pd_index_from_refnum(
3892 vc, ddf->phys->entries[pd].refnum,
3893 ddf->mppe, &bvd, &iphys);
3894 if (i == DDF_NOTFOUND)
3895 continue;
3896
3897 this->array.working_disks++;
3898
3899 for (d = ddf->dlist; d ; d=d->next)
3900 if (be32_eq(d->disk.refnum,
3901 ddf->phys->entries[pd].refnum))
3902 break;
3903 if (d == NULL)
3904 /* Haven't found that one yet, maybe there are others */
3905 continue;
3906
3907 dev = xcalloc(1, sizeof(*dev));
3908 dev->next = this->devs;
3909 this->devs = dev;
3910
3911 dev->disk.number = be32_to_cpu(d->disk.refnum);
3912 dev->disk.major = d->major;
3913 dev->disk.minor = d->minor;
3914 dev->disk.raid_disk = i;
3915 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3916 dev->recovery_start = MaxSector;
3917
3918 dev->events = be32_to_cpu(ddf->active->seq);
3919 dev->data_offset =
3920 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3921 dev->component_size = be64_to_cpu(bvd->blocks);
3922 if (d->devname)
3923 strcpy(dev->name, d->devname);
3924 }
3925 }
3926 return rest;
3927 }
3928
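/* Write DDF metadata to the device opened as 'fd'.  If the superblock
 * has been fully loaded (disk or conf lists exist), the device must be
 * one of the container members and the complete metadata is written to
 * it.  Otherwise only a zeroed 512-byte block is written at the end of
 * the device, erasing any anchor that may be present there.
 */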
3929 static int store_super_ddf(struct supertype *st, int fd)
3930 {
3931 struct ddf_super *ddf = st->sb;
3932 unsigned long long dsize;
3933 void *buf;
3934 int rc;
3935
3936 if (!ddf)
3937 return 1;
3938
3939 if (!get_dev_size(fd, NULL, &dsize))
3940 return 1;
3941
3942 if (ddf->dlist || ddf->conflist) {
3943 struct stat sta;
3944 struct dl *dl;
3945 int ofd, ret;
3946
3947 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3948 pr_err("%s: file descriptor for invalid device\n",
3949 __func__);
3950 return 1;
3951 }
3952 for (dl = ddf->dlist; dl; dl = dl->next)
3953 if (dl->major == (int)major(sta.st_rdev) &&
3954 dl->minor == (int)minor(sta.st_rdev))
3955 break;
3956 if (!dl) {
3957 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3958 (int)major(sta.st_rdev),
3959 (int)minor(sta.st_rdev));
3960 return 1;
3961 }
3962 ofd = dl->fd;
3963 dl->fd = fd;
3964 ret = (_write_super_to_disk(ddf, dl) != 1);
3965 dl->fd = ofd;
3966 return ret;
3967 }
3968
3969 if (posix_memalign(&buf, 512, 512) != 0)
3970 return 1;
3971 memset(buf, 0, 512);
3972
3973 lseek64(fd, dsize-512, 0);
3974 rc = write(fd, buf, 512);
3975 free(buf);
3976 if (rc < 0)
3977 return 1;
3978 return 0;
3979 }
3980
3981 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3982 {
3983 /*
3984 * return:
3985 * 0 same, or first was empty, and second was copied
3986 * 1 second had wrong magic number - but that isn't possible
3987 * 2 wrong uuid
3988 * 3 wrong other info
3989 */
3990 struct ddf_super *first = st->sb;
3991 struct ddf_super *second = tst->sb;
3992 struct dl *dl1, *dl2;
3993 struct vcl *vl1, *vl2;
3994 unsigned int max_vds, max_pds, pd, vd;
3995
3996 if (!first) {
3997 st->sb = tst->sb;
3998 tst->sb = NULL;
3999 return 0;
4000 }
4001
4002 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
4003 return 2;
4004
4005 /* It is only OK to compare info in the anchor. Anything else
4006 * could be changing due to a reconfig so must be ignored.
4007 * guid really should be enough anyway.
4008 */
4009
4010 if (!be32_eq(first->active->seq, second->active->seq)) {
4011 dprintf("%s: sequence number mismatch %u<->%u\n", __func__,
4012 be32_to_cpu(first->active->seq),
4013 be32_to_cpu(second->active->seq));
4014 return 0;
4015 }
4016
4017 /*
4018 * At this point we are fairly sure that the meta data matches.
4019 * But the new disk may contain additional local data.
4020 * Add it to the super block.
4021 */
4022 max_vds = be16_to_cpu(first->active->max_vd_entries);
4023 max_pds = be16_to_cpu(first->phys->max_pdes);
4024 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
4025 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
4026 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
4027 DDF_GUID_LEN))
4028 break;
4029 if (vl1) {
4030 if (vl1->other_bvds != NULL &&
4031 vl1->conf.sec_elmnt_seq !=
4032 vl2->conf.sec_elmnt_seq) {
4033 dprintf("%s: adding BVD %u\n", __func__,
4034 vl2->conf.sec_elmnt_seq);
4035 add_other_bvd(vl1, &vl2->conf,
4036 first->conf_rec_len*512);
4037 }
4038 continue;
4039 }
4040
4041 if (posix_memalign((void **)&vl1, 512,
4042 (first->conf_rec_len*512 +
4043 offsetof(struct vcl, conf))) != 0) {
4044 pr_err("%s could not allocate vcl buf\n",
4045 __func__);
4046 return 3;
4047 }
4048
4049 vl1->next = first->conflist;
4050 vl1->block_sizes = NULL;
4051 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
4052 if (alloc_other_bvds(first, vl1) != 0) {
4053 pr_err("%s could not allocate other bvds\n",
4054 __func__);
4055 free(vl1);
4056 return 3;
4057 }
4058 for (vd = 0; vd < max_vds; vd++)
4059 if (!memcmp(first->virt->entries[vd].guid,
4060 vl1->conf.guid, DDF_GUID_LEN))
4061 break;
4062 vl1->vcnum = vd;
4063 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
4064 first->conflist = vl1;
4065 }
4066
4067 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
4068 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
4069 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
4070 break;
4071 if (dl1)
4072 continue;
4073
4074 if (posix_memalign((void **)&dl1, 512,
4075 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
4076 != 0) {
4077 pr_err("%s could not allocate disk info buffer\n",
4078 __func__);
4079 return 3;
4080 }
4081 memcpy(dl1, dl2, sizeof(*dl1));
4082 dl1->mdupdate = NULL;
4083 dl1->next = first->dlist;
4084 dl1->fd = -1;
4085 for (pd = 0; pd < max_pds; pd++)
4086 if (be32_eq(first->phys->entries[pd].refnum,
4087 dl1->disk.refnum))
4088 break;
4089 dl1->pdnum = pd < max_pds ? (int)pd : -1;
4090 if (dl2->spare) {
4091 if (posix_memalign((void **)&dl1->spare, 512,
4092 first->conf_rec_len*512) != 0) {
4093 pr_err("%s could not allocate spare info buf\n",
4094 __func__);
4095 return 3;
4096 }
4097 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
4098 }
4099 for (vd = 0 ; vd < first->max_part ; vd++) {
4100 if (!dl2->vlist[vd]) {
4101 dl1->vlist[vd] = NULL;
4102 continue;
4103 }
4104 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
4105 if (!memcmp(vl1->conf.guid,
4106 dl2->vlist[vd]->conf.guid,
4107 DDF_GUID_LEN))
4108 break;
/* vl1 is the matching conf in 'first', or NULL if no match was found */
4109 dl1->vlist[vd] = vl1;
4111 }
4112 first->dlist = dl1;
4113 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
4114 be32_to_cpu(dl1->disk.refnum));
4115 }
4116
4117 return 0;
4118 }
4119
4120 #ifndef MDASSEMBLE
4121 /*
4122 * A new array 'a' has been started which claims to be instance 'inst'
4123 * within container 'c'.
4124 * We need to confirm that the array matches the metadata in 'c' so
4125 * that we don't corrupt any metadata.
4126 */
4127 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
4128 {
4129 struct ddf_super *ddf = c->sb;
4130 int n = atoi(inst);
4131 struct mdinfo *dev;
4132 struct dl *dl;
4133 static const char faulty[] = "faulty";
4134
4135 if (all_ff(ddf->virt->entries[n].guid)) {
4136 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
4137 return -ENODEV;
4138 }
4139 dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
4140 guid_str(ddf->virt->entries[n].guid));
4141 for (dev = a->info.devs; dev; dev = dev->next) {
4142 for (dl = ddf->dlist; dl; dl = dl->next)
4143 if (dl->major == dev->disk.major &&
4144 dl->minor == dev->disk.minor)
4145 break;
4146 if (!dl || dl->pdnum < 0) {
4147 pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
4148 __func__, dev->disk.major, dev->disk.minor, n);
4149 return -1;
4150 }
4151 if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
4152 (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
4153 pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
4154 __func__, n, dl->major, dl->minor,
4155 be16_to_cpu(
4156 ddf->phys->entries[dl->pdnum].state));
4157 if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
4158 sizeof(faulty) - 1)
4159 pr_err("Write to state_fd failed\n");
4160 dev->curr_state = DS_FAULTY;
4161 }
4162 }
4163 a->info.container_member = n;
4164 return 0;
4165 }
4166
4167 static void handle_missing(struct ddf_super *ddf, struct active_array *a, int inst)
4168 {
4169 /* This member array is being activated. If any devices
4170 * are missing they must now be marked as failed.
4171 */
4172 struct vd_config *vc;
4173 unsigned int n_bvd;
4174 struct vcl *vcl;
4175 struct dl *dl;
4176 int pd;
4177 int n;
4178 int state;
4179
4180 for (n = 0; ; n++) {
4181 vc = find_vdcr(ddf, inst, n, &n_bvd, &vcl);
4182 if (!vc)
4183 break;
4184 for (dl = ddf->dlist; dl; dl = dl->next)
4185 if (be32_eq(dl->disk.refnum, vc->phys_refnum[n_bvd]))
4186 break;
4187 if (dl)
4188 /* Found this disk, so not missing */
4189 continue;
4190
4191 /* Mark the device as failed/missing. */
4192 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4193 if (pd >= 0 && be16_and(ddf->phys->entries[pd].state,
4194 cpu_to_be16(DDF_Online))) {
4195 be16_clear(ddf->phys->entries[pd].state,
4196 cpu_to_be16(DDF_Online));
4197 be16_set(ddf->phys->entries[pd].state,
4198 cpu_to_be16(DDF_Failed|DDF_Missing));
4199 vc->phys_refnum[n_bvd] = cpu_to_be32(0);
4200 ddf_set_updates_pending(ddf, vc);
4201 }
4202
4203 /* Mark the array as Degraded */
4204 state = get_svd_state(ddf, vcl);
4205 if (ddf->virt->entries[inst].state !=
4206 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4207 | state)) {
4208 ddf->virt->entries[inst].state =
4209 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4210 | state;
4211 a->check_degraded = 1;
4212 ddf_set_updates_pending(ddf, vc);
4213 }
4214 }
4215 }
4216
4217 /*
4218 * The array 'a' is to be marked clean in the metadata.
4219 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4220 * clean up to that point (in sectors). If that cannot be recorded in the
4221 * metadata, then leave it as dirty.
4222 *
4223 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4224 * !global! virtual_disk.virtual_entry structure.
4225 */
4226 static int ddf_set_array_state(struct active_array *a, int consistent)
4227 {
4228 struct ddf_super *ddf = a->container->sb;
4229 int inst = a->info.container_member;
4230 int old = ddf->virt->entries[inst].state;
4231 if (consistent == 2) {
4232 handle_missing(ddf, a, inst);
4233 /* Should check if a recovery should be started FIXME */
4234 consistent = 1;
4235 if (!is_resync_complete(&a->info))
4236 consistent = 0;
4237 }
4238 if (consistent)
4239 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4240 else
4241 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4242 if (old != ddf->virt->entries[inst].state)
4243 ddf_set_updates_pending(ddf, NULL);
4244
4245 old = ddf->virt->entries[inst].init_state;
4246 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4247 if (is_resync_complete(&a->info))
4248 ddf->virt->entries[inst].init_state |= DDF_init_full;
4249 else if (a->info.resync_start == 0)
4250 ddf->virt->entries[inst].init_state |= DDF_init_not;
4251 else
4252 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4253 if (old != ddf->virt->entries[inst].init_state)
4254 ddf_set_updates_pending(ddf, NULL);
4255
4256 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4257 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4258 consistent?"clean":"dirty",
4259 a->info.resync_start);
4260 return consistent;
4261 }
4262
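/* Compute the DDF state (optimal/part_optimal/degraded/failed) of a
 * single BVD by counting how many of its primary elements are backed
 * by an Online physical disk and applying the redundancy rules of the
 * primary RAID level.
 */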
4263 static int get_bvd_state(const struct ddf_super *ddf,
4264 const struct vd_config *vc)
4265 {
4266 unsigned int i, n_bvd, working = 0;
4267 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4268 int pd, st, state;
4269 char *avail = xcalloc(1, n_prim);
4270 mdu_array_info_t array;
4271
4272 layout_ddf2md(vc, &array);
4273
4274 for (i = 0; i < n_prim; i++) {
4275 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4276 continue;
4277 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4278 if (pd < 0)
4279 continue;
4280 st = be16_to_cpu(ddf->phys->entries[pd].state);
4281 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4282 == DDF_Online) {
4283 working++;
4284 avail[i] = 1;
4285 }
4286 }
4287
4288 state = DDF_state_degraded;
4289 if (working == n_prim)
4290 state = DDF_state_optimal;
4291 else
4292 switch (vc->prl) {
4293 case DDF_RAID0:
4294 case DDF_CONCAT:
4295 case DDF_JBOD:
4296 state = DDF_state_failed;
4297 break;
4298 case DDF_RAID1:
4299 if (working == 0)
4300 state = DDF_state_failed;
4301 else if (working >= 2)
4302 state = DDF_state_part_optimal;
4303 break;
4304 case DDF_RAID1E:
4305 if (!enough(10, n_prim, array.layout, 1, avail))
4306 state = DDF_state_failed;
4307 break;
4308 case DDF_RAID4:
4309 case DDF_RAID5:
4310 if (working < n_prim - 1)
4311 state = DDF_state_failed;
4312 break;
4313 case DDF_RAID6:
4314 if (working < n_prim - 2)
4315 state = DDF_state_failed;
4316 else if (working == n_prim - 1)
4317 state = DDF_state_part_optimal;
4318 break;
4319 }
free(avail);
4320 return state;
4321 }
4322
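/* Combine the states of two BVDs according to the secondary RAID
 * level: for a mirrored SRL one optimal BVD keeps the data available,
 * while for striped/spanned SRLs a single failed BVD fails the whole
 * virtual disk.
 */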
4323 static int secondary_state(int state, int other, int seclevel)
4324 {
4325 if (state == DDF_state_optimal && other == DDF_state_optimal)
4326 return DDF_state_optimal;
4327 if (seclevel == DDF_2MIRRORED) {
4328 if (state == DDF_state_optimal || other == DDF_state_optimal)
4329 return DDF_state_part_optimal;
4330 if (state == DDF_state_failed && other == DDF_state_failed)
4331 return DDF_state_failed;
4332 return DDF_state_degraded;
4333 } else {
4334 if (state == DDF_state_failed || other == DDF_state_failed)
4335 return DDF_state_failed;
4336 if (state == DDF_state_degraded || other == DDF_state_degraded)
4337 return DDF_state_degraded;
4338 return DDF_state_part_optimal;
4339 }
4340 }
4341
4342 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4343 {
4344 int state = get_bvd_state(ddf, &vcl->conf);
4345 unsigned int i;
4346 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4347 state = secondary_state(
4348 state,
4349 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4350 vcl->conf.srl);
4351 }
4352 return state;
4353 }
4354
4355 /*
4356 * The state of each disk is stored in the global phys_disk structure
4357 * in phys_disk.entries[n].state.
4358 * This makes various combinations awkward.
4359 * - When a device fails in any array, it must be failed in all arrays
4360 * that include a part of this device.
4361 * - When a component is rebuilding, we cannot include it officially in the
4362 * array unless this is the only array that uses the device.
4363 *
4364 * So: when transitioning:
4365 * Online -> failed, just set failed flag. monitor will propagate
4366 * spare -> online, the device might need to be added to the array.
4367 * spare -> failed, just set failed. Don't worry if in array or not.
4368 */
4369 static void ddf_set_disk(struct active_array *a, int n, int state)
4370 {
4371 struct ddf_super *ddf = a->container->sb;
4372 unsigned int inst = a->info.container_member, n_bvd;
4373 struct vcl *vcl;
4374 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4375 &n_bvd, &vcl);
4376 int pd;
4377 struct mdinfo *mdi;
4378 struct dl *dl;
4379 int update = 0;
4380
4381 dprintf("%s: %d to %x\n", __func__, n, state);
4382 if (vc == NULL) {
4383 dprintf("ddf: cannot find instance %d!!\n", inst);
4384 return;
4385 }
4386 /* Find the matching slot in 'info'. */
4387 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4388 if (mdi->disk.raid_disk == n)
4389 break;
4390 if (!mdi) {
4391 pr_err("%s: cannot find raid disk %d\n",
4392 __func__, n);
4393 return;
4394 }
4395
4396 /* and find the 'dl' entry corresponding to that. */
4397 for (dl = ddf->dlist; dl; dl = dl->next)
4398 if (mdi->state_fd >= 0 &&
4399 mdi->disk.major == dl->major &&
4400 mdi->disk.minor == dl->minor)
4401 break;
4402 if (!dl) {
4403 pr_err("%s: cannot find raid disk %d (%d/%d)\n",
4404 __func__, n,
4405 mdi->disk.major, mdi->disk.minor);
4406 return;
4407 }
4408
4409 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4410 if (pd < 0 || pd != dl->pdnum) {
4411 /* disk doesn't currently exist or has changed.
4412 * If it is now in_sync, insert it. */
4413 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4414 __func__, dl->pdnum, dl->major, dl->minor,
4415 be32_to_cpu(dl->disk.refnum));
4416 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4417 __func__, inst, n_bvd,
4418 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4419 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4420 pd = dl->pdnum; /* FIXME: is this really correct ? */
4421 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4422 LBA_OFFSET(ddf, vc)[n_bvd] =
4423 cpu_to_be64(mdi->data_offset);
4424 be16_clear(ddf->phys->entries[pd].type,
4425 cpu_to_be16(DDF_Global_Spare));
4426 be16_set(ddf->phys->entries[pd].type,
4427 cpu_to_be16(DDF_Active_in_VD));
4428 update = 1;
4429 }
4430 } else {
4431 be16 old = ddf->phys->entries[pd].state;
4432 if (state & DS_FAULTY)
4433 be16_set(ddf->phys->entries[pd].state,
4434 cpu_to_be16(DDF_Failed));
4435 if (state & DS_INSYNC) {
4436 be16_set(ddf->phys->entries[pd].state,
4437 cpu_to_be16(DDF_Online));
4438 be16_clear(ddf->phys->entries[pd].state,
4439 cpu_to_be16(DDF_Rebuilding));
4440 }
4441 if (!be16_eq(old, ddf->phys->entries[pd].state))
4442 update = 1;
4443 }
4444
4445 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4446 be32_to_cpu(dl->disk.refnum), state,
4447 be16_to_cpu(ddf->phys->entries[pd].state));
4448
4449 /* Now we need to check the state of the array and update
4450 * virtual_disk.entries[n].state.
4451 * It needs to be one of "optimal", "degraded", "failed".
4452 * I don't understand 'deleted' or 'missing'.
4453 */
4454 state = get_svd_state(ddf, vcl);
4455
4456 if (ddf->virt->entries[inst].state !=
4457 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4458 | state)) {
4459 ddf->virt->entries[inst].state =
4460 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4461 | state;
4462 update = 1;
4463 }
4464 if (update)
4465 ddf_set_updates_pending(ddf, vc);
4466 }
4467
4468 static void ddf_sync_metadata(struct supertype *st)
4469 {
4470 /*
4471 * Write all data to all devices.
4472 * Later, we might be able to track whether only local changes
4473 * have been made, or whether any global data has been changed,
4474 * but ddf is sufficiently weird that it probably always
4475 * changes global data ....
4476 */
4477 struct ddf_super *ddf = st->sb;
4478 if (!ddf->updates_pending)
4479 return;
4480 ddf->updates_pending = 0;
4481 __write_init_super_ddf(st);
4482 dprintf("ddf: sync_metadata\n");
4483 }
4484
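/* Unlink conf records whose GUID matches from the list.
 * Returns 1 if at least one entry was removed, 0 otherwise.
 * The removed vcl itself is not freed here.
 */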
4485 static int del_from_conflist(struct vcl **list, const char *guid)
4486 {
4487 struct vcl **p;
4488 int found = 0;
4489 for (p = list; p && *p; p = &((*p)->next))
4490 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4491 found = 1;
4492 *p = (*p)->next;
4493 }
4494 return found;
4495 }
4496
4497 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4498 {
4499 struct dl *dl;
4500 unsigned int vdnum, i;
4501 vdnum = find_vde_by_guid(ddf, guid);
4502 if (vdnum == DDF_NOTFOUND) {
4503 pr_err("%s: could not find VD %s\n", __func__,
4504 guid_str(guid));
4505 return -1;
4506 }
4507 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4508 pr_err("%s: could not find conf %s\n", __func__,
4509 guid_str(guid));
4510 return -1;
4511 }
4512 for (dl = ddf->dlist; dl; dl = dl->next)
4513 for (i = 0; i < ddf->max_part; i++)
4514 if (dl->vlist[i] != NULL &&
4515 !memcmp(dl->vlist[i]->conf.guid, guid,
4516 DDF_GUID_LEN))
4517 dl->vlist[i] = NULL;
4518 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4519 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4520 return 0;
4521 }
4522
4523 static int kill_subarray_ddf(struct supertype *st)
4524 {
4525 struct ddf_super *ddf = st->sb;
4526 /*
4527 * currentconf is set in container_content_ddf,
4528 * called with subarray arg
4529 */
4530 struct vcl *victim = ddf->currentconf;
4531 struct vd_config *conf;
4532 unsigned int vdnum;
4533
4534 ddf->currentconf = NULL;
4535 if (!victim) {
4536 pr_err("%s: nothing to kill\n", __func__);
4537 return -1;
4538 }
4539 conf = &victim->conf;
4540 vdnum = find_vde_by_guid(ddf, conf->guid);
4541 if (vdnum == DDF_NOTFOUND) {
4542 pr_err("%s: could not find VD %s\n", __func__,
4543 guid_str(conf->guid));
4544 return -1;
4545 }
4546 if (st->update_tail) {
4547 struct virtual_disk *vd;
4548 int len = sizeof(struct virtual_disk)
4549 + sizeof(struct virtual_entry);
4550 vd = xmalloc(len);
4551 if (vd == NULL) {
4552 pr_err("%s: failed to allocate %d bytes\n", __func__,
4553 len);
4554 return -1;
4555 }
4556 memset(vd, 0 , len);
4557 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4558 vd->populated_vdes = cpu_to_be16(0);
4559 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4560 /* we use DDF_state_deleted as marker */
4561 vd->entries[0].state = DDF_state_deleted;
4562 append_metadata_update(st, vd, len);
4563 } else {
4564 _kill_subarray_ddf(ddf, conf->guid);
4565 ddf_set_updates_pending(ddf, NULL);
4566 ddf_sync_metadata(st);
4567 }
4568 return 0;
4569 }
4570
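/* Find the BVD in 'update' whose sec_elmnt_seq matches 'conf' and copy
 * its phys_refnum[] table, together with the LBA offset table that
 * follows it (mppe * (4 + 8) bytes), into the existing configuration.
 */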
4571 static void copy_matching_bvd(struct ddf_super *ddf,
4572 struct vd_config *conf,
4573 const struct metadata_update *update)
4574 {
4575 unsigned int mppe =
4576 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4577 unsigned int len = ddf->conf_rec_len * 512;
4578 char *p;
4579 struct vd_config *vc;
4580 for (p = update->buf; p < update->buf + update->len; p += len) {
4581 vc = (struct vd_config *) p;
4582 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4583 memcpy(conf->phys_refnum, vc->phys_refnum,
4584 mppe * (sizeof(__u32) + sizeof(__u64)));
4585 return;
4586 }
4587 }
4588 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4589 conf->sec_elmnt_seq, guid_str(conf->guid));
4590 }
4591
4592 static void ddf_process_update(struct supertype *st,
4593 struct metadata_update *update)
4594 {
4595 /* Apply this update to the metadata.
4596 * The first 4 bytes are a DDF_*_MAGIC which guides
4597 * our actions.
4598 * Possible updates are:
4599 * DDF_PHYS_RECORDS_MAGIC
4600 * Add a new physical device or remove an old one.
4601 * Changes to this record only happen implicitly.
4602 * used_pdes is the device number.
4603 * DDF_VIRT_RECORDS_MAGIC
4604 * Add a new VD. Possibly also change the 'access' bits.
4605 * populated_vdes is the entry number.
4606 * DDF_VD_CONF_MAGIC
4607 * New or updated VD. The VIRT_RECORD must already
4608 * exist. For an update, phys_refnum and lba_offset
4609 * (at least) are updated, and the VD_CONF must
4610 * be written to precisely those devices listed with
4611 * a phys_refnum.
4612 * DDF_SPARE_ASSIGN_MAGIC
4613 * replacement Spare Assignment Record... but for which device?
4614 *
4615 * So, e.g.:
4616 * - to create a new array, we send a VIRT_RECORD and
4617 * a VD_CONF. Then assemble and start the array.
4618 * - to activate a spare we send a VD_CONF to add the phys_refnum
4619 * and offset. This will also mark the spare as active with
4620 * a spare-assignment record.
4621 */
4622 struct ddf_super *ddf = st->sb;
4623 be32 *magic = (be32 *)update->buf;
4624 struct phys_disk *pd;
4625 struct virtual_disk *vd;
4626 struct vd_config *vc;
4627 struct vcl *vcl;
4628 struct dl *dl;
4629 unsigned int ent;
4630 unsigned int pdnum, pd2, len;
4631
4632 dprintf("Process update %x\n", be32_to_cpu(*magic));
4633
4634 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4635 if (update->len != (sizeof(struct phys_disk) +
4636 sizeof(struct phys_disk_entry)))
4637 return;
4638 pd = (struct phys_disk*)update->buf;
4639
4640 ent = be16_to_cpu(pd->used_pdes);
4641 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4642 return;
4643 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4644 struct dl **dlp;
4645 /* removing this disk. */
4646 be16_set(ddf->phys->entries[ent].state,
4647 cpu_to_be16(DDF_Missing));
4648 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4649 struct dl *dl = *dlp;
4650 if (dl->pdnum == (signed)ent) {
4651 close(dl->fd);
4652 dl->fd = -1;
4653 /* FIXME this doesn't free
4654 * dl->devname */
4655 update->space = dl;
4656 *dlp = dl->next;
4657 break;
4658 }
4659 }
4660 ddf_set_updates_pending(ddf, NULL);
4661 return;
4662 }
4663 if (!all_ff(ddf->phys->entries[ent].guid))
4664 return;
4665 ddf->phys->entries[ent] = pd->entries[0];
4666 ddf->phys->used_pdes = cpu_to_be16
4667 (1 + be16_to_cpu(ddf->phys->used_pdes));
4668 ddf_set_updates_pending(ddf, NULL);
4669 if (ddf->add_list) {
4670 struct active_array *a;
4671 struct dl *al = ddf->add_list;
4672 ddf->add_list = al->next;
4673
4674 al->next = ddf->dlist;
4675 ddf->dlist = al;
4676
4677 /* As a device has been added, we should check
4678 * for any degraded devices that might make
4679 * use of this spare */
4680 for (a = st->arrays ; a; a=a->next)
4681 a->check_degraded = 1;
4682 }
4683 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4684 if (update->len != (sizeof(struct virtual_disk) +
4685 sizeof(struct virtual_entry)))
4686 return;
4687 vd = (struct virtual_disk*)update->buf;
4688
4689 if (vd->entries[0].state == DDF_state_deleted) {
4690 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4691 return;
4692 } else {
4693 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4694 if (ent != DDF_NOTFOUND) {
4695 dprintf("%s: VD %s exists already in slot %d\n",
4696 __func__, guid_str(vd->entries[0].guid),
4697 ent);
4698 return;
4699 }
4700 ent = find_unused_vde(ddf);
4701 if (ent == DDF_NOTFOUND)
4702 return;
4703 ddf->virt->entries[ent] = vd->entries[0];
4704 ddf->virt->populated_vdes =
4705 cpu_to_be16(
4706 1 + be16_to_cpu(
4707 ddf->virt->populated_vdes));
4708 dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
4709 __func__, guid_str(vd->entries[0].guid), ent,
4710 ddf->virt->entries[ent].state,
4711 ddf->virt->entries[ent].init_state);
4712 }
4713 ddf_set_updates_pending(ddf, NULL);
4714 }
4715
4716 else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4717 vc = (struct vd_config*)update->buf;
4718 len = ddf->conf_rec_len * 512;
4719 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4720 pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
4721 __func__, guid_str(vc->guid), update->len,
4722 vc->sec_elmnt_count);
4723 return;
4724 }
4725 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4726 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4727 break;
4728 dprintf("%s: conf update for %s (%s)\n", __func__,
4729 guid_str(vc->guid), (vcl ? "old" : "new"));
4730 if (vcl) {
4731 /* An update, just copy the phys_refnum and lba_offset
4732 * fields
4733 */
4734 unsigned int i;
4735 unsigned int k;
4736 copy_matching_bvd(ddf, &vcl->conf, update);
4737 for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
4738 dprintf("BVD %u has %08x at %llu\n", 0,
4739 be32_to_cpu(vcl->conf.phys_refnum[k]),
4740 be64_to_cpu(LBA_OFFSET(ddf,
4741 &vcl->conf)[k]));
4742 for (i = 1; i < vc->sec_elmnt_count; i++) {
4743 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4744 update);
4745 for (k = 0; k < be16_to_cpu(
4746 vc->prim_elmnt_count); k++)
4747 dprintf("BVD %u has %08x at %llu\n", i,
4748 be32_to_cpu
4749 (vcl->other_bvds[i-1]->
4750 phys_refnum[k]),
4751 be64_to_cpu
4752 (LBA_OFFSET
4753 (ddf,
4754 vcl->other_bvds[i-1])[k]));
4755 }
4756 } else {
4757 /* A new VD_CONF */
4758 unsigned int i;
4759 if (!update->space)
4760 return;
4761 vcl = update->space;
4762 update->space = NULL;
4763 vcl->next = ddf->conflist;
4764 memcpy(&vcl->conf, vc, len);
4765 ent = find_vde_by_guid(ddf, vc->guid);
4766 if (ent == DDF_NOTFOUND)
4767 return;
4768 vcl->vcnum = ent;
4769 ddf->conflist = vcl;
4770 for (i = 1; i < vc->sec_elmnt_count; i++)
4771 memcpy(vcl->other_bvds[i-1],
4772 update->buf + len * i, len);
4773 }
4774 /* Set DDF_Transition on all Failed devices - to help
4775 * us detect those that are no longer in use
4776 */
4777 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
4778 pdnum++)
4779 if (be16_and(ddf->phys->entries[pdnum].state,
4780 cpu_to_be16(DDF_Failed)))
4781 be16_set(ddf->phys->entries[pdnum].state,
4782 cpu_to_be16(DDF_Transition));
4783 /* Now make sure vlist is correct for each dl. */
4784 for (dl = ddf->dlist; dl; dl = dl->next) {
4785 unsigned int vn = 0;
4786 int in_degraded = 0;
4787
4788 if (dl->pdnum < 0)
4789 continue;
4790 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4791 unsigned int dn, ibvd;
4792 const struct vd_config *conf;
4793 int vstate;
4794 dn = get_pd_index_from_refnum(vcl,
4795 dl->disk.refnum,
4796 ddf->mppe,
4797 &conf, &ibvd);
4798 if (dn == DDF_NOTFOUND)
4799 continue;
4800 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4801 dl->pdnum,
4802 be32_to_cpu(dl->disk.refnum),
4803 guid_str(conf->guid),
4804 conf->sec_elmnt_seq, vn);
4805 /* Clear the Transition flag */
4806 if (be16_and
4807 (ddf->phys->entries[dl->pdnum].state,
4808 cpu_to_be16(DDF_Failed)))
4809 be16_clear(ddf->phys
4810 ->entries[dl->pdnum].state,
4811 cpu_to_be16(DDF_Transition));
4812 dl->vlist[vn++] = vcl;
4813 vstate = ddf->virt->entries[vcl->vcnum].state
4814 & DDF_state_mask;
4815 if (vstate == DDF_state_degraded ||
4816 vstate == DDF_state_part_optimal)
4817 in_degraded = 1;
4818 }
4819 while (vn < ddf->max_part)
4820 dl->vlist[vn++] = NULL;
4821 if (dl->vlist[0]) {
4822 be16_clear(ddf->phys->entries[dl->pdnum].type,
4823 cpu_to_be16(DDF_Global_Spare));
4824 if (!be16_and(ddf->phys
4825 ->entries[dl->pdnum].type,
4826 cpu_to_be16(DDF_Active_in_VD))) {
4827 be16_set(ddf->phys
4828 ->entries[dl->pdnum].type,
4829 cpu_to_be16(DDF_Active_in_VD));
4830 if (in_degraded)
4831 be16_set(ddf->phys
4832 ->entries[dl->pdnum]
4833 .state,
4834 cpu_to_be16
4835 (DDF_Rebuilding));
4836 }
4837 }
4838 if (dl->spare) {
4839 be16_clear(ddf->phys->entries[dl->pdnum].type,
4840 cpu_to_be16(DDF_Global_Spare));
4841 be16_set(ddf->phys->entries[dl->pdnum].type,
4842 cpu_to_be16(DDF_Spare));
4843 }
4844 if (!dl->vlist[0] && !dl->spare) {
4845 be16_set(ddf->phys->entries[dl->pdnum].type,
4846 cpu_to_be16(DDF_Global_Spare));
4847 be16_clear(ddf->phys->entries[dl->pdnum].type,
4848 cpu_to_be16(DDF_Spare));
4849 be16_clear(ddf->phys->entries[dl->pdnum].type,
4850 cpu_to_be16(DDF_Active_in_VD));
4851 }
4852 }
4853
4854 /* Now remove any 'Failed' devices that are not part
4855 * of any VD. They will have the Transition flag set.
4856 * Once done, we need to update all dl->pdnum numbers.
4857 */
4858 pd2 = 0;
4859 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
4860 pdnum++) {
4861 if (be32_to_cpu(ddf->phys->entries[pdnum].refnum) ==
4862 0xFFFFFFFF)
4863 continue;
4864 if (be16_and(ddf->phys->entries[pdnum].state,
4865 cpu_to_be16(DDF_Failed))
4866 && be16_and(ddf->phys->entries[pdnum].state,
4867 cpu_to_be16(DDF_Transition))) {
4868 /* skip this one unless in dlist*/
4869 for (dl = ddf->dlist; dl; dl = dl->next)
4870 if (dl->pdnum == (int)pdnum)
4871 break;
4872 if (!dl)
4873 continue;
4874 }
4875 if (pdnum == pd2)
4876 pd2++;
4877 else {
4878 ddf->phys->entries[pd2] =
4879 ddf->phys->entries[pdnum];
4880 for (dl = ddf->dlist; dl; dl = dl->next)
4881 if (dl->pdnum == (int)pdnum)
4882 dl->pdnum = pd2;
4883 pd2++;
4884 }
4885 }
4886 ddf->phys->used_pdes = cpu_to_be16(pd2);
4887 while (pd2 < pdnum) {
4888 memset(ddf->phys->entries[pd2].guid, 0xff,
4889 DDF_GUID_LEN);
4890 pd2++;
4891 }
4892
4893 ddf_set_updates_pending(ddf, vc);
4894 }
4895 /* case DDF_SPARE_ASSIGN_MAGIC */
4896 }
4897
4898 static void ddf_prepare_update(struct supertype *st,
4899 struct metadata_update *update)
4900 {
4901 /* This update arrived at managemon.
4902 * We are about to pass it to monitor.
4903 * If a malloc is needed, do it here.
4904 */
4905 struct ddf_super *ddf = st->sb;
4906 be32 *magic = (be32 *)update->buf;
4907 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4908 struct vcl *vcl;
4909 struct vd_config *conf = (struct vd_config *) update->buf;
4910 if (posix_memalign(&update->space, 512,
4911 offsetof(struct vcl, conf)
4912 + ddf->conf_rec_len * 512) != 0) {
4913 update->space = NULL;
4914 return;
4915 }
4916 vcl = update->space;
4917 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4918 if (alloc_other_bvds(ddf, vcl) != 0) {
4919 free(update->space);
4920 update->space = NULL;
4921 }
4922 }
4923 }
4924
4925 /*
4926 * Check degraded state of a RAID10.
4927 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4928 */
4929 static int raid10_degraded(struct mdinfo *info)
4930 {
4931 int n_prim, n_bvds;
4932 int i;
4933 struct mdinfo *d;
4934 char *found;
4935 int ret = -1;
4936
4937 n_prim = info->array.layout & ~0x100;
4938 n_bvds = info->array.raid_disks / n_prim;
4939 found = xmalloc(n_bvds);
4940 if (found == NULL)
4941 return ret;
4942 memset(found, 0, n_bvds);
4943 for (d = info->devs; d; d = d->next) {
4944 i = d->disk.raid_disk / n_prim;
4945 if (i >= n_bvds) {
4946 pr_err("%s: BUG: invalid raid disk\n", __func__);
4947 goto out;
4948 }
4949 if (d->state_fd > 0)
4950 found[i]++;
4951 }
4952 ret = 2;
4953 for (i = 0; i < n_bvds; i++)
4954 if (!found[i]) {
4955 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4956 ret = 0;
4957 goto out;
4958 } else if (found[i] < n_prim) {
4959 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4960 n_bvds);
4961 ret = 1;
4962 }
4963 out:
4964 free(found);
4965 return ret;
4966 }
4967
4968 /*
4969 * Check if the array 'a' is degraded but not failed.
4970 * If it is, find as many spares as are available and needed and
4971 * arrange for their inclusion.
4972 * We only choose devices which are not already in the array,
4973 * and prefer those with a spare-assignment to this array.
4974 * Otherwise we choose global spares - assuming always that
4975 * there is enough room.
4976 * For each spare that we assign, we return an 'mdinfo' which
4977 * describes the position for the device in the array.
4978 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4979 * the new phys_refnum and lba_offset values.
4980 *
4981 * Only worry about BVDs at the moment.
4982 */
4983 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4984 struct metadata_update **updates)
4985 {
4986 int working = 0;
4987 struct mdinfo *d;
4988 struct ddf_super *ddf = a->container->sb;
4989 int global_ok = 0;
4990 struct mdinfo *rv = NULL;
4991 struct mdinfo *di;
4992 struct metadata_update *mu;
4993 struct dl *dl;
4994 int i;
4995 unsigned int j;
4996 struct vcl *vcl;
4997 struct vd_config *vc;
4998 unsigned int n_bvd;
4999
5000 for (d = a->info.devs ; d ; d = d->next) {
5001 if ((d->curr_state & DS_FAULTY) &&
5002 d->state_fd >= 0)
5003 /* wait for Removal to happen */
5004 return NULL;
5005 if (d->state_fd >= 0)
5006 working ++;
5007 }
5008
5009 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
5010 a->info.array.raid_disks,
5011 a->info.array.level);
5012 if (working == a->info.array.raid_disks)
5013 return NULL; /* array not degraded */
5014 switch (a->info.array.level) {
5015 case 1:
5016 if (working == 0)
5017 return NULL; /* failed */
5018 break;
5019 case 4:
5020 case 5:
5021 if (working < a->info.array.raid_disks - 1)
5022 return NULL; /* failed */
5023 break;
5024 case 6:
5025 if (working < a->info.array.raid_disks - 2)
5026 return NULL; /* failed */
5027 break;
5028 case 10:
5029 if (raid10_degraded(&a->info) < 1)
5030 return NULL;
5031 break;
5032 default: /* concat or stripe */
5033 return NULL; /* failed */
5034 }
5035
5036 /* For each slot, if it is not working, find a spare */
5037 dl = ddf->dlist;
5038 for (i = 0; i < a->info.array.raid_disks; i++) {
5039 for (d = a->info.devs ; d ; d = d->next)
5040 if (d->disk.raid_disk == i)
5041 break;
5042 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
5043 if (d && (d->state_fd >= 0))
5044 continue;
5045
5046 /* OK, this device needs recovery. Find a spare */
5047 again:
5048 for ( ; dl ; dl = dl->next) {
5049 unsigned long long esize;
5050 unsigned long long pos;
5051 struct mdinfo *d2;
5052 int is_global = 0;
5053 int is_dedicated = 0;
5054 struct extent *ex;
5055 unsigned int j;
5056 be16 state;
5057
5058 if (dl->pdnum < 0)
5059 continue;
5060 state = ddf->phys->entries[dl->pdnum].state;
5061 if (be16_and(state,
5062 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
5063 !be16_and(state,
5064 cpu_to_be16(DDF_Online)))
5065 continue;
5066
5067 /* If in this array, skip */
5068 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
5069 if (d2->state_fd >= 0 &&
5070 d2->disk.major == dl->major &&
5071 d2->disk.minor == dl->minor) {
5072 dprintf("%x:%x (%08x) already in array\n",
5073 dl->major, dl->minor,
5074 be32_to_cpu(dl->disk.refnum));
5075 break;
5076 }
5077 if (d2)
5078 continue;
5079 if (be16_and(ddf->phys->entries[dl->pdnum].type,
5080 cpu_to_be16(DDF_Spare))) {
5081 /* Check spare assign record */
5082 if (dl->spare) {
5083 if (dl->spare->type & DDF_spare_dedicated) {
5084 /* check spare_ents for guid */
5085 for (j = 0 ;
5086 j < be16_to_cpu
5087 (dl->spare
5088 ->populated);
5089 j++) {
5090 if (memcmp(dl->spare->spare_ents[j].guid,
5091 ddf->virt->entries[a->info.container_member].guid,
5092 DDF_GUID_LEN) == 0)
5093 is_dedicated = 1;
5094 }
5095 } else
5096 is_global = 1;
5097 }
5098 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
5099 cpu_to_be16(DDF_Global_Spare))) {
5100 is_global = 1;
5101 } else if (!be16_and(ddf->phys
5102 ->entries[dl->pdnum].state,
5103 cpu_to_be16(DDF_Failed))) {
5104 /* we can possibly use some of this */
5105 is_global = 1;
5106 }
5107 if ( ! (is_dedicated ||
5108 (is_global && global_ok))) {
5109 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
5110 is_dedicated, is_global);
5111 continue;
5112 }
5113
5114 /* We are allowed to use this device - is there space?
5115 * We need a->info.component_size sectors */
5116 ex = get_extents(ddf, dl);
5117 if (!ex) {
5118 dprintf("cannot get extents\n");
5119 continue;
5120 }
5121 j = 0; pos = 0;
5122 esize = 0;
5123
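/* Walk the list of used extents looking for a free gap of at
 * least component_size sectors.  'pos' is the end of the previous
 * extent; the list ends with a zero-size sentinel entry.
 */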
5124 do {
5125 esize = ex[j].start - pos;
5126 if (esize >= a->info.component_size)
5127 break;
5128 pos = ex[j].start + ex[j].size;
5129 j++;
5130 } while (ex[j-1].size);
5131
5132 free(ex);
5133 if (esize < a->info.component_size) {
5134 dprintf("%x:%x has no room: %llu %llu\n",
5135 dl->major, dl->minor,
5136 esize, a->info.component_size);
5137 /* No room */
5138 continue;
5139 }
5140
5141 /* Cool, we have a device with some space at pos */
5142 di = xcalloc(1, sizeof(*di));
5143 di->disk.number = i;
5144 di->disk.raid_disk = i;
5145 di->disk.major = dl->major;
5146 di->disk.minor = dl->minor;
5147 di->disk.state = 0;
5148 di->recovery_start = 0;
5149 di->data_offset = pos;
5150 di->component_size = a->info.component_size;
5151 di->next = rv;
5152 rv = di;
5153 dprintf("%x:%x (%08x) to be %d at %llu\n",
5154 dl->major, dl->minor,
5155 be32_to_cpu(dl->disk.refnum), i, pos);
5156
5157 break;
5158 }
5159 if (!dl && ! global_ok) {
5160 /* not enough dedicated spares, try global */
5161 global_ok = 1;
5162 dl = ddf->dlist;
5163 goto again;
5164 }
5165 }
5166
5167 if (!rv)
5168 /* No spares found */
5169 return rv;
5170 /* Now 'rv' has a list of devices to return.
5171 * Create a metadata_update record to update the
5172 * phys_refnum and lba_offset values
5173 */
5174 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5175 &n_bvd, &vcl);
5176 if (vc == NULL)
5177 return NULL;
5178
5179 mu = xmalloc(sizeof(*mu));
5180 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5181 free(mu);
5182 return NULL;
5183 }
5184
5185 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5186 mu->buf = xmalloc(mu->len);
5187 mu->space = NULL;
5188 mu->space_list = NULL;
5189 mu->next = *updates;
5190 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5191 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5192 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5193 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5194
5195 vc = (struct vd_config*)mu->buf;
5196 for (di = rv ; di ; di = di->next) {
5197 unsigned int i_sec, i_prim;
5198 i_sec = di->disk.raid_disk
5199 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5200 i_prim = di->disk.raid_disk
5201 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5202 vc = (struct vd_config *)(mu->buf
5203 + i_sec * ddf->conf_rec_len * 512);
5204 for (dl = ddf->dlist; dl; dl = dl->next)
5205 if (dl->major == di->disk.major
5206 && dl->minor == di->disk.minor)
5207 break;
5208 if (!dl || dl->pdnum < 0) {
5209 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
5210 __func__, di->disk.raid_disk,
5211 di->disk.major, di->disk.minor);
5212 return NULL;
5213 }
5214 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5215 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5216 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5217 be32_to_cpu(vc->phys_refnum[i_prim]),
5218 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5219 }
5220 *updates = mu;
5221 return rv;
5222 }
5223 #endif /* MDASSEMBLE */
5224
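/* Default md layout for a given RAID level when the user did not
 * specify one: left-symmetric for RAID5, rotating-N-continue for
 * RAID6, and near-2 (0x102) for RAID10.
 */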
5225 static int ddf_level_to_layout(int level)
5226 {
5227 switch(level) {
5228 case 0:
5229 case 1:
5230 return 0;
5231 case 5:
5232 return ALGORITHM_LEFT_SYMMETRIC;
5233 case 6:
5234 return ALGORITHM_ROTATING_N_CONTINUE;
5235 case 10:
5236 return 0x102;
5237 default:
5238 return UnSet;
5239 }
5240 }
5241
5242 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5243 {
5244 if (level && *level == UnSet)
5245 *level = LEVEL_CONTAINER;
5246
5247 if (level && layout && *layout == UnSet)
5248 *layout = ddf_level_to_layout(*level);
5249 }
5250
5251 struct superswitch super_ddf = {
5252 #ifndef MDASSEMBLE
5253 .examine_super = examine_super_ddf,
5254 .brief_examine_super = brief_examine_super_ddf,
5255 .brief_examine_subarrays = brief_examine_subarrays_ddf,
5256 .export_examine_super = export_examine_super_ddf,
5257 .detail_super = detail_super_ddf,
5258 .brief_detail_super = brief_detail_super_ddf,
5259 .validate_geometry = validate_geometry_ddf,
5260 .write_init_super = write_init_super_ddf,
5261 .add_to_super = add_to_super_ddf,
5262 .remove_from_super = remove_from_super_ddf,
5263 .load_container = load_container_ddf,
5264 .copy_metadata = copy_metadata_ddf,
5265 .kill_subarray = kill_subarray_ddf,
5266 #endif
5267 .match_home = match_home_ddf,
5268 .uuid_from_super= uuid_from_super_ddf,
5269 .getinfo_super = getinfo_super_ddf,
5270 .update_super = update_super_ddf,
5271
5272 .avail_size = avail_size_ddf,
5273
5274 .compare_super = compare_super_ddf,
5275
5276 .load_super = load_super_ddf,
5277 .init_super = init_super_ddf,
5278 .store_super = store_super_ddf,
5279 .free_super = free_super_ddf,
5280 .match_metadata_desc = match_metadata_desc_ddf,
5281 .container_content = container_content_ddf,
5282 .default_geometry = default_geometry_ddf,
5283
5284 .external = 1,
5285
5286 #ifndef MDASSEMBLE
5287 /* for mdmon */
5288 .open_new = ddf_open_new,
5289 .set_array_state= ddf_set_array_state,
5290 .set_disk = ddf_set_disk,
5291 .sync_metadata = ddf_sync_metadata,
5292 .process_update = ddf_process_update,
5293 .prepare_update = ddf_prepare_update,
5294 .activate_spare = ddf_activate_spare,
5295 #endif
5296 .name = "ddf",
5297 };