/*
 * super-ddf.c (from thirdparty/mdadm.git)
 * Commit context: "DDF: fix usage of ->used_pdes"
 */
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2014 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
/* a non-official T10 name for creation GUIDs */
static char T10[] = "Linux-MD";

/* DDF timestamps are 1980 based, so we need to add
 * second-in-decade-of-seventies to convert to linux timestamps.
 * 10 years with 2 leap years.
 */
#define DECADE (3600*24*(365*10+2))
/* zlib-style CRC32 used for all DDF section checksums;
 * implemented elsewhere in mdadm. */
unsigned long crc32(
	unsigned long crc,
	const unsigned char *buf,
	unsigned len);

/* Sentinel return values for index-lookup helpers. */
#define DDF_NOTFOUND (~0U)
#define DDF_CONTAINER (DDF_NOTFOUND-1)

/* Default for safe_mode_delay. Same value as for IMSM.
 */
static const int DDF_SAFE_MODE_DELAY = 4000;
53
54 /* The DDF metadata handling.
55 * DDF metadata lives at the end of the device.
56 * The last 512 byte block provides an 'anchor' which is used to locate
57 * the rest of the metadata which usually lives immediately behind the anchor.
58 *
59 * Note:
60 * - all multibyte numeric fields are bigendian.
61 * - all strings are space padded.
62 *
63 */
64
/* DDF stores every multibyte numeric field big-endian.  Wrapping the
 * raw storage in single-member structs makes CPU-order and disk-order
 * values distinct types, so the compiler catches accidental mixing.
 */
typedef struct __be16 {
	__u16 _v16;
} be16;
/* Bit operations act on the raw storage and are endian-agnostic. */
#define be16_eq(x, y) ((x)._v16 == (y)._v16)
#define be16_and(x, y) ((x)._v16 & (y)._v16)
#define be16_or(x, y) ((x)._v16 | (y)._v16)
#define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
#define be16_set(x, y) ((x)._v16 |= (y)._v16)

typedef struct __be32 {
	__u32 _v32;
} be32;
#define be32_eq(x, y) ((x)._v32 == (y)._v32)

typedef struct __be64 {
	__u64 _v64;
} be64;
#define be64_eq(x, y) ((x)._v64 == (y)._v64)

/* Converters between the wrapped disk-order types and CPU order. */
#define be16_to_cpu(be) __be16_to_cpu((be)._v16)
static inline be16 cpu_to_be16(__u16 x)
{
	be16 be = { ._v16 = __cpu_to_be16(x) };
	return be;
}

#define be32_to_cpu(be) __be32_to_cpu((be)._v32)
static inline be32 cpu_to_be32(__u32 x)
{
	be32 be = { ._v32 = __cpu_to_be32(x) };
	return be;
}

#define be64_to_cpu(be) __be64_to_cpu((be)._v64)
static inline be64 cpu_to_be64(__u64 x)
{
	be64 be = { ._v64 = __cpu_to_be64(x) };
	return be;
}
104
/* Primary Raid Level (PRL) */
#define DDF_RAID0	0x00
#define DDF_RAID1	0x01
#define DDF_RAID3	0x03
#define DDF_RAID4	0x04
#define DDF_RAID5	0x05
#define DDF_RAID1E	0x11
#define DDF_JBOD	0x0f
#define DDF_CONCAT	0x1f
#define DDF_RAID5E	0x15
#define DDF_RAID5EE	0x25
#define DDF_RAID6	0x06

/* Raid Level Qualifier (RLQ) - refines the PRL; meaning depends on
 * which primary level it accompanies. */
#define DDF_RAID0_SIMPLE	0x00
#define DDF_RAID1_SIMPLE	0x00 /* just 2 devices in this plex */
#define DDF_RAID1_MULTI		0x01 /* exactly 3 devices in this plex */
#define DDF_RAID3_0		0x00 /* parity in first extent */
#define DDF_RAID3_N		0x01 /* parity in last extent */
#define DDF_RAID4_0		0x00 /* parity in first extent */
#define DDF_RAID4_N		0x01 /* parity in last extent */
/* these apply to raid5e and raid5ee as well */
#define DDF_RAID5_0_RESTART	0x00 /* same as 'right asymmetric' - layout 1 */
#define DDF_RAID6_0_RESTART	0x01 /* raid6 different from raid5 here!!! */
#define DDF_RAID5_N_RESTART	0x02 /* same as 'left asymmetric' - layout 0 */
#define DDF_RAID5_N_CONTINUE	0x03 /* same as 'left symmetric' - layout 2 */

#define DDF_RAID1E_ADJACENT	0x00 /* raid10 nearcopies==2 */
#define DDF_RAID1E_OFFSET	0x01 /* raid10 offsetcopies==2 */

/* Secondary RAID Level (SRL) - how multiple plexes are combined */
#define DDF_2STRIPED	0x00 /* This is weirder than RAID0 !! */
#define DDF_2MIRRORED	0x01
#define DDF_2CONCAT	0x02
#define DDF_2SPANNED	0x03 /* This is also weird - be careful */

/* Magic numbers - stored big-endian on disk, hence cpu_to_be32 */
#define DDF_HEADER_MAGIC	cpu_to_be32(0xDE11DE11)
#define DDF_CONTROLLER_MAGIC	cpu_to_be32(0xAD111111)
#define DDF_PHYS_RECORDS_MAGIC	cpu_to_be32(0x22222222)
#define DDF_PHYS_DATA_MAGIC	cpu_to_be32(0x33333333)
#define DDF_VIRT_RECORDS_MAGIC	cpu_to_be32(0xDDDDDDDD)
#define DDF_VD_CONF_MAGIC	cpu_to_be32(0xEEEEEEEE)
#define DDF_SPARE_ASSIGN_MAGIC	cpu_to_be32(0x55555555)
#define DDF_VU_CONF_MAGIC	cpu_to_be32(0x88888888)
#define DDF_VENDOR_LOG_MAGIC	cpu_to_be32(0x01dBEEF0)
#define DDF_BBM_LOG_MAGIC	cpu_to_be32(0xABADB10C)

#define DDF_GUID_LEN	24
#define DDF_REVISION_0	"01.00.00"
#define DDF_REVISION_2	"01.02.00"
156
/* The on-disk DDF header.  The same layout is used for the anchor
 * (read from the device's last sector - see load_ddf_headers) and for
 * the primary/secondary headers it points at.  Section offsets and
 * lengths below are in sectors, relative to the header's own LBA
 * (see load_section).
 */
struct ddf_header {
	be32	magic;		/* DDF_HEADER_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	char	revision[8];	/* 01.02.00 */
	be32	seq;		/* starts at '1' */
	be32	timestamp;
	__u8	openflag;
	__u8	foreignflag;
	__u8	enforcegroups;
	__u8	pad0;		/* 0xff */
	__u8	pad1[12];	/* 12 * 0xff */
	/* 64 bytes so far */
	__u8	header_ext[32];	/* reserved: fill with 0xff */
	be64	primary_lba;
	be64	secondary_lba;
	__u8	type;		/* DDF_HEADER_ANCHOR/PRIMARY/SECONDARY */
	__u8	pad2[3];	/* 0xff */
	be32	workspace_len;	/* sectors for vendor space -
				 * at least 32768(sectors) */
	be64	workspace_lba;
	be16	max_pd_entries;	/* one of 15, 63, 255, 1023, 4095 */
	be16	max_vd_entries;	/* 2^(4,6,8,10,12)-1 : i.e. as above */
	be16	max_partitions;	/* i.e. max num of configuration
				   record entries per disk */
	be16	config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
				      *12/512) */
	be16	max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
	__u8	pad3[54];	/* 0xff */
	/* 192 bytes so far */
	be32	controller_section_offset;
	be32	controller_section_length;
	be32	phys_section_offset;
	be32	phys_section_length;
	be32	virt_section_offset;
	be32	virt_section_length;
	be32	config_section_offset;
	be32	config_section_length;
	be32	data_section_offset;
	be32	data_section_length;
	be32	bbm_section_offset;
	be32	bbm_section_length;
	be32	diag_space_offset;
	be32	diag_space_length;
	be32	vendor_offset;
	be32	vendor_length;
	/* 256 bytes so far */
	__u8	pad4[256];	/* 0xff */
};

/* type field */
#define DDF_HEADER_ANCHOR	0x00
#define DDF_HEADER_PRIMARY	0x01
#define DDF_HEADER_SECONDARY	0x02
211
/* The content of the 'controller section' - global scope */
struct ddf_controller_data {
	be32	magic;		/* DDF_CONTROLLER_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	/* PCI-style identification of the controller that wrote this
	 * metadata. */
	struct controller_type {
		be16	vendor_id;
		be16	device_id;
		be16	sub_vendor_id;
		be16	sub_device_id;
	} type;
	char	product_id[16];
	__u8	pad[8];		/* 0xff */
	__u8	vendor_data[448];
};
227
/* The content of phys_section - global scope */
struct phys_disk {
	be32	magic;		/* DDF_PHYS_RECORDS_MAGIC */
	be32	crc;
	be16	used_pdes;	/* This is a counter, not a max - the list
				 * of used entries may not be dense */
	be16	max_pdes;
	__u8	pad[52];
	struct phys_disk_entry {
		char	guid[DDF_GUID_LEN];
		be32	refnum;
		be16	type;	/* DDF_Forced_PD_GUID etc. below */
		be16	state;	/* DDF_Online etc. below */
		be64	config_size; /* DDF structures must be after here */
		char	path[18];	/* Another horrible structure really
					 * but is "used for information
					 * purposes only" */
		__u8	pad[6];
	} entries[0];	/* variable-length tail; pre-C99 flexible array */
};

/* phys_disk_entry.type is a bitmap - bigendian remember */
#define DDF_Forced_PD_GUID		1
#define DDF_Active_in_VD		2
#define DDF_Global_Spare		4 /* VD_CONF records are ignored */
#define DDF_Spare			8 /* overrides Global_spare */
#define DDF_Foreign			16
#define DDF_Legacy			32 /* no DDF on this device */

#define DDF_Interface_mask		0xf00
#define DDF_Interface_SCSI		0x100
#define DDF_Interface_SAS		0x200
#define DDF_Interface_SATA		0x300
#define DDF_Interface_FC		0x400

/* phys_disk_entry.state is a bigendian bitmap */
#define DDF_Online			1
#define DDF_Failed			2 /* overrides 1,4,8 */
#define DDF_Rebuilding			4
#define DDF_Transition			8
#define DDF_SMART			16
#define DDF_ReadErrors			32
#define DDF_Missing			64
271
/* The content of the virt_section global scope */
struct virtual_disk {
	be32	magic;		/* DDF_VIRT_RECORDS_MAGIC */
	be32	crc;
	be16	populated_vdes;
	be16	max_vdes;
	__u8	pad[52];
	struct virtual_entry {
		char	guid[DDF_GUID_LEN];
		be16	unit;
		__u16	pad0;	/* 0xffff */
		be16	guid_crc;
		be16	type;	/* DDF_Shared etc. below */
		__u8	state;	/* DDF_state_* below */
		__u8	init_state; /* DDF_init* / DDF_access_* below */
		__u8	pad1[14];
		char	name[16];
	} entries[0];	/* variable-length tail; pre-C99 flexible array */
};

/* virtual_entry.type is a bitmap - bigendian */
#define DDF_Shared		1
#define DDF_Enforce_Groups	2
#define DDF_Unicode		4
#define DDF_Owner_Valid		8

/* virtual_entry.state is a bigendian bitmap */
#define DDF_state_mask		0x7
#define DDF_state_optimal	0x0
#define DDF_state_degraded	0x1
#define DDF_state_deleted	0x2
#define DDF_state_missing	0x3
#define DDF_state_failed	0x4
#define DDF_state_part_optimal	0x5

#define DDF_state_morphing	0x8
#define DDF_state_inconsistent	0x10

/* virtual_entry.init_state is a bigendian bitmap */
#define DDF_initstate_mask	0x03
#define DDF_init_not		0x00
#define DDF_init_quick		0x01 /* initialisation is progress.
				      * i.e. 'state_inconsistent' */
#define DDF_init_full		0x02

/* access bits share the init_state byte */
#define DDF_access_mask		0xc0
#define DDF_access_rw		0x00
#define DDF_access_ro		0x80
#define DDF_access_blocked	0xc0
321
/* The content of the config_section - local scope
 * It has multiple records each config_record_len sectors
 * They can be vd_config or spare_assign
 */

struct vd_config {
	be32	magic;		/* DDF_VD_CONF_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	be32	timestamp;
	be32	seqnum;
	__u8	pad0[24];
	be16	prim_elmnt_count;
	__u8	chunk_shift;	/* 0 == 512, 1==1024 etc */
	__u8	prl;		/* Primary RAID Level - DDF_RAID* */
	__u8	rlq;		/* RAID Level Qualifier */
	__u8	sec_elmnt_count;
	__u8	sec_elmnt_seq;
	__u8	srl;		/* Secondary RAID Level - DDF_2* */
	be64	blocks;		/* blocks per component could be different
				 * on different component devices...(only
				 * for concat I hope) */
	be64	array_blocks;	/* blocks in array */
	__u8	pad1[8];
	be32	spare_refs[8];
	__u8	cache_pol[8];
	__u8	bg_rate;
	__u8	pad2[3];
	__u8	pad3[52];
	__u8	pad4[192];
	__u8	v0[32];	/* reserved- 0xff */
	__u8	v1[32];	/* reserved- 0xff */
	__u8	v2[16];	/* reserved- 0xff */
	__u8	v3[16];	/* reserved- 0xff */
	__u8	vendor[32];
	be32	phys_refnum[0];	/* refnum of each disk in sequence */
	/*__u64	lba_offset[0];  LBA offset in each phys.  Note extents in a
		bvd are always the same size */
};
/* The lba_offset array lives right after mppe phys_refnum entries;
 * this macro locates it. */
#define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])

/* vd_config.cache_pol[7] is a bitmap */
#define DDF_cache_writeback	1	/* else writethrough */
#define DDF_cache_wadaptive	2	/* only applies if writeback */
#define DDF_cache_readahead	4
#define DDF_cache_radaptive	8	/* only if doing read-ahead */
#define DDF_cache_ifnobatt	16	/* even to write cache if battery is poor */
#define DDF_cache_wallowed	32	/* enable write caching */
#define DDF_cache_rallowed	64	/* enable read caching */
371
/* A spare-assignment record, found in the config_section alongside
 * vd_config records (distinguished by magic). */
struct spare_assign {
	be32	magic;		/* DDF_SPARE_ASSIGN_MAGIC */
	be32	crc;
	be32	timestamp;
	__u8	reserved[7];
	__u8	type;		/* DDF_spare_* bits below */
	be16	populated;	/* SAEs used */
	be16	max;		/* max SAEs */
	__u8	pad[8];
	struct spare_assign_entry {
		char	guid[DDF_GUID_LEN];
		be16	secondary_element;
		__u8	pad[6];
	} spare_ents[0];	/* variable-length tail */
};
/* spare_assign.type is a bitmap */
#define DDF_spare_dedicated	0x1	/* else global */
#define DDF_spare_revertible	0x2	/* else committable */
#define DDF_spare_active	0x4	/* else not active */
#define DDF_spare_affinity	0x8	/* enclosure affinity */
392
/* The data_section contents - local scope */
struct disk_data {
	be32	magic;		/* DDF_PHYS_DATA_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	be32	refnum;		/* crc of some magic drive data ... */
	__u8	forced_ref;	/* set when above was not result of magic */
	__u8	forced_guid;	/* set if guid was forced rather than magic */
	__u8	vendor[32];
	__u8	pad[442];
};
404
/* bbm_section content - bad block management log */
struct bad_block_log {
	be32	magic;
	be32	crc;
	be16	entry_count;
	be32	spare_count;
	__u8	pad[10];
	be64	first_spare;
	/* Each entry remaps a run of defective sectors to spare space. */
	struct mapped_block {
		be64	defective_start;
		be32	replacement_start;
		be16	remap_count;
		__u8	pad[2];
	} entries[0];	/* variable-length tail */
};
420
/* Struct for internally holding ddf structures */
/* The DDF structure stored on each device is potentially
 * quite different, as some data is global and some is local.
 * The global data is:
 *	- ddf header
 *	- controller_data
 *	- Physical disk records
 *	- Virtual disk records
 * The local data is:
 *	- Configuration records
 *	- Physical Disk data section
 * ( and Bad block and vendor which I don't care about yet).
 *
 * The local data is parsed into separate lists as it is read
 * and reconstructed for writing.  This means that we only need
 * to make config changes once and they are automatically
 * propagated to all devices.
 * The global (config and disk data) records are each in a list
 * of separate data structures.  When writing we find the entry
 * or entries applicable to the particular device.
 */
struct ddf_super {
	struct ddf_header anchor, primary, secondary;
	struct ddf_controller_data controller;
	struct ddf_header *active;	/* points at primary or secondary */
	struct phys_disk	*phys;
	struct virtual_disk	*virt;
	char			*conf;	/* raw config_section buffer */
	int pdsize, vdsize;		/* byte sizes of phys/virt sections */
	unsigned int max_part, mppe, conf_rec_len;
	int currentdev;
	int updates_pending;
	/* One vcl per virtual disk configuration we know about.
	 * The anonymous union pads the header portion to 512 bytes so
	 * 'conf' (and its variable tail) stays sector aligned. */
	struct vcl {
		union {
			char space[512];
			struct {
				struct vcl	*next;
				unsigned int	vcnum; /* index into ->virt */
				/* For an array with a secondary level there are
				 * multiple vd_config structures, all with the same
				 * guid but with different sec_elmnt_seq.
				 * One of these structures is in 'conf' below.
				 * The others are in other_bvds, not in any
				 * particular order.
				 */
				struct vd_config **other_bvds;
				__u64		*block_sizes; /* NULL if all the same */
			};
		};
		struct vd_config conf;
	} *conflist, *currentconf;
	/* One dl per physical device in the container; same 512-byte
	 * padding trick as vcl above. */
	struct dl {
		union {
			char space[512];
			struct {
				struct dl	*next;
				int major, minor;
				char		*devname;
				int fd;
				unsigned long long size; /* sectors */
				be64 primary_lba; /* sectors */
				be64 secondary_lba; /* sectors */
				be64 workspace_lba; /* sectors */
				int pdnum;	/* index in ->phys */
				struct spare_assign *spare;
				void *mdupdate; /* hold metadata update */

				/* These fields used by auto-layout */
				int raiddisk; /* slot to fill in autolayout */
				__u64 esize;
			};
		};
		struct disk_data disk;
		struct vcl *vlist[0]; /* max_part in size */
	} *dlist, *add_list;
};
497
498 #ifndef MDASSEMBLE
499 static int load_super_ddf_all(struct supertype *st, int fd,
500 void **sbp, char *devname);
501 static int get_svd_state(const struct ddf_super *, const struct vcl *);
502 static int
503 validate_geometry_ddf_container(struct supertype *st,
504 int level, int layout, int raiddisks,
505 int chunk, unsigned long long size,
506 unsigned long long data_offset,
507 char *dev, unsigned long long *freesize,
508 int verbose);
509
510 static int validate_geometry_ddf_bvd(struct supertype *st,
511 int level, int layout, int raiddisks,
512 int *chunk, unsigned long long size,
513 unsigned long long data_offset,
514 char *dev, unsigned long long *freesize,
515 int verbose);
516 #endif
517
518 static void free_super_ddf(struct supertype *st);
519 static int all_ff(const char *guid);
520 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
521 be32 refnum, unsigned int nmax,
522 const struct vd_config **bvd,
523 unsigned int *idx);
524 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
525 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
526 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
527 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i);
528 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
529 static int init_super_ddf_bvd(struct supertype *st,
530 mdu_array_info_t *info,
531 unsigned long long size,
532 char *name, char *homehost,
533 int *uuid, unsigned long long data_offset);
534
535 #ifndef offsetof
536 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
537 #endif
538
#if DEBUG
/* Debug helper: print state/init_state for every in-use VD entry. */
static void pr_state(struct ddf_super *ddf, const char *msg)
{
	unsigned int i;
	dprintf("%s/%s: ", __func__, msg);
	for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
		/* an all-0xff GUID marks an unused slot */
		if (all_ff(ddf->virt->entries[i].guid))
			continue;
		dprintf("%u(s=%02x i=%02x) ", i,
			ddf->virt->entries[i].state,
			ddf->virt->entries[i].init_state);
	}
	dprintf("\n");
}
#else
/* no-op when DEBUG is disabled */
static void pr_state(const struct ddf_super *ddf, const char *msg) {}
#endif
556
557 static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
558 {
559 if (ddf->updates_pending)
560 return;
561 ddf->updates_pending = 1;
562 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
563 pr_state(ddf, func);
564 }
565
566 #define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)
567
568 static be32 calc_crc(void *buf, int len)
569 {
570 /* crcs are always at the same place as in the ddf_header */
571 struct ddf_header *ddf = buf;
572 be32 oldcrc = ddf->crc;
573 __u32 newcrc;
574 ddf->crc = cpu_to_be32(0xffffffff);
575
576 newcrc = crc32(0, buf, len);
577 ddf->crc = oldcrc;
578 /* The crc is stored (like everything) bigendian, so convert
579 * here for simplicity
580 */
581 return cpu_to_be32(newcrc);
582 }
583
584 #define DDF_INVALID_LEVEL 0xff
585 #define DDF_NO_SECONDARY 0xff
586 static int err_bad_md_layout(const mdu_array_info_t *array)
587 {
588 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
589 array->level, array->layout, array->raid_disks);
590 return -1;
591 }
592
/* Translate an md level/layout/raid_disks description into the DDF
 * PRL/RLQ/SRL and element-count fields of *conf.
 * Returns 0 on success, or -1 (after printing an error) when the md
 * geometry has no DDF equivalent.
 */
static int layout_md2ddf(const mdu_array_info_t *array,
			 struct vd_config *conf)
{
	be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
	__u8 prl = DDF_INVALID_LEVEL, rlq = 0;
	__u8 sec_elmnt_count = 1;
	__u8 srl = DDF_NO_SECONDARY;

	switch (array->level) {
	case LEVEL_LINEAR:
		prl = DDF_CONCAT;
		break;
	case 0:
		rlq = DDF_RAID0_SIMPLE;
		prl = DDF_RAID0;
		break;
	case 1:
		/* DDF RAID1 plexes support exactly 2 or 3 mirrors */
		switch (array->raid_disks) {
		case 2:
			rlq = DDF_RAID1_SIMPLE;
			break;
		case 3:
			rlq = DDF_RAID1_MULTI;
			break;
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID1;
		break;
	case 4:
		if (array->layout != 0)
			return err_bad_md_layout(array);
		rlq = DDF_RAID4_N;
		prl = DDF_RAID4;
		break;
	case 5:
		switch (array->layout) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			rlq = DDF_RAID5_N_RESTART;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			rlq = DDF_RAID5_0_RESTART;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			rlq = DDF_RAID5_N_CONTINUE;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			/* not mentioned in standard */
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID5;
		break;
	case 6:
		switch (array->layout) {
		case ALGORITHM_ROTATING_N_RESTART:
			rlq = DDF_RAID5_N_RESTART;
			break;
		case ALGORITHM_ROTATING_ZERO_RESTART:
			rlq = DDF_RAID6_0_RESTART;
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			rlq = DDF_RAID5_N_CONTINUE;
			break;
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID6;
		break;
	case 10:
		/* md RAID10 "near" layouts map to RAID1 plexes of 2 or
		 * 3 copies combined by a spanned secondary level. */
		if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
			rlq = DDF_RAID1_SIMPLE;
			prim_elmnt_count =  cpu_to_be16(2);
			sec_elmnt_count =  array->raid_disks / 2;
		} else if (array->raid_disks % 3 == 0
			   && array->layout == 0x103) {
			rlq = DDF_RAID1_MULTI;
			prim_elmnt_count =  cpu_to_be16(3);
			sec_elmnt_count =  array->raid_disks / 3;
		} else
			return err_bad_md_layout(array);
		srl = DDF_2SPANNED;
		prl = DDF_RAID1;
		break;
	default:
		return err_bad_md_layout(array);
	}
	conf->prl = prl;
	conf->prim_elmnt_count = prim_elmnt_count;
	conf->rlq = rlq;
	conf->srl = srl;
	conf->sec_elmnt_count = sec_elmnt_count;
	return 0;
}
687
688 static int err_bad_ddf_layout(const struct vd_config *conf)
689 {
690 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
691 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
692 return -1;
693 }
694
695 static int layout_ddf2md(const struct vd_config *conf,
696 mdu_array_info_t *array)
697 {
698 int level = LEVEL_UNSUPPORTED;
699 int layout = 0;
700 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
701
702 if (conf->sec_elmnt_count > 1) {
703 /* see also check_secondary() */
704 if (conf->prl != DDF_RAID1 ||
705 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
706 pr_err("Unsupported secondary RAID level %u/%u\n",
707 conf->prl, conf->srl);
708 return -1;
709 }
710 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
711 layout = 0x102;
712 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
713 layout = 0x103;
714 else
715 return err_bad_ddf_layout(conf);
716 raiddisks *= conf->sec_elmnt_count;
717 level = 10;
718 goto good;
719 }
720
721 switch (conf->prl) {
722 case DDF_CONCAT:
723 level = LEVEL_LINEAR;
724 break;
725 case DDF_RAID0:
726 if (conf->rlq != DDF_RAID0_SIMPLE)
727 return err_bad_ddf_layout(conf);
728 level = 0;
729 break;
730 case DDF_RAID1:
731 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
732 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
733 return err_bad_ddf_layout(conf);
734 level = 1;
735 break;
736 case DDF_RAID4:
737 if (conf->rlq != DDF_RAID4_N)
738 return err_bad_ddf_layout(conf);
739 level = 4;
740 break;
741 case DDF_RAID5:
742 switch (conf->rlq) {
743 case DDF_RAID5_N_RESTART:
744 layout = ALGORITHM_LEFT_ASYMMETRIC;
745 break;
746 case DDF_RAID5_0_RESTART:
747 layout = ALGORITHM_RIGHT_ASYMMETRIC;
748 break;
749 case DDF_RAID5_N_CONTINUE:
750 layout = ALGORITHM_LEFT_SYMMETRIC;
751 break;
752 default:
753 return err_bad_ddf_layout(conf);
754 }
755 level = 5;
756 break;
757 case DDF_RAID6:
758 switch (conf->rlq) {
759 case DDF_RAID5_N_RESTART:
760 layout = ALGORITHM_ROTATING_N_RESTART;
761 break;
762 case DDF_RAID6_0_RESTART:
763 layout = ALGORITHM_ROTATING_ZERO_RESTART;
764 break;
765 case DDF_RAID5_N_CONTINUE:
766 layout = ALGORITHM_ROTATING_N_CONTINUE;
767 break;
768 default:
769 return err_bad_ddf_layout(conf);
770 }
771 level = 6;
772 break;
773 default:
774 return err_bad_ddf_layout(conf);
775 };
776
777 good:
778 array->level = level;
779 array->layout = layout;
780 array->raid_disks = raiddisks;
781 return 0;
782 }
783
/* Read and validate one DDF header (primary or secondary).
 * @fd:     open device
 * @lba:    sector address of the header, from the anchor
 * @size:   device size in sectors (for bounds checking)
 * @type:   expected type field (DDF_HEADER_PRIMARY/SECONDARY)
 * @hdr:    where to store the header read
 * @anchor: the already-validated anchor to compare against
 * Returns 1 if the header is usable, 0 otherwise.
 */
static int load_ddf_header(int fd, unsigned long long lba,
			   unsigned long long size,
			   int type,
			   struct ddf_header *hdr, struct ddf_header *anchor)
{
	/* read a ddf header (primary or secondary) from fd/lba
	 * and check that it is consistent with anchor
	 * Need to check:
	 *   magic, crc, guid, rev, and LBA's header_type, and
	 *  everything after header_type must be the same
	 */
	if (lba >= size-1)
		return 0;

	if (lseek64(fd, lba<<9, 0) < 0)
		return 0;

	if (read(fd, hdr, 512) != 512)
		return 0;

	if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
		pr_err("%s: bad header magic\n", __func__);
		return 0;
	}
	if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
		pr_err("%s: bad CRC\n", __func__);
		return 0;
	}
	/* All fields from pad2 onwards must match the anchor exactly;
	 * only the fields before it may legitimately differ. */
	if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
	    memcmp(anchor->revision, hdr->revision, 8) != 0 ||
	    !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
	    !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
	    hdr->type != type ||
	    memcmp(anchor->pad2, hdr->pad2, 512 -
		   offsetof(struct ddf_header, pad2)) != 0) {
		pr_err("%s: header mismatch\n", __func__);
		return 0;
	}

	/* Looks good enough to me... */
	return 1;
}
826
/* Read one metadata section belonging to the active header.
 * @buf: destination, or NULL to allocate a 512-byte-aligned buffer here.
 * @offset_be/@len_be: section position/length in sectors; the offset is
 *   relative to the active header's own LBA.
 * @check: if set, accept only the standard DDF section lengths.
 * Returns the buffer on success, NULL on failure.  A buffer allocated
 * here is freed on failure; a caller-supplied one is left alone.
 */
static void *load_section(int fd, struct ddf_super *super, void *buf,
			  be32 offset_be, be32 len_be, int check)
{
	unsigned long long offset = be32_to_cpu(offset_be);
	unsigned long long len = be32_to_cpu(len_be);
	int dofree = (buf == NULL);

	if (check)
		if (len != 2 && len != 8 && len != 32
		    && len != 128 && len != 512)
			return NULL;

	/* sanity cap: no section is larger than 1024 sectors */
	if (len > 1024)
		return NULL;
	if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
		buf = NULL;

	if (!buf)
		return NULL;

	/* Section offsets are relative to whichever header is active. */
	if (super->active->type == 1)
		offset += be64_to_cpu(super->active->primary_lba);
	else
		offset += be64_to_cpu(super->active->secondary_lba);

	if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
		if (dofree)
			free(buf);
		return NULL;
	}
	if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
		if (dofree)
			free(buf);
		return NULL;
	}
	return buf;
}
864
/* Read the anchor from the last sector of the device, validate it,
 * then load the primary and secondary headers and pick which one to
 * make 'active'.
 * Returns 0 on success, 1 on read/seek failure, 2 if no usable DDF
 * metadata was found.
 */
static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
{
	unsigned long long dsize;

	get_dev_size(fd, NULL, &dsize);

	/* The anchor always occupies the device's last 512-byte block. */
	if (lseek64(fd, dsize-512, 0) < 0) {
		if (devname)
			pr_err("Cannot seek to anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}
	if (read(fd, &super->anchor, 512) != 512) {
		if (devname)
			pr_err("Cannot read anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}
	if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
		if (devname)
			pr_err("no DDF anchor found on %s\n",
			       devname);
		return 2;
	}
	if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
		if (devname)
			pr_err("bad CRC on anchor on %s\n",
			       devname);
		return 2;
	}
	if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
	    memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
		if (devname)
			pr_err("can only support super revision"
			       " %.8s and earlier, not %.8s on %s\n",
			       DDF_REVISION_2, super->anchor.revision,devname);
		return 2;
	}
	super->active = NULL;
	if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
			    dsize >> 9,  1,
			    &super->primary, &super->anchor) == 0) {
		if (devname)
			pr_err("Failed to load primary DDF header "
			       "on %s\n", devname);
	} else
		super->active = &super->primary;

	if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
			    dsize >> 9,  2,
			    &super->secondary, &super->anchor)) {
		/* Prefer the secondary header when the primary failed to
		 * load, when the secondary is strictly newer and cleanly
		 * closed, or when both have the same sequence but only
		 * the primary was left open (interrupted write). */
		if (super->active == NULL
		    || (be32_to_cpu(super->primary.seq)
			< be32_to_cpu(super->secondary.seq) &&
			!super->secondary.openflag)
		    || (be32_to_cpu(super->primary.seq)
			== be32_to_cpu(super->secondary.seq) &&
			super->primary.openflag && !super->secondary.openflag)
			)
			super->active = &super->secondary;
	} else if (devname &&
		   be64_to_cpu(super->anchor.secondary_lba) != ~(__u64)0)
		pr_err("Failed to load secondary DDF header on %s\n",
		       devname);
	if (super->active == NULL)
		return 2;
	return 0;
}
933
/* Load the globally-scoped sections (controller, physical disk records,
 * virtual disk records) referenced by the active header, and cache the
 * frequently-used size fields from it.
 * Returns 0 on success, 2 if any section could not be loaded.
 */
static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
{
	void *ok;
	ok = load_section(fd, super, &super->controller,
			  super->active->controller_section_offset,
			  super->active->controller_section_length,
			  0);
	super->phys = load_section(fd, super, NULL,
				   super->active->phys_section_offset,
				   super->active->phys_section_length,
				   1);
	super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;

	super->virt = load_section(fd, super, NULL,
				   super->active->virt_section_offset,
				   super->active->virt_section_length,
				   1);
	super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
	if (!ok ||
	    !super->phys ||
	    !super->virt) {
		/* all-or-nothing: release whatever did load */
		free(super->phys);
		free(super->virt);
		super->phys = NULL;
		super->virt = NULL;
		return 2;
	}
	super->conflist = NULL;
	super->dlist = NULL;

	super->max_part = be16_to_cpu(super->active->max_partitions);
	super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
	super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
	return 0;
}
969
970 #define DDF_UNUSED_BVD 0xff
971 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
972 {
973 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
974 unsigned int i, vdsize;
975 void *p;
976 if (n_vds == 0) {
977 vcl->other_bvds = NULL;
978 return 0;
979 }
980 vdsize = ddf->conf_rec_len * 512;
981 if (posix_memalign(&p, 512, n_vds *
982 (vdsize + sizeof(struct vd_config *))) != 0)
983 return -1;
984 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
985 for (i = 0; i < n_vds; i++) {
986 vcl->other_bvds[i] = p + i * vdsize;
987 memset(vcl->other_bvds[i], 0, vdsize);
988 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
989 }
990 return 0;
991 }
992
993 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
994 unsigned int len)
995 {
996 int i;
997 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
998 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
999 break;
1000
1001 if (i < vcl->conf.sec_elmnt_count-1) {
1002 if (be32_to_cpu(vd->seqnum) <=
1003 be32_to_cpu(vcl->other_bvds[i]->seqnum))
1004 return;
1005 } else {
1006 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
1007 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
1008 break;
1009 if (i == vcl->conf.sec_elmnt_count-1) {
1010 pr_err("no space for sec level config %u, count is %u\n",
1011 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
1012 return;
1013 }
1014 }
1015 memcpy(vcl->other_bvds[i], vd, len);
1016 }
1017
/* Read the per-device DDF sections from 'fd' into 'super': the
 * disk-data section (this device's identity) and the configuration
 * section (VD configuration records and spare assignments).
 *
 * fd:      open file descriptor of the member device
 * devname: device name for messages / dl->devname (may be NULL)
 * keep:    if non-zero, the new dl entry keeps fd open
 *
 * Returns 0 on success, 1 on allocation failure.
 * NOTE(review): on the error paths below, memory already linked into
 * super->dlist / super->conflist is left for the caller to release —
 * confirm the callers' cleanup covers this.
 */
static int load_ddf_local(int fd, struct ddf_super *super,
			  char *devname, int keep)
{
	struct dl *dl;
	struct stat stb;
	char *conf;
	unsigned int i;
	unsigned int confsec;
	int vnum;
	unsigned int max_virt_disks =
		be16_to_cpu(super->active->max_vd_entries);
	unsigned long long dsize;

	/* First the local disk info */
	/* 512-aligned for direct I/O; max_part vcl pointers follow the
	 * fixed part of the structure. */
	if (posix_memalign((void**)&dl, 512,
			   sizeof(*dl) +
			   (super->max_part) * sizeof(dl->vlist[0])) != 0) {
		pr_err("%s could not allocate disk info buffer\n",
		       __func__);
		return 1;
	}

	load_section(fd, super, &dl->disk,
		     super->active->data_section_offset,
		     super->active->data_section_length,
		     0);
	dl->devname = devname ? xstrdup(devname) : NULL;

	fstat(fd, &stb);
	dl->major = major(stb.st_rdev);
	dl->minor = minor(stb.st_rdev);
	dl->next = super->dlist;
	dl->fd = keep ? fd : -1;

	dl->size = 0;
	if (get_dev_size(fd, devname, &dsize))
		dl->size = dsize >> 9;	/* bytes -> 512-byte sectors */
	/* If the disks have different sizes, the LBAs will differ
	 * between phys disks.
	 * At this point here, the values in super->active must be valid
	 * for this phys disk. */
	dl->primary_lba = super->active->primary_lba;
	dl->secondary_lba = super->active->secondary_lba;
	dl->workspace_lba = super->active->workspace_lba;
	dl->spare = NULL;
	for (i = 0 ; i < super->max_part ; i++)
		dl->vlist[i] = NULL;
	super->dlist = dl;
	dl->pdnum = -1;
	/* Locate this disk in the phys table by GUID; stays -1 if absent. */
	for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
		if (memcmp(super->phys->entries[i].guid,
			   dl->disk.guid, DDF_GUID_LEN) == 0)
			dl->pdnum = i;

	/* Now the config list. */
	/* 'conf' is an array of config entries, some of which are
	 * probably invalid.  Those which are good need to be copied into
	 * the conflist
	 */

	conf = load_section(fd, super, super->conf,
			    super->active->config_section_offset,
			    super->active->config_section_length,
			    0);
	super->conf = conf;
	vnum = 0;
	/* Walk the section one record (conf_rec_len sectors) at a time. */
	for (confsec = 0;
	     confsec < be32_to_cpu(super->active->config_section_length);
	     confsec += super->conf_rec_len) {
		struct vd_config *vd =
			(struct vd_config *)((char*)conf + confsec*512);
		struct vcl *vcl;

		if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
			/* only the first spare-assignment record is kept */
			if (dl->spare)
				continue;
			if (posix_memalign((void**)&dl->spare, 512,
					   super->conf_rec_len*512) != 0) {
				pr_err("%s could not allocate spare info buf\n",
				       __func__);
				return 1;
			}

			memcpy(dl->spare, vd, super->conf_rec_len*512);
			continue;
		}
		if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
			/* Must be vendor-unique - I cannot handle those */
			continue;

		/* Do we already know this virtual disk (by GUID)? */
		for (vcl = super->conflist; vcl; vcl = vcl->next) {
			if (memcmp(vcl->conf.guid,
				   vd->guid, DDF_GUID_LEN) == 0)
				break;
		}

		if (vcl) {
			dl->vlist[vnum++] = vcl;
			if (vcl->other_bvds != NULL &&
			    vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
				/* record belongs to another BVD of a
				 * two-level array: store it separately */
				add_other_bvd(vcl, vd, super->conf_rec_len*512);
				continue;
			}
			/* keep only the newest record for this BVD */
			if (be32_to_cpu(vd->seqnum) <=
			    be32_to_cpu(vcl->conf.seqnum))
				continue;
		} else {
			if (posix_memalign((void**)&vcl, 512,
					   (super->conf_rec_len*512 +
					    offsetof(struct vcl, conf))) != 0) {
				pr_err("%s could not allocate vcl buf\n",
				       __func__);
				return 1;
			}
			vcl->next = super->conflist;
			vcl->block_sizes = NULL; /* FIXME not for CONCAT */
			vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
			if (alloc_other_bvds(super, vcl) != 0) {
				pr_err("%s could not allocate other bvds\n",
				       __func__);
				free(vcl);
				return 1;
			};
			super->conflist = vcl;
			dl->vlist[vnum++] = vcl;
		}
		memcpy(&vcl->conf, vd, super->conf_rec_len*512);
		/* remember this VD's index in the virtual-disk table */
		for (i=0; i < max_virt_disks ; i++)
			if (memcmp(super->virt->entries[i].guid,
				   vcl->conf.guid, DDF_GUID_LEN)==0)
				break;
		if (i < max_virt_disks)
			vcl->vcnum = i;
	}

	return 0;
}
1155
1156 static int load_super_ddf(struct supertype *st, int fd,
1157 char *devname)
1158 {
1159 unsigned long long dsize;
1160 struct ddf_super *super;
1161 int rv;
1162
1163 if (get_dev_size(fd, devname, &dsize) == 0)
1164 return 1;
1165
1166 if (test_partition(fd))
1167 /* DDF is not allowed on partitions */
1168 return 1;
1169
1170 /* 32M is a lower bound */
1171 if (dsize <= 32*1024*1024) {
1172 if (devname)
1173 pr_err("%s is too small for ddf: "
1174 "size is %llu sectors.\n",
1175 devname, dsize>>9);
1176 return 1;
1177 }
1178 if (dsize & 511) {
1179 if (devname)
1180 pr_err("%s is an odd size for ddf: "
1181 "size is %llu bytes.\n",
1182 devname, dsize);
1183 return 1;
1184 }
1185
1186 free_super_ddf(st);
1187
1188 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1189 pr_err("malloc of %zu failed.\n",
1190 sizeof(*super));
1191 return 1;
1192 }
1193 memset(super, 0, sizeof(*super));
1194
1195 rv = load_ddf_headers(fd, super, devname);
1196 if (rv) {
1197 free(super);
1198 return rv;
1199 }
1200
1201 /* Have valid headers and have chosen the best. Let's read in the rest*/
1202
1203 rv = load_ddf_global(fd, super, devname);
1204
1205 if (rv) {
1206 if (devname)
1207 pr_err("Failed to load all information "
1208 "sections on %s\n", devname);
1209 free(super);
1210 return rv;
1211 }
1212
1213 rv = load_ddf_local(fd, super, devname, 0);
1214
1215 if (rv) {
1216 if (devname)
1217 pr_err("Failed to load all information "
1218 "sections on %s\n", devname);
1219 free(super);
1220 return rv;
1221 }
1222
1223 /* Should possibly check the sections .... */
1224
1225 st->sb = super;
1226 if (st->ss == NULL) {
1227 st->ss = &super_ddf;
1228 st->minor_version = 0;
1229 st->max_devs = 512;
1230 }
1231 return 0;
1232
1233 }
1234
1235 static void free_super_ddf(struct supertype *st)
1236 {
1237 struct ddf_super *ddf = st->sb;
1238 if (ddf == NULL)
1239 return;
1240 free(ddf->phys);
1241 free(ddf->virt);
1242 free(ddf->conf);
1243 while (ddf->conflist) {
1244 struct vcl *v = ddf->conflist;
1245 ddf->conflist = v->next;
1246 if (v->block_sizes)
1247 free(v->block_sizes);
1248 if (v->other_bvds)
1249 /*
1250 v->other_bvds[0] points to beginning of buffer,
1251 see alloc_other_bvds()
1252 */
1253 free(v->other_bvds[0]);
1254 free(v);
1255 }
1256 while (ddf->dlist) {
1257 struct dl *d = ddf->dlist;
1258 ddf->dlist = d->next;
1259 if (d->fd >= 0)
1260 close(d->fd);
1261 if (d->spare)
1262 free(d->spare);
1263 free(d);
1264 }
1265 while (ddf->add_list) {
1266 struct dl *d = ddf->add_list;
1267 ddf->add_list = d->next;
1268 if (d->fd >= 0)
1269 close(d->fd);
1270 if (d->spare)
1271 free(d->spare);
1272 free(d);
1273 }
1274 free(ddf);
1275 st->sb = NULL;
1276 }
1277
1278 static struct supertype *match_metadata_desc_ddf(char *arg)
1279 {
1280 /* 'ddf' only supports containers */
1281 struct supertype *st;
1282 if (strcmp(arg, "ddf") != 0 &&
1283 strcmp(arg, "default") != 0
1284 )
1285 return NULL;
1286
1287 st = xcalloc(1, sizeof(*st));
1288 st->ss = &super_ddf;
1289 st->max_devs = 512;
1290 st->minor_version = 0;
1291 st->sb = NULL;
1292 return st;
1293 }
1294
1295 #ifndef MDASSEMBLE
1296
/* Names for the VD state field; indexed by (state & 7), see examine_vds(). */
static mapping_t ddf_state[] = {
	{ "Optimal", 0},
	{ "Degraded", 1},
	{ "Deleted", 2},
	{ "Missing", 3},
	{ "Failed", 4},
	{ "Partially Optimal", 5},
	{ "-reserved-", 6},
	{ "-reserved-", 7},
	{ NULL, 0}
};
1308
/* Names for (init_state & DDF_initstate_mask), see examine_vds(). */
static mapping_t ddf_init_state[] = {
	{ "Not Initialised", 0},
	{ "QuickInit in Progress", 1},
	{ "Fully Initialised", 2},
	{ "*UNKNOWN*", 3},
	{ NULL, 0}
};
/* Names for the access bits, ((init_state & DDF_access_mask) >> 6). */
static mapping_t ddf_access[] = {
	{ "Read/Write", 0},
	{ "Reserved", 1},
	{ "Read Only", 2},
	{ "Blocked (no access)", 3},
	{ NULL ,0}
};
1323
/* Primary RAID level (vd_config.prl) -> name. */
static mapping_t ddf_level[] = {
	{ "RAID0", DDF_RAID0},
	{ "RAID1", DDF_RAID1},
	{ "RAID3", DDF_RAID3},
	{ "RAID4", DDF_RAID4},
	{ "RAID5", DDF_RAID5},
	{ "RAID1E",DDF_RAID1E},
	{ "JBOD", DDF_JBOD},
	{ "CONCAT",DDF_CONCAT},
	{ "RAID5E",DDF_RAID5E},
	{ "RAID5EE",DDF_RAID5EE},
	{ "RAID6", DDF_RAID6},
	{ NULL, 0}
};
/* Secondary RAID level (vd_config.srl) -> name, for two-level arrays. */
static mapping_t ddf_sec_level[] = {
	{ "Striped", DDF_2STRIPED},
	{ "Mirrored", DDF_2MIRRORED},
	{ "Concat", DDF_2CONCAT},
	{ "Spanned", DDF_2SPANNED},
	{ NULL, 0}
};
1345 #endif
1346
1347 static int all_ff(const char *guid)
1348 {
1349 int i;
1350 for (i = 0; i < DDF_GUID_LEN; i++)
1351 if (guid[i] != (char)0xff)
1352 return 0;
1353 return 1;
1354 }
1355
1356 static const char *guid_str(const char *guid)
1357 {
1358 static char buf[DDF_GUID_LEN*2+1];
1359 int i;
1360 char *p = buf;
1361 for (i = 0; i < DDF_GUID_LEN; i++) {
1362 unsigned char c = guid[i];
1363 if (c >= 32 && c < 127)
1364 p += sprintf(p, "%c", c);
1365 else
1366 p += sprintf(p, "%02x", c);
1367 }
1368 *p = '\0';
1369 return (const char *) buf;
1370 }
1371
1372 #ifndef MDASSEMBLE
static void print_guid(char *guid, int tstamp)
{
	/* A GUIDs are part (or all) ASCII and part binary.
	 * They tend to be space padded.
	 * We print the GUID in HEX, then in parentheses add
	 * any initial ASCII sequence, and a possible
	 * time stamp from bytes 16-19
	 */
	int l = DDF_GUID_LEN;
	int i;

	/* hex dump, colon every 4 bytes */
	for (i=0 ; i<DDF_GUID_LEN ; i++) {
		if ((i&3)==0 && i != 0) printf(":");
		printf("%02X", guid[i]&255);
	}

	printf("\n                  (");
	/* strip trailing space padding, then print the leading
	 * printable-ASCII run */
	while (l && guid[l-1] == ' ')
		l--;
	for (i=0 ; i<l ; i++) {
		if (guid[i] >= 0x20 && guid[i] < 0x7f)
			fputc(guid[i], stdout);
		else
			break;
	}
	if (tstamp) {
		/* NOTE(review): unaligned/aliasing __u32 read from
		 * guid+16 — works on the supported platforms but worth
		 * confirming; DECADE converts the 1980 epoch to unix. */
		time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
		char tbuf[100];
		struct tm *tm;
		tm = localtime(&then);
		strftime(tbuf, 100, " %D %T",tm);
		fputs(tbuf, stdout);
	}
	printf(")");
}
1408
/* Print detail lines for virtual disk number 'n' whose GUID is 'guid',
 * using every valid configuration record in sb->conflist that matches.
 */
static void examine_vd(int n, struct ddf_super *sb, char *guid)
{
	int crl = sb->conf_rec_len;
	struct vcl *vcl;

	for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
		unsigned int i;
		struct vd_config *vc = &vcl->conf;

		/* skip records with bad checksums or other GUIDs */
		if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
			continue;
		if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
			continue;

		/* Ok, we know about this VD, let's give more details */
		printf(" Raid Devices[%d] : %d (", n,
		       be16_to_cpu(vc->prim_elmnt_count));
		/* for each member, print its index in the phys table,
		 * or "--" when the refnum is not found there */
		for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
			int j;
			int cnt = be16_to_cpu(sb->phys->max_pdes);
			for (j=0; j<cnt; j++)
				if (be32_eq(vc->phys_refnum[i],
					    sb->phys->entries[j].refnum))
					break;
			if (i) printf(" ");
			if (j < cnt)
				printf("%d", j);
			else
				printf("--");
		}
		printf(")\n");
		/* chunk_shift 255 marks "no chunk size" */
		if (vc->chunk_shift != 255)
			printf("   Chunk Size[%d] : %d sectors\n", n,
			       1 << vc->chunk_shift);
		printf("   Raid Level[%d] : %s\n", n,
		       map_num(ddf_level, vc->prl)?:"-unknown-");
		if (vc->sec_elmnt_count != 1) {
			printf("  Secondary Position[%d] : %d of %d\n", n,
			       vc->sec_elmnt_seq, vc->sec_elmnt_count);
			printf("  Secondary Level[%d] : %s\n", n,
			       map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
		}
		/* sizes are stored in sectors; /2 converts to KiB */
		printf("  Device Size[%d] : %llu\n", n,
		       be64_to_cpu(vc->blocks)/2);
		printf("   Array Size[%d] : %llu\n", n,
		       be64_to_cpu(vc->array_blocks)/2);
	}
}
1457
/* Print a summary of every populated virtual-disk entry, delegating
 * per-VD configuration detail to examine_vd(). */
static void examine_vds(struct ddf_super *sb)
{
	int cnt = be16_to_cpu(sb->virt->populated_vdes);
	unsigned int i;
	printf("  Virtual Disks : %d\n", cnt);

	/* entries can be sparse, so scan the whole table and skip
	 * unused (all-0xff GUID) slots */
	for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
		struct virtual_entry *ve = &sb->virt->entries[i];
		if (all_ff(ve->guid))
			continue;
		printf("\n");
		printf("      VD GUID[%d] : ", i); print_guid(ve->guid, 1);
		printf("\n");
		printf("         unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
		printf("        state[%d] : %s, %s%s\n", i,
		       map_num(ddf_state, ve->state & 7),
		       (ve->state & DDF_state_morphing) ? "Morphing, ": "",
		       (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
		printf("   init state[%d] : %s\n", i,
		       map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
		printf("       access[%d] : %s\n", i,
		       map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
		printf("         Name[%d] : %.16s\n", i, ve->name);
		examine_vd(i, sb, ve->guid);
	}
	if (cnt) printf("\n");
}
1485
/* Print one line per physical-disk entry: index, refnum, size, the
 * matching device node (if a loaded disk has the same refnum) and a
 * decoded type/state description. */
static void examine_pds(struct ddf_super *sb)
{
	int cnt = be16_to_cpu(sb->phys->max_pdes);
	int i;
	struct dl *dl;
	printf(" Physical Disks : %d\n", cnt);
	printf("      Number    RefNo      Size       Device      Type/State\n");

	for (i=0 ; i<cnt ; i++) {
		struct phys_disk_entry *pd = &sb->phys->entries[i];
		int type = be16_to_cpu(pd->type);
		int state = be16_to_cpu(pd->state);

		if (be32_to_cpu(pd->refnum) == 0xffffffff)
			/* Not in use */
			continue;
		//printf("      PD GUID[%d] : ", i); print_guid(pd->guid, 0);
		//printf("\n");
		printf("       %3d    %08x  ", i,
		       be32_to_cpu(pd->refnum));
		/* config_size is in sectors; >>1 converts to KiB */
		printf("%8lluK ",
		       be64_to_cpu(pd->config_size)>>1);
		/* find the loaded disk with this refnum to name it */
		for (dl = sb->dlist; dl ; dl = dl->next) {
			if (be32_eq(dl->disk.refnum, pd->refnum)) {
				char *dv = map_dev(dl->major, dl->minor, 0);
				if (dv) {
					printf("%-15s", dv);
					break;
				}
			}
		}
		if (!dl)
			printf("%15s","");
		printf("   %s%s%s%s%s",
		       (type&2) ? "active":"",
		       (type&4) ? "Global-Spare":"",
		       (type&8) ? "spare" : "",
		       (type&16)? ", foreign" : "",
		       (type&32)? "pass-through" : "");
		if (state & DDF_Failed)
			/* This over-rides these three */
			state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
		printf("/%s%s%s%s%s%s%s",
		       (state&1)? "Online": "Offline",
		       (state&2)? ", Failed": "",
		       (state&4)? ", Rebuilding": "",
		       (state&8)? ", in-transition": "",
		       (state&16)? ", SMART-errors": "",
		       (state&32)? ", Unrecovered-Read-Errors": "",
		       (state&64)? ", Missing" : "");
		printf("\n");
	}
}
1539
/* Top-level --examine output for a DDF container: header identity
 * followed by the virtual- and physical-disk tables. */
static void examine_super_ddf(struct supertype *st, char *homehost)
{
	struct ddf_super *sb = st->sb;

	printf("          Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
	printf("        Version : %.8s\n", sb->anchor.revision);
	printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
	printf("\n");
	printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
	printf("\n");
	printf("            Seq : %08x\n", be32_to_cpu(sb->active->seq));
	/* a valid magic in the secondary header means it was found */
	printf("  Redundant hdr : %s\n", (be32_eq(sb->secondary.magic,
						  DDF_HEADER_MAGIC)
					  ?"yes" : "no"));
	examine_vds(sb);
	examine_pds(sb);
}
1557
1558 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1559 {
1560 /*
1561 * Figure out the VD number for this supertype.
1562 * Returns DDF_CONTAINER for the container itself,
1563 * and DDF_NOTFOUND on error.
1564 */
1565 struct ddf_super *ddf = st->sb;
1566 struct mdinfo *sra;
1567 char *sub, *end;
1568 unsigned int vcnum;
1569
1570 if (*st->container_devnm == '\0')
1571 return DDF_CONTAINER;
1572
1573 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1574 if (!sra || sra->array.major_version != -1 ||
1575 sra->array.minor_version != -2 ||
1576 !is_subarray(sra->text_version))
1577 return DDF_NOTFOUND;
1578
1579 sub = strchr(sra->text_version + 1, '/');
1580 if (sub != NULL)
1581 vcnum = strtoul(sub + 1, &end, 10);
1582 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1583 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1584 return DDF_NOTFOUND;
1585
1586 return vcnum;
1587 }
1588
1589 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1590 {
1591 /* We just write a generic DDF ARRAY entry
1592 */
1593 struct mdinfo info;
1594 char nbuf[64];
1595 getinfo_super_ddf(st, &info, NULL);
1596 fname_from_uuid(st, &info, nbuf, ':');
1597
1598 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1599 }
1600
1601 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1602 {
1603 /* We write a DDF ARRAY member entry for each vd, identifying container
1604 * by uuid and member by unit number and uuid.
1605 */
1606 struct ddf_super *ddf = st->sb;
1607 struct mdinfo info;
1608 unsigned int i;
1609 char nbuf[64];
1610 getinfo_super_ddf(st, &info, NULL);
1611 fname_from_uuid(st, &info, nbuf, ':');
1612
1613 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1614 struct virtual_entry *ve = &ddf->virt->entries[i];
1615 struct vcl vcl;
1616 char nbuf1[64];
1617 char namebuf[17];
1618 if (all_ff(ve->guid))
1619 continue;
1620 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1621 ddf->currentconf =&vcl;
1622 vcl.vcnum = i;
1623 uuid_from_super_ddf(st, info.uuid);
1624 fname_from_uuid(st, &info, nbuf1, ':');
1625 _ddf_array_name(namebuf, ddf, i);
1626 printf("ARRAY%s%s container=%s member=%d UUID=%s\n",
1627 namebuf[0] == '\0' ? "" : " /dev/md/", namebuf,
1628 nbuf+5, i, nbuf1+5);
1629 }
1630 }
1631
1632 static void export_examine_super_ddf(struct supertype *st)
1633 {
1634 struct mdinfo info;
1635 char nbuf[64];
1636 getinfo_super_ddf(st, &info, NULL);
1637 fname_from_uuid(st, &info, nbuf, ':');
1638 printf("MD_METADATA=ddf\n");
1639 printf("MD_LEVEL=container\n");
1640 printf("MD_UUID=%s\n", nbuf+5);
1641 printf("MD_DEVICES=%u\n",
1642 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1643 }
1644
/* Copy the DDF metadata area verbatim from device 'from' to device 'to'.
 * Returns 0 on success, 1 on any failure.
 */
static int copy_metadata_ddf(struct supertype *st, int from, int to)
{
	void *buf;
	unsigned long long dsize, offset;
	int bytes;
	struct ddf_header *ddf;
	int written = 0;

	/* The meta consists of an anchor, a primary, and a secondary.
	 * This all lives at the end of the device.
	 * So it is easiest to find the earliest of primary and
	 * secondary, and copy everything from there.
	 *
	 * Anchor is 512 from end.  It contains primary_lba and secondary_lba
	 * we choose one of those
	 */

	if (posix_memalign(&buf, 4096, 4096) != 0)
		return 1;

	if (!get_dev_size(from, NULL, &dsize))
		goto err;

	/* read and validate the anchor (last sector) */
	if (lseek64(from, dsize-512, 0) < 0)
		goto err;
	if (read(from, buf, 512) != 512)
		goto err;
	ddf = buf;
	if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
	    !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
	    (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
	     memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
		goto err;

	/* copy from the lowest of: anchor, primary, secondary */
	offset = dsize - 512;
	if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
		offset = be64_to_cpu(ddf->primary_lba) << 9;
	if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
		offset = be64_to_cpu(ddf->secondary_lba) << 9;

	bytes = dsize - offset;

	/* the two devices are assumed to be the same size, so the same
	 * offset is used on both */
	if (lseek64(from, offset, 0) < 0 ||
	    lseek64(to, offset, 0) < 0)
		goto err;
	/* copy in buffer-sized chunks */
	while (written < bytes) {
		int n = bytes - written;
		if (n > 4096)
			n = 4096;
		if (read(from, buf, n) != n)
			goto err;
		if (write(to, buf, n) != n)
			goto err;
		written += n;
	}
	free(buf);
	return 0;
err:
	free(buf);
	return 1;
}
1706
/* --detail output for DDF: intentionally empty for now. */
static void detail_super_ddf(struct supertype *st, char *homehost)
{
	/* FIXME later
	 * Could print DDF GUID
	 * Need to find which array
	 * If whole, briefly list all arrays
	 * If one, give name
	 */
}
1716
/* 8-character, space-padded T10 vendor IDs (first 8 bytes of the
 * controller GUID) of controllers known to regenerate VD GUIDs on
 * every boot; compared with memcmp(.., 8) in volume_id_is_reliable(). */
static const char *vendors_with_variable_volume_UUID[] = {
	"LSI     ",
};
1720
1721 static int volume_id_is_reliable(const struct ddf_super *ddf)
1722 {
1723 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1724 int i;
1725 for (i = 0; i < n; i++)
1726 if (!memcmp(ddf->controller.guid,
1727 vendors_with_variable_volume_UUID[i], 8))
1728 return 0;
1729 return 1;
1730 }
1731
/* Compute the md uuid of subarray 'vcnum'.
 * If the vendor's VD GUIDs are stable, hash the VD GUID directly;
 * otherwise derive a pseudo-UUID from stable properties (header GUID,
 * VD name, VD number).
 */
static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
				 unsigned int vcnum, int uuid[4])
{
	char buf[DDF_GUID_LEN+18], sha[20], *p;
	struct sha1_ctx ctx;
	if (volume_id_is_reliable(ddf)) {
		uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
		return;
	}
	/*
	 * Some fake RAID BIOSes (in particular, LSI ones) change the
	 * VD GUID at every boot. These GUIDs are not suitable for
	 * identifying an array. Luckily the header GUID appears to
	 * remain constant.
	 * We construct a pseudo-UUID from the header GUID and those
	 * properties of the subarray that we expect to remain constant.
	 */
	memset(buf, 0, sizeof(buf));
	p = buf;
	memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
	p += DDF_GUID_LEN;
	memcpy(p, ddf->virt->entries[vcnum].name, 16);
	p += 16;
	/* NOTE(review): vcnum is stored in host byte order, so the
	 * pseudo-UUID differs between endiannesses — confirm this is
	 * acceptable (the uuid is only compared on the same host). */
	*((__u16 *) p) = vcnum;
	sha1_init_ctx(&ctx);
	sha1_process_bytes(buf, sizeof(buf), &ctx);
	sha1_finish_ctx(&ctx, sha);
	/* first 16 bytes of the SHA1 become the uuid */
	memcpy(uuid, sha, 4*4);
}
1761
1762 static void brief_detail_super_ddf(struct supertype *st)
1763 {
1764 struct mdinfo info;
1765 char nbuf[64];
1766 struct ddf_super *ddf = st->sb;
1767 unsigned int vcnum = get_vd_num_of_subarray(st);
1768 if (vcnum == DDF_CONTAINER)
1769 uuid_from_super_ddf(st, info.uuid);
1770 else if (vcnum == DDF_NOTFOUND)
1771 return;
1772 else
1773 uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
1774 fname_from_uuid(st, &info, nbuf,':');
1775 printf(" UUID=%s", nbuf + 5);
1776 }
1777 #endif
1778
1779 static int match_home_ddf(struct supertype *st, char *homehost)
1780 {
1781 /* It matches 'this' host if the controller is a
1782 * Linux-MD controller with vendor_data matching
1783 * the hostname. It would be nice if we could
1784 * test against controller found in /sys or somewhere...
1785 */
1786 struct ddf_super *ddf = st->sb;
1787 unsigned int len;
1788
1789 if (!homehost)
1790 return 0;
1791 len = strlen(homehost);
1792
1793 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1794 len < sizeof(ddf->controller.vendor_data) &&
1795 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1796 ddf->controller.vendor_data[len] == 0);
1797 }
1798
1799 #ifndef MDASSEMBLE
static int find_index_in_bvd(const struct ddf_super *ddf,
			     const struct vd_config *conf, unsigned int n,
			     unsigned int *n_bvd)
{
	/*
	 * Find the index of the n-th valid physical disk in this BVD.
	 * Unused entries can be sprinkled in with the used entries,
	 * but don't count.
	 *
	 * On success *n_bvd is set to the phys_refnum index and 1 is
	 * returned; 0 is returned when no such member exists.
	 */
	unsigned int i, j;
	/* i walks the (possibly sparse) refnum table, j counts only
	 * the used entries */
	for (i = 0, j = 0;
	     i < ddf->mppe && j < be16_to_cpu(conf->prim_elmnt_count);
	     i++) {
		if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
			/* 0xffffffff marks an unused slot */
			if (n == j) {
				*n_bvd = i;
				return 1;
			}
			j++;
		}
	}
	dprintf("%s: couldn't find BVD member %u (total %u)\n",
		__func__, n, be16_to_cpu(conf->prim_elmnt_count));
	return 0;
}
1825
/* Given a member array instance number, and a raid disk within that instance,
 * find the vd_config structure.  The offset of the given disk in the phys_refnum
 * table is returned in n_bvd.
 * For two-level members with a secondary raid level the vd_config for
 * the appropriate BVD is returned.
 * The return value is always &vlc->conf, where vlc is returned in last pointer.
 * Returns NULL when the disk cannot be located.
 */
static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
				   unsigned int n,
				   unsigned int *n_bvd, struct vcl **vcl)
{
	struct vcl *v;

	for (v = ddf->conflist; v; v = v->next) {
		unsigned int nsec, ibvd = 0;
		struct vd_config *conf;
		if (inst != v->vcnum)
			continue;
		conf = &v->conf;
		/* simple (single-level) array: look up directly */
		if (conf->sec_elmnt_count == 1) {
			if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
				*vcl = v;
				return conf;
			} else
				goto bad;
		}
		if (v->other_bvds == NULL) {
			pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
			       __func__, conf->sec_elmnt_count);
			goto bad;
		}
		/* which BVD does global disk index n fall into? */
		nsec = n / be16_to_cpu(conf->prim_elmnt_count);
		if (conf->sec_elmnt_seq != nsec) {
			for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
				if (v->other_bvds[ibvd-1]->sec_elmnt_seq
				    == nsec)
					break;
			}
			if (ibvd == conf->sec_elmnt_count)
				goto bad;
			conf = v->other_bvds[ibvd-1];
		}
		/* NOTE(review): the within-BVD index is computed as
		 * n - nsec*sec_elmnt_count, but nsec was derived from
		 * prim_elmnt_count above; this is only equivalent when
		 * sec_elmnt_count == prim_elmnt_count — confirm against
		 * the DDF layout before relying on it for asymmetric
		 * two-level arrays. */
		if (!find_index_in_bvd(ddf, conf,
				       n - nsec*conf->sec_elmnt_count, n_bvd))
			goto bad;
		dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
			, __func__, n, *n_bvd, ibvd, inst);
		*vcl = v;
		return conf;
	}
bad:
	pr_err("%s: Could't find disk %d in array %u\n", __func__, n, inst);
	return NULL;
}
1880 #endif
1881
1882 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1883 {
1884 /* Find the entry in phys_disk which has the given refnum
1885 * and return it's index
1886 */
1887 unsigned int i;
1888 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1889 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1890 return i;
1891 return -1;
1892 }
1893
1894 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1895 {
1896 char buf[20];
1897 struct sha1_ctx ctx;
1898 sha1_init_ctx(&ctx);
1899 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1900 sha1_finish_ctx(&ctx, buf);
1901 memcpy(uuid, buf, 4*4);
1902 }
1903
1904 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1905 {
1906 /* The uuid returned here is used for:
1907 * uuid to put into bitmap file (Create, Grow)
1908 * uuid for backup header when saving critical section (Grow)
1909 * comparing uuids when re-adding a device into an array
1910 * In these cases the uuid required is that of the data-array,
1911 * not the device-set.
1912 * uuid to recognise same set when adding a missing device back
1913 * to an array. This is a uuid for the device-set.
1914 *
1915 * For each of these we can make do with a truncated
1916 * or hashed uuid rather than the original, as long as
1917 * everyone agrees.
1918 * In the case of SVD we assume the BVD is of interest,
1919 * though that might be the case if a bitmap were made for
1920 * a mirrored SVD - worry about that later.
1921 * So we need to find the VD configuration record for the
1922 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1923 * The first 16 bytes of the sha1 of these is used.
1924 */
1925 struct ddf_super *ddf = st->sb;
1926 struct vcl *vcl = ddf->currentconf;
1927
1928 if (vcl)
1929 uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
1930 else
1931 uuid_from_ddf_guid(ddf->anchor.guid, uuid);
1932 }
1933
1934 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1935 {
1936 struct ddf_super *ddf = st->sb;
1937 int map_disks = info->array.raid_disks;
1938 __u32 *cptr;
1939
1940 if (ddf->currentconf) {
1941 getinfo_super_ddf_bvd(st, info, map);
1942 return;
1943 }
1944 memset(info, 0, sizeof(*info));
1945
1946 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1947 info->array.level = LEVEL_CONTAINER;
1948 info->array.layout = 0;
1949 info->array.md_minor = -1;
1950 cptr = (__u32 *)(ddf->anchor.guid + 16);
1951 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1952
1953 info->array.utime = 0;
1954 info->array.chunk_size = 0;
1955 info->container_enough = 1;
1956
1957 info->disk.major = 0;
1958 info->disk.minor = 0;
1959 if (ddf->dlist) {
1960 struct phys_disk_entry *pde = NULL;
1961 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1962 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1963
1964 info->data_offset = be64_to_cpu(ddf->phys->
1965 entries[info->disk.raid_disk].
1966 config_size);
1967 info->component_size = ddf->dlist->size - info->data_offset;
1968 if (info->disk.raid_disk >= 0)
1969 pde = ddf->phys->entries + info->disk.raid_disk;
1970 if (pde &&
1971 !(be16_to_cpu(pde->state) & DDF_Failed))
1972 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1973 else
1974 info->disk.state = 1 << MD_DISK_FAULTY;
1975
1976 info->events = be32_to_cpu(ddf->active->seq);
1977 } else {
1978 info->disk.number = -1;
1979 info->disk.raid_disk = -1;
1980 // info->disk.raid_disk = find refnum in the table and use index;
1981 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1982 }
1983
1984 info->recovery_start = MaxSector;
1985 info->reshape_active = 0;
1986 info->recovery_blocked = 0;
1987 info->name[0] = 0;
1988
1989 info->array.major_version = -1;
1990 info->array.minor_version = -2;
1991 strcpy(info->text_version, "ddf");
1992 info->safe_mode_delay = 0;
1993
1994 uuid_from_super_ddf(st, info->uuid);
1995
1996 if (map) {
1997 int i;
1998 for (i = 0 ; i < map_disks; i++) {
1999 if (i < info->array.raid_disks &&
2000 !(be16_to_cpu(ddf->phys->entries[i].state)
2001 & DDF_Failed))
2002 map[i] = 1;
2003 else
2004 map[i] = 0;
2005 }
2006 }
2007 }
2008
2009 /* size of name must be at least 17 bytes! */
2010 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i)
2011 {
2012 int j;
2013 memcpy(name, ddf->virt->entries[i].name, 16);
2014 name[16] = 0;
2015 for(j = 0; j < 16; j++)
2016 if (name[j] == ' ')
2017 name[j] = 0;
2018 }
2019
2020 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
2021 {
2022 struct ddf_super *ddf = st->sb;
2023 struct vcl *vc = ddf->currentconf;
2024 int cd = ddf->currentdev;
2025 int n_prim;
2026 int j;
2027 struct dl *dl;
2028 int map_disks = info->array.raid_disks;
2029 __u32 *cptr;
2030 struct vd_config *conf;
2031
2032 memset(info, 0, sizeof(*info));
2033 if (layout_ddf2md(&vc->conf, &info->array) == -1)
2034 return;
2035 info->array.md_minor = -1;
2036 cptr = (__u32 *)(vc->conf.guid + 16);
2037 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
2038 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
2039 info->array.chunk_size = 512 << vc->conf.chunk_shift;
2040 info->custom_array_size = 0;
2041
2042 conf = &vc->conf;
2043 n_prim = be16_to_cpu(conf->prim_elmnt_count);
2044 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
2045 int ibvd = cd / n_prim - 1;
2046 cd %= n_prim;
2047 conf = vc->other_bvds[ibvd];
2048 }
2049
2050 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
2051 info->data_offset =
2052 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
2053 if (vc->block_sizes)
2054 info->component_size = vc->block_sizes[cd];
2055 else
2056 info->component_size = be64_to_cpu(conf->blocks);
2057 }
2058
2059 for (dl = ddf->dlist; dl ; dl = dl->next)
2060 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
2061 break;
2062
2063 info->disk.major = 0;
2064 info->disk.minor = 0;
2065 info->disk.state = 0;
2066 if (dl && dl->pdnum >= 0) {
2067 info->disk.major = dl->major;
2068 info->disk.minor = dl->minor;
2069 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2070 * be16_to_cpu(conf->prim_elmnt_count);
2071 info->disk.number = dl->pdnum;
2072 info->disk.state = 0;
2073 if (info->disk.number >= 0 &&
2074 (be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Online) &&
2075 !(be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Failed))
2076 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2077 info->events = be32_to_cpu(ddf->active->seq);
2078 }
2079
2080 info->container_member = ddf->currentconf->vcnum;
2081
2082 info->recovery_start = MaxSector;
2083 info->resync_start = 0;
2084 info->reshape_active = 0;
2085 info->recovery_blocked = 0;
2086 if (!(ddf->virt->entries[info->container_member].state
2087 & DDF_state_inconsistent) &&
2088 (ddf->virt->entries[info->container_member].init_state
2089 & DDF_initstate_mask)
2090 == DDF_init_full)
2091 info->resync_start = MaxSector;
2092
2093 uuid_from_super_ddf(st, info->uuid);
2094
2095 info->array.major_version = -1;
2096 info->array.minor_version = -2;
2097 sprintf(info->text_version, "/%s/%d",
2098 st->container_devnm,
2099 info->container_member);
2100 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2101
2102 _ddf_array_name(info->name, ddf, info->container_member);
2103
2104 if (map)
2105 for (j = 0; j < map_disks; j++) {
2106 map[j] = 0;
2107 if (j < info->array.raid_disks) {
2108 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2109 if (i >= 0 &&
2110 (be16_to_cpu(ddf->phys->entries[i].state)
2111 & DDF_Online) &&
2112 !(be16_to_cpu(ddf->phys->entries[i].state)
2113 & DDF_Failed))
2114 map[i] = 1;
2115 }
2116 }
2117 }
2118
static int update_super_ddf(struct supertype *st, struct mdinfo *info,
			    char *update,
			    char *devname, int verbose,
			    int uuid_set, char *homehost)
{
	/* For 'assemble' and 'force' we need to return non-zero if any
	 * change was made.  For others, the return value is ignored.
	 * Update options are:
	 *  force-one : This device looks a bit old but needs to be included,
	 *        update age info appropriately.
	 *  assemble: clear any 'faulty' flag to allow this device to
	 *        be assembled.
	 *  force-array: Array is degraded but being forced, mark it clean
	 *         if that will be needed to assemble it.
	 *
	 *  newdev:  not used ????
	 *  grow:  Array has gained a new device - this is currently for
	 *        linear only
	 *  resync: mark as dirty so a resync will happen.
	 *  uuid:  Change the uuid of the array to match what is given
	 *  homehost:  update the recorded homehost
	 *  name:  update the name - preserving the homehost
	 *  _reshape_progress: record new reshape_progress position.
	 *
	 * Following are not relevant for this version:
	 *  sparc2.2 : update from old dodgey metadata
	 *  super-minor: change the preferred_minor number
	 *  summaries:  update redundant counters.
	 *
	 * We don't need to handle "force-*" or "assemble" specially as
	 * there is no need to 'trick' the kernel.  When the metadata is
	 * first updated to activate the array, all the implied
	 * modifications will just happen.
	 */

	if (strcmp(update, "grow") == 0)
		return 0;	/* FIXME: not implemented */
	if (strcmp(update, "resync") == 0)
		return 0;	/* dirtying happens on activation */
	if (strcmp(update, "_reshape_progress") == 0)
		return 0;	/* We don't support reshape yet */
	if (strcmp(update, "assemble") == 0)
		return 0;	/* nothing to do, just succeed */
	/* "homehost" (stored in controller->vendor_data when we are the
	 * vendor), "name" (stored in virtual_entry->name) and anything
	 * unrecognised are currently unsupported. */
	return -1;
}
2187
2188 static void make_header_guid(char *guid)
2189 {
2190 be32 stamp;
2191 /* Create a DDF Header of Virtual Disk GUID */
2192
2193 /* 24 bytes of fiction required.
2194 * first 8 are a 'vendor-id' - "Linux-MD"
2195 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2196 * Remaining 8 random number plus timestamp
2197 */
2198 memcpy(guid, T10, sizeof(T10));
2199 stamp = cpu_to_be32(0xdeadbeef);
2200 memcpy(guid+8, &stamp, 4);
2201 stamp = cpu_to_be32(0);
2202 memcpy(guid+12, &stamp, 4);
2203 stamp = cpu_to_be32(time(0) - DECADE);
2204 memcpy(guid+16, &stamp, 4);
2205 stamp._v32 = random32();
2206 memcpy(guid+20, &stamp, 4);
2207 }
2208
2209 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2210 {
2211 unsigned int i;
2212 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2213 if (all_ff(ddf->virt->entries[i].guid))
2214 return i;
2215 }
2216 return DDF_NOTFOUND;
2217 }
2218
2219 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2220 const char *name)
2221 {
2222 unsigned int i;
2223 if (name == NULL)
2224 return DDF_NOTFOUND;
2225 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2226 if (all_ff(ddf->virt->entries[i].guid))
2227 continue;
2228 if (!strncmp(name, ddf->virt->entries[i].name,
2229 sizeof(ddf->virt->entries[i].name)))
2230 return i;
2231 }
2232 return DDF_NOTFOUND;
2233 }
2234
2235 #ifndef MDASSEMBLE
2236 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2237 const char *guid)
2238 {
2239 unsigned int i;
2240 if (guid == NULL || all_ff(guid))
2241 return DDF_NOTFOUND;
2242 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2243 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2244 return i;
2245 return DDF_NOTFOUND;
2246 }
2247 #endif
2248
static int init_super_ddf(struct supertype *st,
			  mdu_array_info_t *info,
			  unsigned long long size, char *name, char *homehost,
			  int *uuid, unsigned long long data_offset)
{
	/* This is primarily called by Create when creating a new array.
	 * We will then get add_to_super called for each component, and then
	 * write_init_super called to write it out to each device.
	 * For DDF, Create can create on fresh devices or on a pre-existing
	 * array.
	 * To create on a pre-existing array a different method will be called.
	 * This one is just for fresh drives.
	 *
	 * We need to create the entire 'ddf' structure which includes:
	 *  DDF headers - these are easy.
	 *  Controller data - a Sector describing this controller .. not that
	 *                  this is a controller exactly.
	 *  Physical Disk Record - one entry per device, so
	 *			leave plenty of space.
	 *  Virtual Disk Records - again, just leave plenty of space.
	 *                   This just lists VDs, doesn't give details.
	 *  Config records - describe the VDs that use this disk
	 *  DiskData  - describes 'this' device.
	 *  BadBlockManagement - empty
	 *  Diag Space - empty
	 *  Vendor Logs - Could we put bitmaps here?
	 *
	 * Returns 1 on success, 0 on failure.
	 */
	struct ddf_super *ddf;
	char hostname[17];
	int hostlen;
	int max_phys_disks, max_virt_disks;
	unsigned long long sector;
	int clen;
	int i;
	int pdsize, vdsize;
	struct phys_disk *pd;
	struct virtual_disk *vd;

	if (data_offset != INVALID_SECTORS) {
		pr_err("data-offset not supported by DDF\n");
		return 0;
	}

	/* An existing superblock means we are creating a BVD inside a
	 * pre-existing container; delegate to the BVD variant. */
	if (st->sb)
		return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
					  data_offset);

	if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
		pr_err("%s could not allocate superblock\n", __func__);
		return 0;
	}
	memset(ddf, 0, sizeof(*ddf));
	/* Attach to st immediately so ownership passes to the caller. */
	st->sb = ddf;

	if (info == NULL) {
		/* zeroing superblock */
		return 0;
	}

	/* At least 32MB *must* be reserved for the ddf.  So let's just
	 * start 32MB from the end, and put the primary header there.
	 * Don't do secondary for now.
	 * We don't know exactly where that will be yet as it could be
	 * different on each device.  So just set up the lengths.
	 */

	ddf->anchor.magic = DDF_HEADER_MAGIC;
	make_header_guid(ddf->anchor.guid);

	memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
	ddf->anchor.seq = cpu_to_be32(1);
	ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
	ddf->anchor.openflag = 0xFF;
	ddf->anchor.foreignflag = 0;
	ddf->anchor.enforcegroups = 0; /* Is this best?? */
	ddf->anchor.pad0 = 0xff;
	memset(ddf->anchor.pad1, 0xff, 12);
	memset(ddf->anchor.header_ext, 0xff, 32);
	/* LBAs are unknown until write time; all-ones means unset. */
	ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
	ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
	ddf->anchor.type = DDF_HEADER_ANCHOR;
	memset(ddf->anchor.pad2, 0xff, 3);
	ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
	/* Put this at bottom of 32M reserved.. */
	ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
	max_phys_disks = 1023;   /* Should be enough, 4095 is also allowed */
	ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
	max_virt_disks = 255; /* 15, 63, 255, 1024, 4095 are all allowed */
	ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks);
	ddf->max_part = 64;
	ddf->anchor.max_partitions = cpu_to_be16(ddf->max_part);
	ddf->mppe = 256; /* 16, 64, 256, 1024, 4096 are all allowed */
	/* One sector of header plus refnum (4 byte) and LBA (8 byte)
	 * tables, rounded to whole sectors. */
	ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
	ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
	ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
	memset(ddf->anchor.pad3, 0xff, 54);
	/* Controller section is one sector long immediately
	 * after the ddf header */
	sector = 1;
	ddf->anchor.controller_section_offset = cpu_to_be32(sector);
	ddf->anchor.controller_section_length = cpu_to_be32(1);
	sector += 1;

	/* phys is 8 sectors after that */
	pdsize = ROUND_UP(sizeof(struct phys_disk) +
			  sizeof(struct phys_disk_entry)*max_phys_disks,
			  512);
	/* The spec only allows certain section sizes; anything else
	 * indicates a programming error above. */
	switch(pdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();
	}
	ddf->anchor.phys_section_offset = cpu_to_be32(sector);
	ddf->anchor.phys_section_length =
		cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
	sector += pdsize/512;

	/* virt is another 32 sectors */
	vdsize = ROUND_UP(sizeof(struct virtual_disk) +
			  sizeof(struct virtual_entry) * max_virt_disks,
			  512);
	switch(vdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();
	}
	ddf->anchor.virt_section_offset = cpu_to_be32(sector);
	ddf->anchor.virt_section_length =
		cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
	sector += vdsize/512;

	/* One config record per partition plus one spare record. */
	clen = ddf->conf_rec_len * (ddf->max_part+1);
	ddf->anchor.config_section_offset = cpu_to_be32(sector);
	ddf->anchor.config_section_length = cpu_to_be32(clen);
	sector += clen;

	ddf->anchor.data_section_offset = cpu_to_be32(sector);
	ddf->anchor.data_section_length = cpu_to_be32(1);
	sector += 1;

	/* Unused optional sections: zero length, all-ones offset. */
	ddf->anchor.bbm_section_length = cpu_to_be32(0);
	ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.diag_space_length = cpu_to_be32(0);
	ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.vendor_length = cpu_to_be32(0);
	ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);

	memset(ddf->anchor.pad4, 0xff, 256);

	/* Primary and secondary headers start as copies of the anchor,
	 * differing only in type and openflag. */
	memcpy(&ddf->primary, &ddf->anchor, 512);
	memcpy(&ddf->secondary, &ddf->anchor, 512);

	ddf->primary.openflag = 1; /* I guess.. */
	ddf->primary.type = DDF_HEADER_PRIMARY;

	ddf->secondary.openflag = 1; /* I guess.. */
	ddf->secondary.type = DDF_HEADER_SECONDARY;

	ddf->active = &ddf->primary;

	ddf->controller.magic = DDF_CONTROLLER_MAGIC;

	/* 24 more bytes of fiction required.
	 * first 8 are a 'vendor-id' - "Linux-MD"
	 * Remaining 16 are serial number.... maybe a hostname would do?
	 */
	memcpy(ddf->controller.guid, T10, sizeof(T10));
	/* hostname[] is 17 bytes, so the name is silently truncated to
	 * 16 characters and right-justified in the GUID. */
	gethostname(hostname, sizeof(hostname));
	hostname[sizeof(hostname) - 1] = 0;
	hostlen = strlen(hostname);
	memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
	for (i = strlen(T10) ; i+hostlen < 24; i++)
		ddf->controller.guid[i] = ' ';

	ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
	ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
	ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
	ddf->controller.type.sub_device_id = cpu_to_be16(0);
	memcpy(ddf->controller.product_id, "What Is My PID??", 16);
	memset(ddf->controller.pad, 0xff, 8);
	memset(ddf->controller.vendor_data, 0xff, 448);
	if (homehost && strlen(homehost) < 440)
		strcpy((char*)ddf->controller.vendor_data, homehost);

	if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
		pr_err("%s could not allocate pd\n", __func__);
		return 0;
	}
	ddf->phys = pd;
	ddf->pdsize = pdsize;

	/* Padding beyond the header is 0xff, the header itself zeroed. */
	memset(pd, 0xff, pdsize);
	memset(pd, 0, sizeof(*pd));
	pd->magic = DDF_PHYS_RECORDS_MAGIC;
	pd->used_pdes = cpu_to_be16(0);
	pd->max_pdes = cpu_to_be16(max_phys_disks);
	memset(pd->pad, 0xff, 52);
	/* all-0xff GUID marks an unused entry (see find_unused_pde) */
	for (i = 0; i < max_phys_disks; i++)
		memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);

	if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
		pr_err("%s could not allocate vd\n", __func__);
		return 0;
	}
	ddf->virt = vd;
	ddf->vdsize = vdsize;
	memset(vd, 0, vdsize);
	vd->magic = DDF_VIRT_RECORDS_MAGIC;
	vd->populated_vdes = cpu_to_be16(0);
	vd->max_vdes = cpu_to_be16(max_virt_disks);
	memset(vd->pad, 0xff, 52);

	/* all-0xff entries are unused (see find_unused_vde) */
	for (i=0; i<max_virt_disks; i++)
		memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));

	st->sb = ddf;
	ddf_set_updates_pending(ddf);
	return 1;
}
2467
static int chunk_to_shift(int chunksize)
{
	/* Convert a chunk size in bytes to the log2 of its size in
	 * 512-byte sectors, e.g. 65536 -> 7.
	 */
	int sectors = chunksize / 512;

	return ffs(sectors) - 1;
}
2472
2473 #ifndef MDASSEMBLE
/* A used region of a physical disk, in 512-byte sectors. */
struct extent {
	unsigned long long start, size;
};

/* qsort comparator: order extents by ascending start sector. */
static int cmp_extent(const void *av, const void *bv)
{
	const struct extent *x = av;
	const struct extent *y = bv;

	if (x->start == y->start)
		return 0;
	return x->start < y->start ? -1 : 1;
}
2487
static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
{
	/* Find a list of used extents on the give physical device
	 * (dnum) of the given ddf.
	 * Return a malloced array of 'struct extent', sorted by start
	 * sector and terminated by a zero-sized entry whose start is the
	 * disk's config_size (i.e. the end of usable space).
	 * Returns NULL if the disk has no valid phys-table slot or is
	 * not a plain online disk.
	 */
	struct extent *rv;
	int n = 0;
	unsigned int i;
	__u16 state;

	if (dl->pdnum < 0)
		return NULL;
	state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);

	/* only an online, non-failed, non-missing disk has usable space */
	if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
		return NULL;

	/* max_part possible extents plus the terminator (+2 leaves one
	 * spare slot) */
	rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));

	/* one extent for each BVD that has a component on this disk */
	for (i = 0; i < ddf->max_part; i++) {
		const struct vd_config *bvd;
		unsigned int ibvd;
		struct vcl *v = dl->vlist[i];
		if (v == NULL ||
		    get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
					     &bvd, &ibvd) == DDF_NOTFOUND)
			continue;
		rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
		rv[n].size = be64_to_cpu(bvd->blocks);
		n++;
	}
	qsort(rv, n, sizeof(*rv), cmp_extent);

	rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
	rv[n].size = 0;
	return rv;
}
2526 #endif
2527
static int init_super_ddf_bvd(struct supertype *st,
			      mdu_array_info_t *info,
			      unsigned long long size,
			      char *name, char *homehost,
			      int *uuid, unsigned long long data_offset)
{
	/* We are creating a BVD inside a pre-existing container.
	 * so st->sb is already set.
	 * We need to create a new vd_config and a new virtual_entry
	 * Returns 1 on success, 0 on failure.
	 */
	struct ddf_super *ddf = st->sb;
	unsigned int venum, i;
	struct virtual_entry *ve;
	struct vcl *vcl;
	struct vd_config *vc;

	/* array names must be unique within one container */
	if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
		pr_err("This ddf already has an array called %s\n", name);
		return 0;
	}
	venum = find_unused_vde(ddf);
	if (venum == DDF_NOTFOUND) {
		pr_err("Cannot find spare slot for virtual disk\n");
		return 0;
	}
	ve = &ddf->virt->entries[venum];

	/* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
	 * timestamp, random number
	 */
	make_header_guid(ve->guid);
	ve->unit = cpu_to_be16(info->md_minor);
	ve->pad0 = 0xFFFF;
	ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
				  DDF_GUID_LEN);
	ve->type = cpu_to_be16(0);
	ve->state = DDF_state_degraded; /* Will be modified as devices are added */
	if (info->state & 1) /* clean */
		ve->init_state = DDF_init_full;
	else
		ve->init_state = DDF_init_not;

	memset(ve->pad1, 0xff, 14);
	/* DDF names are space-padded, not NUL-terminated */
	memset(ve->name, ' ', 16);
	if (name)
		strncpy(ve->name, name, 16);
	ddf->virt->populated_vdes =
		cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);

	/* Now create a new vd_config */
	if (posix_memalign((void**)&vcl, 512,
		           (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
		pr_err("%s could not allocate vd_config\n", __func__);
		return 0;
	}
	vcl->vcnum = venum;
	vcl->block_sizes = NULL; /* FIXME not for CONCAT */
	vc = &vcl->conf;

	vc->magic = DDF_VD_CONF_MAGIC;
	memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
	vc->timestamp = cpu_to_be32(time(0)-DECADE);
	vc->seqnum = cpu_to_be32(1);
	memset(vc->pad0, 0xff, 24);
	vc->chunk_shift = chunk_to_shift(info->chunk_size);
	/* layout_md2ddf translates the md level/layout into the DDF
	 * primary/secondary element description in vc */
	if (layout_md2ddf(info, vc) == -1 ||
		be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
		pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
		       __func__, info->level, info->layout, info->raid_disks);
		free(vcl);
		return 0;
	}
	vc->sec_elmnt_seq = 0;
	if (alloc_other_bvds(ddf, vcl) != 0) {
		pr_err("%s could not allocate other bvds\n",
		       __func__);
		free(vcl);
		return 0;
	}
	/* sizes are kept in 512-byte sectors; info->size is in KiB */
	vc->blocks = cpu_to_be64(info->size * 2);
	vc->array_blocks = cpu_to_be64(
		calc_array_size(info->level, info->raid_disks, info->layout,
				info->chunk_size, info->size*2));
	memset(vc->pad1, 0xff, 8);
	/* no dedicated spares yet */
	vc->spare_refs[0] = cpu_to_be32(0xffffffff);
	vc->spare_refs[1] = cpu_to_be32(0xffffffff);
	vc->spare_refs[2] = cpu_to_be32(0xffffffff);
	vc->spare_refs[3] = cpu_to_be32(0xffffffff);
	vc->spare_refs[4] = cpu_to_be32(0xffffffff);
	vc->spare_refs[5] = cpu_to_be32(0xffffffff);
	vc->spare_refs[6] = cpu_to_be32(0xffffffff);
	vc->spare_refs[7] = cpu_to_be32(0xffffffff);
	memset(vc->cache_pol, 0, 8);
	vc->bg_rate = 0x80;
	memset(vc->pad2, 0xff, 3);
	memset(vc->pad3, 0xff, 52);
	memset(vc->pad4, 0xff, 192);
	memset(vc->v0, 0xff, 32);
	memset(vc->v1, 0xff, 32);
	memset(vc->v2, 0xff, 16);
	memset(vc->v3, 0xff, 16);
	memset(vc->vendor, 0xff, 32);

	/* refnum table starts all-0xff (unset); the 8-byte-per-entry
	 * table that follows it (LBA offsets) starts zeroed */
	memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
	memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);

	/* secondary BVDs are copies of the first, distinguished only
	 * by their sec_elmnt_seq */
	for (i = 1; i < vc->sec_elmnt_count; i++) {
		memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
		vcl->other_bvds[i-1]->sec_elmnt_seq = i;
	}

	vcl->next = ddf->conflist;
	ddf->conflist = vcl;
	ddf->currentconf = vcl;
	ddf_set_updates_pending(ddf);
	return 1;
}
2645
2646 #ifndef MDASSEMBLE
static void add_to_super_ddf_bvd(struct supertype *st,
				 mdu_disk_info_t *dk, int fd, char *devname)
{
	/* fd and devname identify a device within the ddf container (st).
	 * dk identifies a location in the new BVD.
	 * We need to find suitable free space in that device and update
	 * the phys_refnum and lba_offset for the newly created vd_config.
	 * We might also want to update the type in the phys_disk
	 * section.
	 *
	 * Alternately: fd == -1 and we have already chosen which device to
	 * use and recorded in dlist->raid_disk;
	 *
	 * Fails silently (early return) if the disk cannot be found, is
	 * not in-sync, or has no suitable free extent.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	struct vd_config *vc;
	unsigned int i;
	unsigned long long blocks, pos, esize;
	struct extent *ex;
	unsigned int raid_disk = dk->raid_disk;

	/* locate the container member: by chosen raid slot when fd == -1,
	 * otherwise by device number */
	if (fd == -1) {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->raiddisk == dk->raid_disk)
				break;
	} else {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->major == dk->major &&
			    dl->minor == dk->minor)
				break;
	}
	if (!dl || dl->pdnum < 0 || ! (dk->state & (1<<MD_DISK_SYNC)))
		return;

	vc = &ddf->currentconf->conf;
	if (vc->sec_elmnt_count > 1) {
		/* secondary RAID level: select the BVD this slot belongs
		 * to, and the slot index within that BVD */
		unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
		if (raid_disk >= n)
			vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
		raid_disk %= n;
	}

	ex = get_extents(ddf, dl);
	if (!ex)
		return;

	i = 0; pos = 0;
	blocks = be64_to_cpu(vc->blocks);
	if (ddf->currentconf->block_sizes)
		blocks = ddf->currentconf->block_sizes[dk->raid_disk];

	/* First-fit: scan the gaps between used extents; the list from
	 * get_extents is sorted and ends with a zero-sized terminator,
	 * so the loop always stops. */
	do {
		esize = ex[i].start - pos;
		if (esize >= blocks)
			break;
		pos = ex[i].start + ex[i].size;
		i++;
	} while (ex[i-1].size);

	free(ex);
	if (esize < blocks)
		return;

	ddf->currentdev = dk->raid_disk;
	vc->phys_refnum[raid_disk] = dl->disk.refnum;
	LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);

	/* record this VD in the first free slot of the disk's list */
	for (i = 0; i < ddf->max_part ; i++)
		if (dl->vlist[i] == NULL)
			break;
	if (i == ddf->max_part)
		return;
	dl->vlist[i] = ddf->currentconf;

	if (fd >= 0)
		dl->fd = fd;
	if (devname)
		dl->devname = devname;

	/* Check if we can mark array as optimal yet */
	i = ddf->currentconf->vcnum;
	ddf->virt->entries[i].state =
		(ddf->virt->entries[i].state & ~DDF_state_mask)
		| get_svd_state(ddf, ddf->currentconf);
	/* the disk now carries array data, so it is no longer a
	 * global spare */
	be16_clear(ddf->phys->entries[dl->pdnum].type,
		   cpu_to_be16(DDF_Global_Spare));
	be16_set(ddf->phys->entries[dl->pdnum].type,
		 cpu_to_be16(DDF_Active_in_VD));
	dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
		__func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
		ddf->currentconf->vcnum, guid_str(vc->guid),
		dk->raid_disk);
	ddf_set_updates_pending(ddf);
}
2742
2743 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2744 {
2745 unsigned int i;
2746 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2747 if (all_ff(ddf->phys->entries[i].guid))
2748 return i;
2749 }
2750 return DDF_NOTFOUND;
2751 }
2752
2753 static void _set_config_size(struct phys_disk_entry *pde, const struct dl *dl)
2754 {
2755 __u64 cfs, t;
2756 cfs = min(dl->size - 32*1024*2ULL, be64_to_cpu(dl->primary_lba));
2757 t = be64_to_cpu(dl->secondary_lba);
2758 if (t != ~(__u64)0)
2759 cfs = min(cfs, t);
2760 /*
2761 * Some vendor DDF structures interpret workspace_lba
2762 * very differently than we do: Make a sanity check on the value.
2763 */
2764 t = be64_to_cpu(dl->workspace_lba);
2765 if (t < cfs) {
2766 __u64 wsp = cfs - t;
2767 if (wsp > 1024*1024*2ULL && wsp > dl->size / 16) {
2768 pr_err("%s: %x:%x: workspace size 0x%llx too big, ignoring\n",
2769 __func__, dl->major, dl->minor, wsp);
2770 } else
2771 cfs = t;
2772 }
2773 pde->config_size = cpu_to_be64(cfs);
2774 dprintf("%s: %x:%x config_size %llx, DDF structure is %llx blocks\n",
2775 __func__, dl->major, dl->minor, cfs, dl->size-cfs);
2776 }
2777
/* Add a device to a container, either while creating it or while
 * expanding a pre-existing container
 * Returns 0 on success, 1 on failure.
 */
static int add_to_super_ddf(struct supertype *st,
			    mdu_disk_info_t *dk, int fd, char *devname,
			    unsigned long long data_offset)
{
	struct ddf_super *ddf = st->sb;
	struct dl *dd;
	time_t now;
	struct tm *tm;
	unsigned long long size;
	struct phys_disk_entry *pde;
	unsigned int n, i;
	struct stat stb;
	__u32 *tptr;

	/* if a VD is currently being created, this is a member disk of
	 * that VD - delegate to the BVD variant */
	if (ddf->currentconf) {
		add_to_super_ddf_bvd(st, dk, fd, devname);
		return 0;
	}

	/* This is device numbered dk->number. We need to create
	 * a phys_disk entry and a more detailed disk_data entry.
	 */
	fstat(fd, &stb);
	n = find_unused_pde(ddf);
	if (n == DDF_NOTFOUND) {
		pr_err("%s: No free slot in array, cannot add disk\n",
		       __func__);
		return 1;
	}
	pde = &ddf->phys->entries[n];
	get_dev_size(fd, NULL, &size);
	if (size <= 32*1024*1024) {
		pr_err("%s: device size must be at least 32MB\n",
		       __func__);
		return 1;
	}
	size >>= 9;	/* bytes -> 512-byte sectors */

	if (posix_memalign((void**)&dd, 512,
		           sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
		pr_err("%s could allocate buffer for new disk, aborting\n",
		       __func__);
		return 1;
	}
	dd->major = major(stb.st_rdev);
	dd->minor = minor(stb.st_rdev);
	dd->devname = devname;
	dd->fd = fd;
	dd->spare = NULL;

	dd->disk.magic = DDF_PHYS_DATA_MAGIC;
	now = time(0);
	tm = localtime(&now);
	/* GUID: vendor id, then creation date, then 8 random bytes */
	sprintf(dd->disk.guid, "%8s%04d%02d%02d",
		T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
	tptr = (__u32 *)(dd->disk.guid + 16);
	*tptr++ = random32();
	*tptr = random32();

	/* retry until the random refnum does not collide with any
	 * existing physical-disk entry */
	do {
		/* Cannot be bothered finding a CRC of some irrelevant details*/
		dd->disk.refnum._v32 = random32();
		for (i = be16_to_cpu(ddf->active->max_pd_entries);
		     i > 0; i--)
			if (be32_eq(ddf->phys->entries[i-1].refnum,
				    dd->disk.refnum))
				break;
	} while (i > 0);

	dd->disk.forced_ref = 1;
	dd->disk.forced_guid = 1;
	memset(dd->disk.vendor, ' ', 32);
	memcpy(dd->disk.vendor, "Linux", 5);
	memset(dd->disk.pad, 0xff, 442);
	for (i = 0; i < ddf->max_part ; i++)
		dd->vlist[i] = NULL;

	dd->pdnum = n;

	if (st->update_tail) {
		/* Under mdmon: build a one-entry phys_disk update record
		 * to be queued later (see write_init_super_ddf).
		 * NOTE(review): used_pdes here appears to carry the slot
		 * number of the new entry rather than a count - confirm
		 * against ddf_process_update. */
		int len = (sizeof(struct phys_disk) +
			   sizeof(struct phys_disk_entry));
		struct phys_disk *pd;

		pd = xmalloc(len);
		pd->magic = DDF_PHYS_RECORDS_MAGIC;
		pd->used_pdes = cpu_to_be16(n);
		pde = &pd->entries[0];
		dd->mdupdate = pd;
	} else
		ddf->phys->used_pdes = cpu_to_be16(
			1 + be16_to_cpu(ddf->phys->used_pdes));

	memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
	pde->refnum = dd->disk.refnum;
	pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
	pde->state = cpu_to_be16(DDF_Online);
	dd->size = size;
	/*
	 * If there is already a device in dlist, try to reserve the same
	 * amount of workspace. Otherwise, use 32MB.
	 * We checked disk size above already.
	 */
#define __calc_lba(new, old, lba, mb) do { \
		unsigned long long dif; \
		if ((old) != NULL) \
			dif = (old)->size - be64_to_cpu((old)->lba); \
		else \
			dif = (new)->size; \
		if ((new)->size > dif) \
			(new)->lba = cpu_to_be64((new)->size - dif); \
		else \
			(new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
	} while (0)
	__calc_lba(dd, ddf->dlist, workspace_lba, 32);
	__calc_lba(dd, ddf->dlist, primary_lba, 16);
	if (ddf->dlist == NULL ||
	    be64_to_cpu(ddf->dlist->secondary_lba) != ~(__u64)0)
		__calc_lba(dd, ddf->dlist, secondary_lba, 32);
	_set_config_size(pde, dd);

	sprintf(pde->path, "%17.17s","Information: nil") ;
	memset(pde->pad, 0xff, 6);

	if (st->update_tail) {
		/* defer: mdmon applies the queued update record */
		dd->next = ddf->add_list;
		ddf->add_list = dd;
	} else {
		dd->next = ddf->dlist;
		ddf->dlist = dd;
		ddf_set_updates_pending(ddf);
	}

	return 0;
}
2916
2917 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2918 {
2919 struct ddf_super *ddf = st->sb;
2920 struct dl *dl;
2921
2922 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2923 * disappeared from the container.
2924 * We need to arrange that it disappears from the metadata and
2925 * internal data structures too.
2926 * Most of the work is done by ddf_process_update which edits
2927 * the metadata and closes the file handle and attaches the memory
2928 * where free_updates will free it.
2929 */
2930 for (dl = ddf->dlist; dl ; dl = dl->next)
2931 if (dl->major == dk->major &&
2932 dl->minor == dk->minor)
2933 break;
2934 if (!dl || dl->pdnum < 0)
2935 return -1;
2936
2937 if (st->update_tail) {
2938 int len = (sizeof(struct phys_disk) +
2939 sizeof(struct phys_disk_entry));
2940 struct phys_disk *pd;
2941
2942 pd = xmalloc(len);
2943 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2944 pd->used_pdes = cpu_to_be16(dl->pdnum);
2945 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2946 append_metadata_update(st, pd, len);
2947 }
2948 return 0;
2949 }
2950 #endif
2951
2952 /*
2953 * This is the write_init_super method for a ddf container. It is
2954 * called when creating a container or adding another device to a
2955 * container.
2956 */
2957
static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
{
	/* Write one complete copy (primary or secondary, per 'type') of
	 * the DDF structure to disk d: header, controller data, phys and
	 * virt tables, config records and this disk's disk-data sector,
	 * all written sequentially from the header's LBA.
	 * Returns 1 on success, 0 on failure (or when this copy's LBA is
	 * unset, which counts as "nothing to do" but still returns 0).
	 */
	unsigned long long sector;
	struct ddf_header *header;
	int fd, i, n_config, conf_size, buf_size;
	int ret = 0;
	char *conf;

	fd = d->fd;

	switch (type) {
	case DDF_HEADER_PRIMARY:
		header = &ddf->primary;
		sector = be64_to_cpu(header->primary_lba);
		break;
	case DDF_HEADER_SECONDARY:
		header = &ddf->secondary;
		sector = be64_to_cpu(header->secondary_lba);
		break;
	default:
		return 0;
	}
	/* all-ones LBA means this copy is not to be written */
	if (sector == ~(__u64)0)
		return 0;

	/* write the header with openflag set first; it is rewritten with
	 * openflag clear once all sections are safely on disk */
	header->type = type;
	header->openflag = 1;
	header->crc = calc_crc(header, 512);

	lseek64(fd, sector<<9, 0);
	if (write(fd, header, 512) < 0)
		goto out;

	ddf->controller.crc = calc_crc(&ddf->controller, 512);
	if (write(fd, &ddf->controller, 512) < 0)
		goto out;

	ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
	if (write(fd, ddf->phys, ddf->pdsize) < 0)
		goto out;
	ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
	if (write(fd, ddf->virt, ddf->vdsize) < 0)
		goto out;

	/* Now write lots of config records. */
	n_config = ddf->max_part;
	conf_size = ddf->conf_rec_len * 512;
	/* +1 slot for the spare assignment record */
	conf = ddf->conf;
	buf_size = conf_size * (n_config + 1);
	if (!conf) {
		/* allocate once and cache for subsequent calls */
		if (posix_memalign((void**)&conf, 512, buf_size) != 0)
			goto out;
		ddf->conf = conf;
	}
	for (i = 0 ; i <= n_config ; i++) {
		struct vcl *c;
		struct vd_config *vdc = NULL;
		if (i == n_config) {
			/* the final slot holds the spare record */
			c = (struct vcl *)d->spare;
			if (c)
				vdc = &c->conf;
		} else {
			unsigned int dummy;
			c = d->vlist[i];
			if (c)
				get_pd_index_from_refnum(
					c, d->disk.refnum,
					ddf->mppe,
					(const struct vd_config **)&vdc,
					&dummy);
		}
		if (vdc) {
			dprintf("writing conf record %i on disk %08x for %s/%u\n",
				i, be32_to_cpu(d->disk.refnum),
				guid_str(vdc->guid),
				vdc->sec_elmnt_seq);
			vdc->seqnum = header->seq;
			vdc->crc = calc_crc(vdc, conf_size);
			memcpy(conf + i*conf_size, vdc, conf_size);
		} else
			/* unused config slots are filled with 0xff */
			memset(conf + i*conf_size, 0xff, conf_size);
	}
	if (write(fd, conf, buf_size) != buf_size)
		goto out;

	d->disk.crc = calc_crc(&d->disk, 512);
	if (write(fd, &d->disk, 512) < 0)
		goto out;

	ret = 1;
out:
	/* rewrite the header with openflag cleared; this happens even on
	 * failure so the flag never stays set on disk */
	header->openflag = 0;
	header->crc = calc_crc(header, 512);

	lseek64(fd, sector<<9, 0);
	if (write(fd, header, 512) < 0)
		ret = 0;

	return ret;
}
3058
static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
{
	/* Write the full DDF metadata - primary and secondary structures
	 * plus the anchor in the very last sector - to one member disk.
	 * Returns 1 on success, 0 on failure or when the disk has no
	 * open file descriptor.
	 */
	unsigned long long size;
	int fd = d->fd;
	if (fd < 0)
		return 0;

	/* We need to fill in the primary, (secondary) and workspace
	 * lba's in the headers, set their checksums,
	 * Also checksum phys, virt....
	 *
	 * Then write everything out, finally the anchor is written.
	 */
	get_dev_size(fd, NULL, &size);
	size /= 512;	/* work in 512-byte sectors from here on */
	/* keep a disk's recorded locations when present; otherwise
	 * default to 32MB from the end (workspace/secondary) and 16MB
	 * from the end (primary) */
	if (be64_to_cpu(d->workspace_lba) != 0ULL)
		ddf->anchor.workspace_lba = d->workspace_lba;
	else
		ddf->anchor.workspace_lba =
			cpu_to_be64(size - 32*1024*2);
	if (be64_to_cpu(d->primary_lba) != 0ULL)
		ddf->anchor.primary_lba = d->primary_lba;
	else
		ddf->anchor.primary_lba =
			cpu_to_be64(size - 16*1024*2);
	if (be64_to_cpu(d->secondary_lba) != 0ULL)
		ddf->anchor.secondary_lba = d->secondary_lba;
	else
		ddf->anchor.secondary_lba =
			cpu_to_be64(size - 32*1024*2);
	ddf->anchor.seq = ddf->active->seq;
	/* primary/secondary headers track the anchor's contents */
	memcpy(&ddf->primary, &ddf->anchor, 512);
	memcpy(&ddf->secondary, &ddf->anchor, 512);

	ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
	ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
	ddf->anchor.crc = calc_crc(&ddf->anchor, 512);

	if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
		return 0;

	if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
		return 0;

	/* the anchor lives in the last sector of the device */
	lseek64(fd, (size-1)*512, SEEK_SET);
	if (write(fd, &ddf->anchor, 512) < 0)
		return 0;

	return 1;
}
3109
3110 #ifndef MDASSEMBLE
3111 static int __write_init_super_ddf(struct supertype *st)
3112 {
3113 struct ddf_super *ddf = st->sb;
3114 struct dl *d;
3115 int attempts = 0;
3116 int successes = 0;
3117
3118 pr_state(ddf, __func__);
3119
3120 /* try to write updated metadata,
3121 * if we catch a failure move on to the next disk
3122 */
3123 for (d = ddf->dlist; d; d=d->next) {
3124 attempts++;
3125 successes += _write_super_to_disk(ddf, d);
3126 }
3127
3128 return attempts != successes;
3129 }
3130
static int write_init_super_ddf(struct supertype *st)
{
	/* Final step of array/container creation: either queue metadata
	 * updates for mdmon (when st->update_tail is set) or write the
	 * DDF structures directly to the member disks.
	 * Returns 0 on the update-queue path, otherwise the status of
	 * __write_init_super_ddf (0 on success).
	 */
	struct ddf_super *ddf = st->sb;
	struct vcl *currentconf = ddf->currentconf;

	/* We are done with currentconf - reset it so st refers to the container */
	ddf->currentconf = NULL;

	if (st->update_tail) {
		/* queue the virtual_disk and vd_config as metadata updates */
		struct virtual_disk *vd;
		struct vd_config *vc;
		int len, tlen;
		unsigned int i;

		if (!currentconf) {
			/* Must be adding a physical disk to the container */
			int len = (sizeof(struct phys_disk) +
				   sizeof(struct phys_disk_entry));

			/* adding a disk to the container. */
			if (!ddf->add_list)
				return 0;

			/* the update record was prepared by add_to_super_ddf */
			append_metadata_update(st, ddf->add_list->mdupdate, len);
			ddf->add_list->mdupdate = NULL;
			return 0;
		}

		/* Newly created VD */

		/* First the virtual disk. We have a slightly fake header */
		len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
		vd = xmalloc(len);
		*vd = *ddf->virt;
		vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
		/* NOTE(review): populated_vdes here appears to carry the
		 * VD's index rather than a population count - confirm
		 * against ddf_process_update. */
		vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
		append_metadata_update(st, vd, len);

		/* Then the vd_config */
		len = ddf->conf_rec_len * 512;
		tlen = len * currentconf->conf.sec_elmnt_count;
		vc = xmalloc(tlen);
		memcpy(vc, &currentconf->conf, len);
		/* append the other BVDs of a secondary-level array */
		for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
			memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
			       len);
		append_metadata_update(st, vc, tlen);

		/* FIXME I need to close the fds! */
		return 0;
	} else {
		struct dl *d;
		if (!currentconf)
			/* presumably erases any foreign metadata on the new
			 * members; loops until Kill reports nothing left */
			for (d = ddf->dlist; d; d=d->next)
				while (Kill(d->devname, NULL, 0, -1, 1) == 0);
		return __write_init_super_ddf(st);
	}
}
3190
3191 #endif
3192
/* Usable data size of a device, in 512-byte sectors.
 * DDF metadata lives in the last 32MiB of the device (32*1024*2
 * sectors), so that space is always reserved.  Returns 0 when the
 * device is too small to hold any data.  'st' and 'data_offset' are
 * unused here.
 */
static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
			    unsigned long long data_offset)
{
	const __u64 reserved = 32*1024*2;	/* 32MiB in sectors */

	return devsize > reserved ? devsize - reserved : 0;
}
3201
3202 #ifndef MDASSEMBLE
3203
static int reserve_space(struct supertype *st, int raiddisks,
			 unsigned long long size, int chunk,
			 unsigned long long *freesize)
{
	/* Find 'raiddisks' spare extents at least 'size' big (but
	 * only caring about multiples of 'chunk') and remember
	 * them.  If size==0, find the largest size possible.
	 * Report available size in *freesize
	 * If space cannot be found, fail.
	 * On success the chosen slot is recorded in each dl->raiddisk
	 * and the chosen extent size in dl->esize.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	int cnt = 0;

	/* Reset per-disk selection state from any previous attempt. */
	for (dl = ddf->dlist; dl ; dl=dl->next) {
		dl->raiddisk = -1;
		dl->esize = 0;
	}
	/* Now find largest extent on each device */
	for (dl = ddf->dlist ; dl ; dl=dl->next) {
		struct extent *e = get_extents(ddf, dl);
		unsigned long long pos = 0;
		int i = 0;
		int found = 0;
		unsigned long long minsize = size;

		if (size == 0)
			minsize = chunk;

		if (!e)
			continue;
		/* Walk the gaps between allocated extents; 'minsize'
		 * ratchets up to the largest qualifying gap so far,
		 * so it ends as the best extent on this disk. */
		do {
			unsigned long long esize;
			esize = e[i].start - pos;
			if (esize >= minsize) {
				found = 1;
				minsize = esize;
			}
			pos = e[i].start + e[i].size;
			i++;
		} while (e[i-1].size);
		if (found) {
			cnt++;
			dl->esize = minsize;
		}
		free(e);
	}
	if (cnt < raiddisks) {
		pr_err("not enough devices with space to create array.\n");
		return 0; /* No enough free spaces large enough */
	}
	if (size == 0) {
		/* choose the largest size of which there are at least 'raiddisk' */
		for (dl = ddf->dlist ; dl ; dl=dl->next) {
			struct dl *dl2;
			if (dl->esize <= size)
				continue;
			/* This is bigger than 'size', see if there are enough */
			cnt = 0;
			for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
				if (dl2->esize >= dl->esize)
					cnt++;
			if (cnt >= raiddisks)
				size = dl->esize;
		}
		/* Round down to a whole number of chunks. */
		if (chunk) {
			size = size / chunk;
			size *= chunk;
		}
		*freesize = size;
		if (size < 32) {
			pr_err("not enough spare devices to create array.\n");
			return 0;
		}
	}
	/* We have a 'size' of which there are enough spaces.
	 * We simply do a first-fit */
	cnt = 0;
	for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
		if (dl->esize < size)
			continue;

		dl->raiddisk = cnt;
		cnt++;
	}
	return 1;
}
3291
/* Top-level geometry validation for DDF.
 * Dispatches to the container or BVD validators depending on 'level'
 * and on whether a container has already been opened (st->sb set).
 * With no device given, checks that the level/layout combination is
 * expressible in DDF and, inside a container, that enough free space
 * exists.  Returns 1 if the geometry is acceptable, 0 otherwise.
 */
static int validate_geometry_ddf(struct supertype *st,
				 int level, int layout, int raiddisks,
				 int *chunk, unsigned long long size,
				 unsigned long long data_offset,
				 char *dev, unsigned long long *freesize,
				 int verbose)
{
	int fd;
	struct mdinfo *sra;
	int cfd;

	/* ddf potentially supports lots of things, but it depends on
	 * what devices are offered (and maybe kernel version?)
	 * If given unused devices, we will make a container.
	 * If given devices in a container, we will make a BVD.
	 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
	 */

	if (*chunk == UnSet)
		*chunk = DEFAULT_CHUNK;

	if (level == LEVEL_NONE)
		level = LEVEL_CONTAINER;
	if (level == LEVEL_CONTAINER) {
		/* Must be a fresh device to add to a container */
		return validate_geometry_ddf_container(st, level, layout,
						       raiddisks, *chunk,
						       size, data_offset, dev,
						       freesize,
						       verbose);
	}

	if (!dev) {
		/* No device: just check the level/layout is one DDF
		 * can represent. */
		mdu_array_info_t array = {
			.level = level,
			.layout = layout,
			.raid_disks = raiddisks
		};
		struct vd_config conf;
		if (layout_md2ddf(&array, &conf) == -1) {
			if (verbose)
				pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
				       level, layout, raiddisks);
			return 0;
		}
		/* Should check layout? etc */

		if (st->sb && freesize) {
			/* --create was given a container to create in.
			 * So we need to check that there are enough
			 * free spaces and return the amount of space.
			 * We may as well remember which drives were
			 * chosen so that add_to_super/getinfo_super
			 * can return them.
			 */
			return reserve_space(st, raiddisks, size, *chunk, freesize);
		}
		return 1;
	}

	if (st->sb) {
		/* A container has already been opened, so we are
		 * creating in there.  Maybe a BVD, maybe an SVD.
		 * Should make a distinction one day.
		 */
		return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
						 chunk, size, data_offset, dev,
						 freesize,
						 verbose);
	}
	/* This is the first device for the array.
	 * If it is a container, we read it in and do automagic allocations,
	 * no other devices should be given.
	 * Otherwise it must be a member device of a container, and we
	 * do manual allocation.
	 * Later we should check for a BVD and make an SVD.
	 */
	fd = open(dev, O_RDONLY|O_EXCL, 0);
	if (fd >= 0) {
		/* Device is not busy: it cannot be a container member,
		 * so creating a non-container array on it is refused. */
		sra = sysfs_read(fd, NULL, GET_VERSION);
		close(fd);
		if (sra && sra->array.major_version == -1 &&
		    strcmp(sra->text_version, "ddf") == 0) {
			/* load super */
			/* find space for 'n' devices. */
			/* remember the devices */
			/* Somehow return the fact that we have enough */
		}

		if (verbose)
			pr_err("ddf: Cannot create this array "
			       "on device %s - a container is required.\n",
			       dev);
		return 0;
	}
	if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
		if (verbose)
			pr_err("ddf: Cannot open %s: %s\n",
			       dev, strerror(errno));
		return 0;
	}
	/* Well, it is in use by someone, maybe a 'ddf' container. */
	cfd = open_container(fd);
	if (cfd < 0) {
		close(fd);
		if (verbose)
			pr_err("ddf: Cannot use %s: %s\n",
			       dev, strerror(EBUSY));
		return 0;
	}
	sra = sysfs_read(cfd, NULL, GET_VERSION);
	close(fd);
	if (sra && sra->array.major_version == -1 &&
	    strcmp(sra->text_version, "ddf") == 0) {
		/* This is a member of a ddf container.  Load the container
		 * and try to create a bvd
		 */
		struct ddf_super *ddf;
		if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
			st->sb = ddf;
			strcpy(st->container_devnm, fd2devnm(cfd));
			close(cfd);
			return validate_geometry_ddf_bvd(st, level, layout,
							 raiddisks, chunk, size,
							 data_offset,
							 dev, freesize,
							 verbose);
		}
		close(cfd);
	} else /* device may belong to a different container */
		/* NOTE(review): 'cfd' is not closed on this path -
		 * looks like a descriptor leak; confirm and fix. */
		return 0;

	return 1;
}
3426
3427 static int
3428 validate_geometry_ddf_container(struct supertype *st,
3429 int level, int layout, int raiddisks,
3430 int chunk, unsigned long long size,
3431 unsigned long long data_offset,
3432 char *dev, unsigned long long *freesize,
3433 int verbose)
3434 {
3435 int fd;
3436 unsigned long long ldsize;
3437
3438 if (level != LEVEL_CONTAINER)
3439 return 0;
3440 if (!dev)
3441 return 1;
3442
3443 fd = open(dev, O_RDONLY|O_EXCL, 0);
3444 if (fd < 0) {
3445 if (verbose)
3446 pr_err("ddf: Cannot open %s: %s\n",
3447 dev, strerror(errno));
3448 return 0;
3449 }
3450 if (!get_dev_size(fd, dev, &ldsize)) {
3451 close(fd);
3452 return 0;
3453 }
3454 close(fd);
3455
3456 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3457 if (*freesize == 0)
3458 return 0;
3459
3460 return 1;
3461 }
3462
3463 static int validate_geometry_ddf_bvd(struct supertype *st,
3464 int level, int layout, int raiddisks,
3465 int *chunk, unsigned long long size,
3466 unsigned long long data_offset,
3467 char *dev, unsigned long long *freesize,
3468 int verbose)
3469 {
3470 struct stat stb;
3471 struct ddf_super *ddf = st->sb;
3472 struct dl *dl;
3473 unsigned long long pos = 0;
3474 unsigned long long maxsize;
3475 struct extent *e;
3476 int i;
3477 /* ddf/bvd supports lots of things, but not containers */
3478 if (level == LEVEL_CONTAINER) {
3479 if (verbose)
3480 pr_err("DDF cannot create a container within an container\n");
3481 return 0;
3482 }
3483 /* We must have the container info already read in. */
3484 if (!ddf)
3485 return 0;
3486
3487 if (!dev) {
3488 /* General test: make sure there is space for
3489 * 'raiddisks' device extents of size 'size'.
3490 */
3491 unsigned long long minsize = size;
3492 int dcnt = 0;
3493 if (minsize == 0)
3494 minsize = 8;
3495 for (dl = ddf->dlist; dl ; dl = dl->next) {
3496 int found = 0;
3497 pos = 0;
3498
3499 i = 0;
3500 e = get_extents(ddf, dl);
3501 if (!e) continue;
3502 do {
3503 unsigned long long esize;
3504 esize = e[i].start - pos;
3505 if (esize >= minsize)
3506 found = 1;
3507 pos = e[i].start + e[i].size;
3508 i++;
3509 } while (e[i-1].size);
3510 if (found)
3511 dcnt++;
3512 free(e);
3513 }
3514 if (dcnt < raiddisks) {
3515 if (verbose)
3516 pr_err("ddf: Not enough devices with "
3517 "space for this array (%d < %d)\n",
3518 dcnt, raiddisks);
3519 return 0;
3520 }
3521 return 1;
3522 }
3523 /* This device must be a member of the set */
3524 if (stat(dev, &stb) < 0)
3525 return 0;
3526 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3527 return 0;
3528 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3529 if (dl->major == (int)major(stb.st_rdev) &&
3530 dl->minor == (int)minor(stb.st_rdev))
3531 break;
3532 }
3533 if (!dl) {
3534 if (verbose)
3535 pr_err("ddf: %s is not in the "
3536 "same DDF set\n",
3537 dev);
3538 return 0;
3539 }
3540 e = get_extents(ddf, dl);
3541 maxsize = 0;
3542 i = 0;
3543 if (e)
3544 do {
3545 unsigned long long esize;
3546 esize = e[i].start - pos;
3547 if (esize >= maxsize)
3548 maxsize = esize;
3549 pos = e[i].start + e[i].size;
3550 i++;
3551 } while (e[i-1].size);
3552 *freesize = maxsize;
3553 // FIXME here I am
3554
3555 return 1;
3556 }
3557
3558 static int load_super_ddf_all(struct supertype *st, int fd,
3559 void **sbp, char *devname)
3560 {
3561 struct mdinfo *sra;
3562 struct ddf_super *super;
3563 struct mdinfo *sd, *best = NULL;
3564 int bestseq = 0;
3565 int seq;
3566 char nm[20];
3567 int dfd;
3568
3569 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3570 if (!sra)
3571 return 1;
3572 if (sra->array.major_version != -1 ||
3573 sra->array.minor_version != -2 ||
3574 strcmp(sra->text_version, "ddf") != 0)
3575 return 1;
3576
3577 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3578 return 1;
3579 memset(super, 0, sizeof(*super));
3580
3581 /* first, try each device, and choose the best ddf */
3582 for (sd = sra->devs ; sd ; sd = sd->next) {
3583 int rv;
3584 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3585 dfd = dev_open(nm, O_RDONLY);
3586 if (dfd < 0)
3587 return 2;
3588 rv = load_ddf_headers(dfd, super, NULL);
3589 close(dfd);
3590 if (rv == 0) {
3591 seq = be32_to_cpu(super->active->seq);
3592 if (super->active->openflag)
3593 seq--;
3594 if (!best || seq > bestseq) {
3595 bestseq = seq;
3596 best = sd;
3597 }
3598 }
3599 }
3600 if (!best)
3601 return 1;
3602 /* OK, load this ddf */
3603 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3604 dfd = dev_open(nm, O_RDONLY);
3605 if (dfd < 0)
3606 return 1;
3607 load_ddf_headers(dfd, super, NULL);
3608 load_ddf_global(dfd, super, NULL);
3609 close(dfd);
3610 /* Now we need the device-local bits */
3611 for (sd = sra->devs ; sd ; sd = sd->next) {
3612 int rv;
3613
3614 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3615 dfd = dev_open(nm, O_RDWR);
3616 if (dfd < 0)
3617 return 2;
3618 rv = load_ddf_headers(dfd, super, NULL);
3619 if (rv == 0)
3620 rv = load_ddf_local(dfd, super, NULL, 1);
3621 if (rv)
3622 return 1;
3623 }
3624
3625 *sbp = super;
3626 if (st->ss == NULL) {
3627 st->ss = &super_ddf;
3628 st->minor_version = 0;
3629 st->max_devs = 512;
3630 }
3631 strcpy(st->container_devnm, fd2devnm(fd));
3632 return 0;
3633 }
3634
/* Load the DDF metadata of the container open on 'fd' into st->sb.
 * Thin wrapper around load_super_ddf_all(); returns 0 on success,
 * non-zero on failure (see load_super_ddf_all for the exact codes).
 */
static int load_container_ddf(struct supertype *st, int fd,
			      char *devname)
{
	return load_super_ddf_all(st, fd, &st->sb, devname);
}
3640
3641 #endif /* MDASSEMBLE */
3642
3643 static int check_secondary(const struct vcl *vc)
3644 {
3645 const struct vd_config *conf = &vc->conf;
3646 int i;
3647
3648 /* The only DDF secondary RAID level md can support is
3649 * RAID 10, if the stripe sizes and Basic volume sizes
3650 * are all equal.
3651 * Other configurations could in theory be supported by exposing
3652 * the BVDs to user space and using device mapper for the secondary
3653 * mapping. So far we don't support that.
3654 */
3655
3656 __u64 sec_elements[4] = {0, 0, 0, 0};
3657 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1<<((n)&63)))
3658 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1<<((n)&63))) != 0)
3659
3660 if (vc->other_bvds == NULL) {
3661 pr_err("No BVDs for secondary RAID found\n");
3662 return -1;
3663 }
3664 if (conf->prl != DDF_RAID1) {
3665 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3666 return -1;
3667 }
3668 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3669 pr_err("Secondary RAID level %d is unsupported\n",
3670 conf->srl);
3671 return -1;
3672 }
3673 __set_sec_seen(conf->sec_elmnt_seq);
3674 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3675 const struct vd_config *bvd = vc->other_bvds[i];
3676 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3677 continue;
3678 if (bvd->srl != conf->srl) {
3679 pr_err("Inconsistent secondary RAID level across BVDs\n");
3680 return -1;
3681 }
3682 if (bvd->prl != conf->prl) {
3683 pr_err("Different RAID levels for BVDs are unsupported\n");
3684 return -1;
3685 }
3686 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3687 pr_err("All BVDs must have the same number of primary elements\n");
3688 return -1;
3689 }
3690 if (bvd->chunk_shift != conf->chunk_shift) {
3691 pr_err("Different strip sizes for BVDs are unsupported\n");
3692 return -1;
3693 }
3694 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3695 pr_err("Different BVD sizes are unsupported\n");
3696 return -1;
3697 }
3698 __set_sec_seen(bvd->sec_elmnt_seq);
3699 }
3700 for (i = 0; i < conf->sec_elmnt_count; i++) {
3701 if (!__was_sec_seen(i)) {
3702 pr_err("BVD %d is missing\n", i);
3703 return -1;
3704 }
3705 }
3706 return 0;
3707 }
3708
3709 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3710 be32 refnum, unsigned int nmax,
3711 const struct vd_config **bvd,
3712 unsigned int *idx)
3713 {
3714 unsigned int i, j, n, sec, cnt;
3715
3716 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3717 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3718
3719 for (i = 0, j = 0 ; i < nmax ; i++) {
3720 /* j counts valid entries for this BVD */
3721 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3722 *bvd = &vc->conf;
3723 *idx = i;
3724 return sec * cnt + j;
3725 }
3726 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3727 j++;
3728 }
3729 if (vc->other_bvds == NULL)
3730 goto bad;
3731
3732 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3733 struct vd_config *vd = vc->other_bvds[n-1];
3734 sec = vd->sec_elmnt_seq;
3735 if (sec == DDF_UNUSED_BVD)
3736 continue;
3737 for (i = 0, j = 0 ; i < nmax ; i++) {
3738 if (be32_eq(vd->phys_refnum[i], refnum)) {
3739 *bvd = vd;
3740 *idx = i;
3741 return sec * cnt + j;
3742 }
3743 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3744 j++;
3745 }
3746 }
3747 bad:
3748 *bvd = NULL;
3749 return DDF_NOTFOUND;
3750 }
3751
static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
{
	/* Given a container loaded by load_super_ddf_all,
	 * extract information about all the arrays into
	 * an mdinfo tree.
	 *
	 * For each vcl in conflist: create an mdinfo, fill it in,
	 * then look for matching devices (phys_refnum) in dlist
	 * and create appropriate device mdinfo.
	 */
	struct ddf_super *ddf = st->sb;
	struct mdinfo *rest = NULL;
	struct vcl *vc;

	for (vc = ddf->conflist ; vc ; vc=vc->next) {
		unsigned int i;
		struct mdinfo *this;
		char *ep;
		__u32 *cptr;
		unsigned int pd;

		/* If a specific subarray was requested, skip all others. */
		if (subarray &&
		    (strtoul(subarray, &ep, 10) != vc->vcnum ||
		     *ep != '\0'))
			continue;

		/* Skip secondary configurations md cannot express. */
		if (vc->conf.sec_elmnt_count > 1) {
			if (check_secondary(vc) != 0)
				continue;
		}

		this = xcalloc(1, sizeof(*this));
		this->next = rest;
		rest = this;

		if (layout_ddf2md(&vc->conf, &this->array))
			continue;
		this->array.md_minor = -1;
		this->array.major_version = -1;
		this->array.minor_version = -2;
		this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
		/* Creation time is stored in the last 4 bytes of the GUID;
		 * DDF timestamps are 1980-based, DECADE converts to epoch. */
		cptr = (__u32 *)(vc->conf.guid + 16);
		this->array.ctime = DECADE + __be32_to_cpu(*cptr);
		this->array.utime = DECADE +
			be32_to_cpu(vc->conf.timestamp);
		this->array.chunk_size = 512 << vc->conf.chunk_shift;

		i = vc->vcnum;
		if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
		    (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
		    DDF_init_full) {
			this->array.state = 0;
			this->resync_start = 0;
		} else {
			this->array.state = 1;
			this->resync_start = MaxSector;
		}
		_ddf_array_name(this->name, ddf, i);
		memset(this->uuid, 0, sizeof(this->uuid));
		/* DDF block counts are sectors; array.size is in KiB. */
		this->component_size = be64_to_cpu(vc->conf.blocks);
		this->array.size = this->component_size / 2;
		this->container_member = i;

		/* uuid_from_super_ddf() derives the uuid from currentconf. */
		ddf->currentconf = vc;
		uuid_from_super_ddf(st, this->uuid);
		if (!subarray)
			ddf->currentconf = NULL;

		sprintf(this->text_version, "/%s/%d",
			st->container_devnm, this->container_member);

		for (pd = 0; pd < be16_to_cpu(ddf->phys->max_pdes); pd++) {
			struct mdinfo *dev;
			struct dl *d;
			const struct vd_config *bvd;
			unsigned int iphys;
			int stt;

			/* 0xFFFFFFFF marks an unused phys_disk slot. */
			if (be32_to_cpu(ddf->phys->entries[pd].refnum)
			    == 0xFFFFFFFF)
				continue;

			stt = be16_to_cpu(ddf->phys->entries[pd].state);
			if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
			    != DDF_Online)
				continue;

			i = get_pd_index_from_refnum(
				vc, ddf->phys->entries[pd].refnum,
				ddf->mppe, &bvd, &iphys);
			if (i == DDF_NOTFOUND)
				continue;

			this->array.working_disks++;

			for (d = ddf->dlist; d ; d=d->next)
				if (be32_eq(d->disk.refnum,
					    ddf->phys->entries[pd].refnum))
					break;
			if (d == NULL)
				/* Haven't found that one yet, maybe there are others */
				continue;

			dev = xcalloc(1, sizeof(*dev));
			dev->next = this->devs;
			this->devs = dev;

			dev->disk.number = be32_to_cpu(d->disk.refnum);
			dev->disk.major = d->major;
			dev->disk.minor = d->minor;
			dev->disk.raid_disk = i;
			dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
			dev->recovery_start = MaxSector;

			dev->events = be32_to_cpu(ddf->active->seq);
			dev->data_offset =
				be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
			dev->component_size = be64_to_cpu(bvd->blocks);
			if (d->devname)
				strcpy(dev->name, d->devname);
		}
	}
	return rest;
}
3876
/* Write DDF metadata to the device open on 'fd'.
 * If the superblock has disk or config lists, 'fd' must refer to one of
 * the known member disks and the full metadata is written to it.
 * Otherwise the anchor block in the device's last sector is zeroed
 * (used to erase metadata).  Returns 0 on success, 1 on error.
 */
static int store_super_ddf(struct supertype *st, int fd)
{
	struct ddf_super *ddf = st->sb;
	unsigned long long dsize;
	void *buf;
	int rc;

	if (!ddf)
		return 1;

	if (!get_dev_size(fd, NULL, &dsize))
		return 1;

	if (ddf->dlist || ddf->conflist) {
		struct stat sta;
		struct dl *dl;
		int ofd, ret;

		if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
			pr_err("%s: file descriptor for invalid device\n",
			       __func__);
			return 1;
		}
		/* Match 'fd' to a known member disk by major/minor. */
		for (dl = ddf->dlist; dl; dl = dl->next)
			if (dl->major == (int)major(sta.st_rdev) &&
			    dl->minor == (int)minor(sta.st_rdev))
				break;
		if (!dl) {
			pr_err("%s: couldn't find disk %d/%d\n", __func__,
			       (int)major(sta.st_rdev),
			       (int)minor(sta.st_rdev));
			return 1;
		}
		/* Temporarily substitute 'fd' for the disk's own fd so
		 * _write_super_to_disk() writes through the caller's
		 * handle; restore it afterwards. */
		ofd = dl->fd;
		dl->fd = fd;
		ret = (_write_super_to_disk(ddf, dl) != 1);
		dl->fd = ofd;
		return ret;
	}

	/* No metadata loaded: zero the anchor in the last sector. */
	if (posix_memalign(&buf, 512, 512) != 0)
		return 1;
	memset(buf, 0, 512);

	lseek64(fd, dsize-512, 0);
	rc = write(fd, buf, 512);
	free(buf);
	if (rc < 0)
		return 1;
	return 0;
}
3928
3929 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3930 {
3931 /*
3932 * return:
3933 * 0 same, or first was empty, and second was copied
3934 * 1 second had wrong magic number - but that isn't possible
3935 * 2 wrong uuid
3936 * 3 wrong other info
3937 */
3938 struct ddf_super *first = st->sb;
3939 struct ddf_super *second = tst->sb;
3940 struct dl *dl1, *dl2;
3941 struct vcl *vl1, *vl2;
3942 unsigned int max_vds, max_pds, pd, vd;
3943
3944 if (!first) {
3945 st->sb = tst->sb;
3946 tst->sb = NULL;
3947 return 0;
3948 }
3949
3950 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3951 return 2;
3952
3953 /* It is only OK to compare info in the anchor. Anything else
3954 * could be changing due to a reconfig so must be ignored.
3955 * guid really should be enough anyway.
3956 */
3957
3958 if (!be32_eq(first->active->seq, second->active->seq)) {
3959 dprintf("%s: sequence number mismatch %u<->%u\n", __func__,
3960 be32_to_cpu(first->active->seq),
3961 be32_to_cpu(second->active->seq));
3962 return 0;
3963 }
3964
3965 /*
3966 * At this point we are fairly sure that the meta data matches.
3967 * But the new disk may contain additional local data.
3968 * Add it to the super block.
3969 */
3970 max_vds = be16_to_cpu(first->active->max_vd_entries);
3971 max_pds = be16_to_cpu(first->phys->max_pdes);
3972 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3973 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3974 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3975 DDF_GUID_LEN))
3976 break;
3977 if (vl1) {
3978 if (vl1->other_bvds != NULL &&
3979 vl1->conf.sec_elmnt_seq !=
3980 vl2->conf.sec_elmnt_seq) {
3981 dprintf("%s: adding BVD %u\n", __func__,
3982 vl2->conf.sec_elmnt_seq);
3983 add_other_bvd(vl1, &vl2->conf,
3984 first->conf_rec_len*512);
3985 }
3986 continue;
3987 }
3988
3989 if (posix_memalign((void **)&vl1, 512,
3990 (first->conf_rec_len*512 +
3991 offsetof(struct vcl, conf))) != 0) {
3992 pr_err("%s could not allocate vcl buf\n",
3993 __func__);
3994 return 3;
3995 }
3996
3997 vl1->next = first->conflist;
3998 vl1->block_sizes = NULL;
3999 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
4000 if (alloc_other_bvds(first, vl1) != 0) {
4001 pr_err("%s could not allocate other bvds\n",
4002 __func__);
4003 free(vl1);
4004 return 3;
4005 }
4006 for (vd = 0; vd < max_vds; vd++)
4007 if (!memcmp(first->virt->entries[vd].guid,
4008 vl1->conf.guid, DDF_GUID_LEN))
4009 break;
4010 vl1->vcnum = vd;
4011 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
4012 first->conflist = vl1;
4013 }
4014
4015 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
4016 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
4017 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
4018 break;
4019 if (dl1)
4020 continue;
4021
4022 if (posix_memalign((void **)&dl1, 512,
4023 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
4024 != 0) {
4025 pr_err("%s could not allocate disk info buffer\n",
4026 __func__);
4027 return 3;
4028 }
4029 memcpy(dl1, dl2, sizeof(*dl1));
4030 dl1->mdupdate = NULL;
4031 dl1->next = first->dlist;
4032 dl1->fd = -1;
4033 for (pd = 0; pd < max_pds; pd++)
4034 if (be32_eq(first->phys->entries[pd].refnum,
4035 dl1->disk.refnum))
4036 break;
4037 dl1->pdnum = pd < max_pds ? (int)pd : -1;
4038 if (dl2->spare) {
4039 if (posix_memalign((void **)&dl1->spare, 512,
4040 first->conf_rec_len*512) != 0) {
4041 pr_err("%s could not allocate spare info buf\n",
4042 __func__);
4043 return 3;
4044 }
4045 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
4046 }
4047 for (vd = 0 ; vd < first->max_part ; vd++) {
4048 if (!dl2->vlist[vd]) {
4049 dl1->vlist[vd] = NULL;
4050 continue;
4051 }
4052 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
4053 if (!memcmp(vl1->conf.guid,
4054 dl2->vlist[vd]->conf.guid,
4055 DDF_GUID_LEN))
4056 break;
4057 dl1->vlist[vd] = vl1;
4058 }
4059 }
4060 first->dlist = dl1;
4061 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
4062 be32_to_cpu(dl1->disk.refnum));
4063 }
4064
4065 return 0;
4066 }
4067
4068 #ifndef MDASSEMBLE
4069 /*
4070 * A new array 'a' has been started which claims to be instance 'inst'
4071 * within container 'c'.
4072 * We need to confirm that the array matches the metadata in 'c' so
4073 * that we don't corrupt any metadata.
4074 */
static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
{
	struct ddf_super *ddf = c->sb;
	int n = atoi(inst);		/* subarray index within the container */
	struct mdinfo *dev;
	struct dl *dl;
	static const char faulty[] = "faulty";

	/* An all-0xff GUID marks an unused virtual-disk slot. */
	if (all_ff(ddf->virt->entries[n].guid)) {
		pr_err("%s: subarray %d doesn't exist\n", __func__, n);
		return -ENODEV;
	}
	dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
		guid_str(ddf->virt->entries[n].guid));
	for (dev = a->info.devs; dev; dev = dev->next) {
		/* Each device of the new array must be a known member
		 * disk of this container. */
		for (dl = ddf->dlist; dl; dl = dl->next)
			if (dl->major == dev->disk.major &&
			    dl->minor == dev->disk.minor)
				break;
		if (!dl || dl->pdnum < 0) {
			pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
			       __func__, dev->disk.major, dev->disk.minor, n);
			return -1;
		}
		if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
			(DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
			/* Device is known but not healthy: tell the kernel
			 * it is faulty via its sysfs state file rather than
			 * refusing the whole subarray. */
			pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
			       __func__, n, dl->major, dl->minor,
			       be16_to_cpu(
				       ddf->phys->entries[dl->pdnum].state));
			if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
			    sizeof(faulty) - 1)
				pr_err("Write to state_fd failed\n");
			dev->curr_state = DS_FAULTY;
		}
	}
	a->info.container_member = n;
	return 0;
}
4114
static void handle_missing(struct ddf_super *ddf, struct active_array *a, int inst)
{
	/* This member array is being activated.  If any devices
	 * are missing they must now be marked as failed.
	 */
	struct vd_config *vc;
	unsigned int n_bvd;
	struct vcl *vcl;
	struct dl *dl;
	int pd;
	int n;
	int state;

	/* Walk every member slot of subarray 'inst' until find_vdcr()
	 * runs out of slots. */
	for (n = 0; ; n++) {
		vc = find_vdcr(ddf, inst, n, &n_bvd, &vcl);
		if (!vc)
			break;
		for (dl = ddf->dlist; dl; dl = dl->next)
			if (be32_eq(dl->disk.refnum, vc->phys_refnum[n_bvd]))
				break;
		if (dl)
			/* Found this disk, so not missing */
			continue;

		/* Mark the device as failed/missing. */
		pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
		if (pd >= 0 && be16_and(ddf->phys->entries[pd].state,
					cpu_to_be16(DDF_Online))) {
			be16_clear(ddf->phys->entries[pd].state,
				   cpu_to_be16(DDF_Online));
			be16_set(ddf->phys->entries[pd].state,
				 cpu_to_be16(DDF_Failed|DDF_Missing));
			/* Detach the config slot from the vanished disk. */
			vc->phys_refnum[n_bvd] = cpu_to_be32(0);
			ddf_set_updates_pending(ddf);
		}

		/* Mark the array as Degraded */
		state = get_svd_state(ddf, vcl);
		if (ddf->virt->entries[inst].state !=
		    ((ddf->virt->entries[inst].state & ~DDF_state_mask)
		     | state)) {
			/* Only rewrite (and flag metadata dirty) when the
			 * state bits actually change. */
			ddf->virt->entries[inst].state =
				(ddf->virt->entries[inst].state & ~DDF_state_mask)
				| state;
			a->check_degraded = 1;
			ddf_set_updates_pending(ddf);
		}
	}
}
4164
4165 /*
4166 * The array 'a' is to be marked clean in the metadata.
4167 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4168 * clean up to the point (in sectors). If that cannot be recorded in the
4169 * metadata, then leave it as dirty.
4170 *
4171 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4172 * !global! virtual_disk.virtual_entry structure.
4173 */
static int ddf_set_array_state(struct active_array *a, int consistent)
{
	struct ddf_super *ddf = a->container->sb;
	int inst = a->info.container_member;
	int old = ddf->virt->entries[inst].state;
	if (consistent == 2) {
		/* "clean if safe": first account for any missing devices,
		 * then only report clean when resync has completed. */
		handle_missing(ddf, a, inst);
		/* Should check if a recovery should be started FIXME */
		consistent = 1;
		if (!is_resync_complete(&a->info))
			consistent = 0;
	}
	if (consistent)
		ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
	else
		ddf->virt->entries[inst].state |= DDF_state_inconsistent;
	/* Only flag the metadata dirty when a bit actually changed. */
	if (old != ddf->virt->entries[inst].state)
		ddf_set_updates_pending(ddf);

	/* Record how far initialisation/resync has progressed. */
	old = ddf->virt->entries[inst].init_state;
	ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
	if (is_resync_complete(&a->info))
		ddf->virt->entries[inst].init_state |= DDF_init_full;
	else if (a->info.resync_start == 0)
		ddf->virt->entries[inst].init_state |= DDF_init_not;
	else
		ddf->virt->entries[inst].init_state |= DDF_init_quick;
	if (old != ddf->virt->entries[inst].init_state)
		ddf_set_updates_pending(ddf);

	dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
		guid_str(ddf->virt->entries[inst].guid), a->curr_state,
		consistent?"clean":"dirty",
		a->info.resync_start);
	return consistent;
}
4210
4211 static int get_bvd_state(const struct ddf_super *ddf,
4212 const struct vd_config *vc)
4213 {
4214 unsigned int i, n_bvd, working = 0;
4215 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4216 int pd, st, state;
4217 for (i = 0; i < n_prim; i++) {
4218 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4219 continue;
4220 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4221 if (pd < 0)
4222 continue;
4223 st = be16_to_cpu(ddf->phys->entries[pd].state);
4224 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4225 == DDF_Online)
4226 working++;
4227 }
4228
4229 state = DDF_state_degraded;
4230 if (working == n_prim)
4231 state = DDF_state_optimal;
4232 else
4233 switch (vc->prl) {
4234 case DDF_RAID0:
4235 case DDF_CONCAT:
4236 case DDF_JBOD:
4237 state = DDF_state_failed;
4238 break;
4239 case DDF_RAID1:
4240 if (working == 0)
4241 state = DDF_state_failed;
4242 else if (working >= 2)
4243 state = DDF_state_part_optimal;
4244 break;
4245 case DDF_RAID4:
4246 case DDF_RAID5:
4247 if (working < n_prim - 1)
4248 state = DDF_state_failed;
4249 break;
4250 case DDF_RAID6:
4251 if (working < n_prim - 2)
4252 state = DDF_state_failed;
4253 else if (working == n_prim - 1)
4254 state = DDF_state_part_optimal;
4255 break;
4256 }
4257 return state;
4258 }
4259
4260 static int secondary_state(int state, int other, int seclevel)
4261 {
4262 if (state == DDF_state_optimal && other == DDF_state_optimal)
4263 return DDF_state_optimal;
4264 if (seclevel == DDF_2MIRRORED) {
4265 if (state == DDF_state_optimal || other == DDF_state_optimal)
4266 return DDF_state_part_optimal;
4267 if (state == DDF_state_failed && other == DDF_state_failed)
4268 return DDF_state_failed;
4269 return DDF_state_degraded;
4270 } else {
4271 if (state == DDF_state_failed || other == DDF_state_failed)
4272 return DDF_state_failed;
4273 if (state == DDF_state_degraded || other == DDF_state_degraded)
4274 return DDF_state_degraded;
4275 return DDF_state_part_optimal;
4276 }
4277 }
4278
4279 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4280 {
4281 int state = get_bvd_state(ddf, &vcl->conf);
4282 unsigned int i;
4283 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4284 state = secondary_state(
4285 state,
4286 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4287 vcl->conf.srl);
4288 }
4289 return state;
4290 }
4291
4292 /*
4293 * The state of each disk is stored in the global phys_disk structure
4294 * in phys_disk.entries[n].state.
4295 * This makes various combinations awkward.
4296 * - When a device fails in any array, it must be failed in all arrays
4297 * that include a part of this device.
4298 * - When a component is rebuilding, we cannot include it officially in the
4299 * array unless this is the only array that uses the device.
4300 *
4301 * So: when transitioning:
4302 * Online -> failed, just set failed flag. monitor will propagate
4303 * spare -> online, the device might need to be added to the array.
4304 * spare -> failed, just set failed. Don't worry if in array or not.
4305 */
/*
 * mdmon callback: apply a state change (DS_FAULTY/DS_INSYNC/... bits)
 * for raid-disk slot 'n' of array 'a' to the DDF physical-disk records,
 * then recompute and store the virtual disk's overall state.
 */
static void ddf_set_disk(struct active_array *a, int n, int state)
{
	struct ddf_super *ddf = a->container->sb;
	unsigned int inst = a->info.container_member, n_bvd;
	struct vcl *vcl;
	struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
					 &n_bvd, &vcl);
	int pd;
	struct mdinfo *mdi;
	struct dl *dl;

	dprintf("%s: %d to %x\n", __func__, n, state);
	if (vc == NULL) {
		dprintf("ddf: cannot find instance %d!!\n", inst);
		return;
	}
	/* Find the matching slot in 'info'. */
	for (mdi = a->info.devs; mdi; mdi = mdi->next)
		if (mdi->disk.raid_disk == n)
			break;
	if (!mdi) {
		pr_err("%s: cannot find raid disk %d\n",
		       __func__, n);
		return;
	}

	/* and find the 'dl' entry corresponding to that. */
	for (dl = ddf->dlist; dl; dl = dl->next)
		if (mdi->state_fd >= 0 &&
		    mdi->disk.major == dl->major &&
		    mdi->disk.minor == dl->minor)
			break;
	if (!dl) {
		pr_err("%s: cannot find raid disk %d (%d/%d)\n",
		       __func__, n,
		       mdi->disk.major, mdi->disk.minor);
		return;
	}

	pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
	if (pd < 0 || pd != dl->pdnum) {
		/* disk doesn't currently exist or has changed.
		 * If it is now in_sync, insert it. */
		dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
			__func__, dl->pdnum, dl->major, dl->minor,
			be32_to_cpu(dl->disk.refnum));
		dprintf("%s: array %u disk %u ref %08x pd %d\n",
			__func__, inst, n_bvd,
			be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
		if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
			/* Record this disk as the member for slot n_bvd:
			 * point the BVD config at its refnum/offset and
			 * flip its type from global spare to active. */
			pd = dl->pdnum; /* FIXME: is this really correct ? */
			vc->phys_refnum[n_bvd] = dl->disk.refnum;
			LBA_OFFSET(ddf, vc)[n_bvd] =
				cpu_to_be64(mdi->data_offset);
			be16_clear(ddf->phys->entries[pd].type,
				   cpu_to_be16(DDF_Global_Spare));
			be16_set(ddf->phys->entries[pd].type,
				 cpu_to_be16(DDF_Active_in_VD));
			ddf_set_updates_pending(ddf);
		}
	} else {
		/* Known member: translate DS_* bits into DDF state bits,
		 * only marking metadata dirty when something changed. */
		be16 old = ddf->phys->entries[pd].state;
		if (state & DS_FAULTY)
			be16_set(ddf->phys->entries[pd].state,
				 cpu_to_be16(DDF_Failed));
		if (state & DS_INSYNC) {
			be16_set(ddf->phys->entries[pd].state,
				 cpu_to_be16(DDF_Online));
			be16_clear(ddf->phys->entries[pd].state,
				   cpu_to_be16(DDF_Rebuilding));
		}
		if (!be16_eq(old, ddf->phys->entries[pd].state))
			ddf_set_updates_pending(ddf);
	}

	dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
		be32_to_cpu(dl->disk.refnum), state,
		be16_to_cpu(ddf->phys->entries[pd].state));

	/* Now we need to check the state of the array and update
	 * virtual_disk.entries[n].state.
	 * It needs to be one of "optimal", "degraded", "failed".
	 * I don't understand 'deleted' or 'missing'.
	 */
	state = get_svd_state(ddf, vcl);

	if (ddf->virt->entries[inst].state !=
	    ((ddf->virt->entries[inst].state & ~DDF_state_mask)
	     | state)) {
		ddf->virt->entries[inst].state =
			(ddf->virt->entries[inst].state & ~DDF_state_mask)
			| state;
		ddf_set_updates_pending(ddf);
	}

}
4402
4403 static void ddf_sync_metadata(struct supertype *st)
4404 {
4405 /*
4406 * Write all data to all devices.
4407 * Later, we might be able to track whether only local changes
4408 * have been made, or whether any global data has been changed,
4409 * but ddf is sufficiently weird that it probably always
4410 * changes global data ....
4411 */
4412 struct ddf_super *ddf = st->sb;
4413 if (!ddf->updates_pending)
4414 return;
4415 ddf->updates_pending = 0;
4416 __write_init_super_ddf(st);
4417 dprintf("ddf: sync_metadata\n");
4418 }
4419
4420 static int del_from_conflist(struct vcl **list, const char *guid)
4421 {
4422 struct vcl **p;
4423 int found = 0;
4424 for (p = list; p && *p; p = &((*p)->next))
4425 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4426 found = 1;
4427 *p = (*p)->next;
4428 }
4429 return found;
4430 }
4431
4432 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4433 {
4434 struct dl *dl;
4435 unsigned int vdnum, i;
4436 vdnum = find_vde_by_guid(ddf, guid);
4437 if (vdnum == DDF_NOTFOUND) {
4438 pr_err("%s: could not find VD %s\n", __func__,
4439 guid_str(guid));
4440 return -1;
4441 }
4442 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4443 pr_err("%s: could not find conf %s\n", __func__,
4444 guid_str(guid));
4445 return -1;
4446 }
4447 for (dl = ddf->dlist; dl; dl = dl->next)
4448 for (i = 0; i < ddf->max_part; i++)
4449 if (dl->vlist[i] != NULL &&
4450 !memcmp(dl->vlist[i]->conf.guid, guid,
4451 DDF_GUID_LEN))
4452 dl->vlist[i] = NULL;
4453 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4454 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4455 return 0;
4456 }
4457
/*
 * Delete the currently-selected subarray (ddf->currentconf).
 * When running under mdmon (update_tail set) the deletion is queued as
 * a DDF_VIRT_RECORDS_MAGIC update for the monitor to apply; otherwise
 * it is performed directly and the metadata is written out.
 * Returns 0 on success, -1 on error.
 */
static int kill_subarray_ddf(struct supertype *st)
{
	struct ddf_super *ddf = st->sb;
	/*
	 * currentconf is set in container_content_ddf,
	 * called with subarray arg
	 */
	struct vcl *victim = ddf->currentconf;
	struct vd_config *conf;
	unsigned int vdnum;

	ddf->currentconf = NULL;
	if (!victim) {
		pr_err("%s: nothing to kill\n", __func__);
		return -1;
	}
	conf = &victim->conf;
	vdnum = find_vde_by_guid(ddf, conf->guid);
	if (vdnum == DDF_NOTFOUND) {
		pr_err("%s: could not find VD %s\n", __func__,
		       guid_str(conf->guid));
		return -1;
	}
	if (st->update_tail) {
		/* mdmon path: queue a one-entry virtual_disk record. */
		struct virtual_disk *vd;
		int len = sizeof(struct virtual_disk)
			+ sizeof(struct virtual_entry);
		vd = xmalloc(len);
		if (vd == NULL) {
			pr_err("%s: failed to allocate %d bytes\n", __func__,
			       len);
			return -1;
		}
		memset(vd, 0 , len);
		vd->magic = DDF_VIRT_RECORDS_MAGIC;
		vd->populated_vdes = cpu_to_be16(0);
		memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
		/* we use DDF_state_deleted as marker */
		vd->entries[0].state = DDF_state_deleted;
		append_metadata_update(st, vd, len);
	} else {
		/* Direct path: delete in-memory and flush immediately. */
		_kill_subarray_ddf(ddf, conf->guid);
		ddf_set_updates_pending(ddf);
		ddf_sync_metadata(st);
	}
	return 0;
}
4505
4506 static void copy_matching_bvd(struct ddf_super *ddf,
4507 struct vd_config *conf,
4508 const struct metadata_update *update)
4509 {
4510 unsigned int mppe =
4511 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4512 unsigned int len = ddf->conf_rec_len * 512;
4513 char *p;
4514 struct vd_config *vc;
4515 for (p = update->buf; p < update->buf + update->len; p += len) {
4516 vc = (struct vd_config *) p;
4517 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4518 memcpy(conf->phys_refnum, vc->phys_refnum,
4519 mppe * (sizeof(__u32) + sizeof(__u64)));
4520 return;
4521 }
4522 }
4523 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4524 conf->sec_elmnt_seq, guid_str(conf->guid));
4525 }
4526
static void ddf_process_update(struct supertype *st,
			       struct metadata_update *update)
{
	/* Apply this update to the metadata.
	 * The first 4 bytes are a DDF_*_MAGIC which guides
	 * our actions.
	 * Possible update are:
	 *  DDF_PHYS_RECORDS_MAGIC
	 *    Add a new physical device or remove an old one.
	 *    Changes to this record only happen implicitly.
	 *    used_pdes is the device number.
	 *  DDF_VIRT_RECORDS_MAGIC
	 *    Add a new VD.  Possibly also change the 'access' bits.
	 *    populated_vdes is the entry number.
	 *  DDF_VD_CONF_MAGIC
	 *    New or updated VD.  the VIRT_RECORD must already
	 *    exist.  For an update, phys_refnum and lba_offset
	 *    (at least) are updated, and the VD_CONF must
	 *    be written to precisely those devices listed with
	 *    a phys_refnum.
	 *  DDF_SPARE_ASSIGN_MAGIC
	 *    replacement Spare Assignment Record... but for which device?
	 *
	 * So, e.g.:
	 *  - to create a new array, we send a VIRT_RECORD and
	 *    a VD_CONF.  Then assemble and start the array.
	 *  - to activate a spare we send a VD_CONF to add the phys_refnum
	 *    and offset.  This will also mark the spare as active with
	 *    a spare-assignment record.
	 */
	struct ddf_super *ddf = st->sb;
	be32 *magic = (be32 *)update->buf;
	struct phys_disk *pd;
	struct virtual_disk *vd;
	struct vd_config *vc;
	struct vcl *vcl;
	struct dl *dl;
	unsigned int ent;
	unsigned int pdnum, pd2, len;

	dprintf("Process update %x\n", be32_to_cpu(*magic));

	if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
		/* Physical-disk add/remove.  The update carries exactly one
		 * entry; pd->used_pdes holds the target slot number. */
		if (update->len != (sizeof(struct phys_disk) +
				    sizeof(struct phys_disk_entry)))
			return;
		pd = (struct phys_disk*)update->buf;

		ent = be16_to_cpu(pd->used_pdes);
		if (ent >= be16_to_cpu(ddf->phys->max_pdes))
			return;
		if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
			struct dl **dlp;
			/* removing this disk. */
			be16_set(ddf->phys->entries[ent].state,
				 cpu_to_be16(DDF_Missing));
			for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
				struct dl *dl = *dlp;
				if (dl->pdnum == (signed)ent) {
					close(dl->fd);
					dl->fd = -1;
					/* FIXME this doesn't free
					 * dl->devname */
					update->space = dl;
					*dlp = dl->next;
					break;
				}
			}
			ddf_set_updates_pending(ddf);
			return;
		}
		/* Adding: the target slot must still be unused (all-0xff
		 * GUID marks a free entry). */
		if (!all_ff(ddf->phys->entries[ent].guid))
			return;
		ddf->phys->entries[ent] = pd->entries[0];
		ddf->phys->used_pdes = cpu_to_be16
			(1 + be16_to_cpu(ddf->phys->used_pdes));
		ddf_set_updates_pending(ddf);
		if (ddf->add_list) {
			struct active_array *a;
			struct dl *al = ddf->add_list;
			ddf->add_list = al->next;

			al->next = ddf->dlist;
			ddf->dlist = al;

			/* As a device has been added, we should check
			 * for any degraded devices that might make
			 * use of this spare */
			for (a = st->arrays ; a; a=a->next)
				a->check_degraded = 1;
		}
	} else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
		/* Virtual-disk add, or (when state == DDF_state_deleted,
		 * used as a marker by kill_subarray_ddf) delete. */
		if (update->len != (sizeof(struct virtual_disk) +
				    sizeof(struct virtual_entry)))
			return;
		vd = (struct virtual_disk*)update->buf;

		if (vd->entries[0].state == DDF_state_deleted) {
			if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
				return;
		} else {
			ent = find_vde_by_guid(ddf, vd->entries[0].guid);
			if (ent != DDF_NOTFOUND) {
				dprintf("%s: VD %s exists already in slot %d\n",
					__func__, guid_str(vd->entries[0].guid),
					ent);
				return;
			}
			ent = find_unused_vde(ddf);
			if (ent == DDF_NOTFOUND)
				return;
			ddf->virt->entries[ent] = vd->entries[0];
			ddf->virt->populated_vdes =
				cpu_to_be16(
					1 + be16_to_cpu(
						ddf->virt->populated_vdes));
			dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
				__func__, guid_str(vd->entries[0].guid), ent,
				ddf->virt->entries[ent].state,
				ddf->virt->entries[ent].init_state);
		}
		ddf_set_updates_pending(ddf);
	}

	else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
		/* New or updated VD configuration record(s): one record of
		 * conf_rec_len*512 bytes per BVD. */
		vc = (struct vd_config*)update->buf;
		len = ddf->conf_rec_len * 512;
		if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
			pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
			       __func__, guid_str(vc->guid), update->len,
			       vc->sec_elmnt_count);
			return;
		}
		for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
			if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
				break;
		dprintf("%s: conf update for %s (%s)\n", __func__,
			guid_str(vc->guid), (vcl ? "old" : "new"));
		if (vcl) {
			/* An update, just copy the phys_refnum and lba_offset
			 * fields
			 */
			unsigned int i;
			unsigned int k;
			copy_matching_bvd(ddf, &vcl->conf, update);
			for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
				dprintf("BVD %u has %08x at %llu\n", 0,
					be32_to_cpu(vcl->conf.phys_refnum[k]),
					be64_to_cpu(LBA_OFFSET(ddf,
							       &vcl->conf)[k]));
			for (i = 1; i < vc->sec_elmnt_count; i++) {
				copy_matching_bvd(ddf, vcl->other_bvds[i-1],
						  update);
				for (k = 0; k < be16_to_cpu(
					     vc->prim_elmnt_count); k++)
					dprintf("BVD %u has %08x at %llu\n", i,
						be32_to_cpu
						(vcl->other_bvds[i-1]->
						 phys_refnum[k]),
						be64_to_cpu
						(LBA_OFFSET
						 (ddf,
						  vcl->other_bvds[i-1])[k]));
			}
		} else {
			/* A new VD_CONF */
			/* update->space (a vcl pre-allocated by
			 * ddf_prepare_update) is consumed here; without it
			 * the update is silently dropped, as the monitor
			 * must not allocate. */
			unsigned int i;
			if (!update->space)
				return;
			vcl = update->space;
			update->space = NULL;
			vcl->next = ddf->conflist;
			memcpy(&vcl->conf, vc, len);
			ent = find_vde_by_guid(ddf, vc->guid);
			if (ent == DDF_NOTFOUND)
				return;
			vcl->vcnum = ent;
			ddf->conflist = vcl;
			for (i = 1; i < vc->sec_elmnt_count; i++)
				memcpy(vcl->other_bvds[i-1],
				       update->buf + len * i, len);
		}
		/* Set DDF_Transition on all Failed devices - to help
		 * us detect those that are no longer in use
		 */
		for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
		     pdnum++)
			if (be16_and(ddf->phys->entries[pdnum].state,
				     cpu_to_be16(DDF_Failed)))
				be16_set(ddf->phys->entries[pdnum].state,
					 cpu_to_be16(DDF_Transition));
		/* Now make sure vlist is correct for each dl. */
		for (dl = ddf->dlist; dl; dl = dl->next) {
			unsigned int vn = 0;
			int in_degraded = 0;

			if (dl->pdnum < 0)
				continue;
			for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
				unsigned int dn, ibvd;
				const struct vd_config *conf;
				int vstate;
				dn = get_pd_index_from_refnum(vcl,
							      dl->disk.refnum,
							      ddf->mppe,
							      &conf, &ibvd);
				if (dn == DDF_NOTFOUND)
					continue;
				dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
					dl->pdnum,
					be32_to_cpu(dl->disk.refnum),
					guid_str(conf->guid),
					conf->sec_elmnt_seq, vn);
				/* Clear the Transition flag */
				if (be16_and
				    (ddf->phys->entries[dl->pdnum].state,
				     cpu_to_be16(DDF_Failed)))
					be16_clear(ddf->phys
						   ->entries[dl->pdnum].state,
						   cpu_to_be16(DDF_Transition));
				dl->vlist[vn++] = vcl;
				vstate = ddf->virt->entries[vcl->vcnum].state
					& DDF_state_mask;
				if (vstate == DDF_state_degraded ||
				    vstate == DDF_state_part_optimal)
					in_degraded = 1;
			}
			while (vn < ddf->max_part)
				dl->vlist[vn++] = NULL;
			if (dl->vlist[0]) {
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Global_Spare));
				if (!be16_and(ddf->phys
					      ->entries[dl->pdnum].type,
					      cpu_to_be16(DDF_Active_in_VD))) {
					be16_set(ddf->phys
						 ->entries[dl->pdnum].type,
						 cpu_to_be16(DDF_Active_in_VD));
					if (in_degraded)
						be16_set(ddf->phys
							 ->entries[dl->pdnum]
							 .state,
							 cpu_to_be16
							 (DDF_Rebuilding));
				}
			}
			if (dl->spare) {
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Global_Spare));
				be16_set(ddf->phys->entries[dl->pdnum].type,
					 cpu_to_be16(DDF_Spare));
			}
			if (!dl->vlist[0] && !dl->spare) {
				be16_set(ddf->phys->entries[dl->pdnum].type,
					 cpu_to_be16(DDF_Global_Spare));
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Spare));
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Active_in_VD));
			}
		}

		/* Now remove any 'Failed' devices that are not part
		 * of any VD.  They will have the Transition flag set.
		 * Once done, we need to update all dl->pdnum numbers.
		 */
		pd2 = 0;
		for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
		     pdnum++) {
			if (be32_to_cpu(ddf->phys->entries[pdnum].refnum) ==
			    0xFFFFFFFF)
				continue;
			if (be16_and(ddf->phys->entries[pdnum].state,
				     cpu_to_be16(DDF_Failed))
			    && be16_and(ddf->phys->entries[pdnum].state,
					cpu_to_be16(DDF_Transition))) {
				/* skip this one unless in dlist*/
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						break;
				if (!dl)
					continue;
			}
			/* Compact surviving entries towards the front,
			 * keeping each dl->pdnum in step. */
			if (pdnum == pd2)
				pd2++;
			else {
				ddf->phys->entries[pd2] =
					ddf->phys->entries[pdnum];
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						dl->pdnum = pd2;
				pd2++;
			}
		}
		ddf->phys->used_pdes = cpu_to_be16(pd2);
		/* Mark the now-vacant tail entries as unused (all-0xff). */
		while (pd2 < pdnum) {
			memset(ddf->phys->entries[pd2].guid, 0xff,
			       DDF_GUID_LEN);
			pd2++;
		}

		ddf_set_updates_pending(ddf);
	}
	/* case DDF_SPARE_ASSIGN_MAGIC */
}
4832
4833 static void ddf_prepare_update(struct supertype *st,
4834 struct metadata_update *update)
4835 {
4836 /* This update arrived at managemon.
4837 * We are about to pass it to monitor.
4838 * If a malloc is needed, do it here.
4839 */
4840 struct ddf_super *ddf = st->sb;
4841 be32 *magic = (be32 *)update->buf;
4842 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4843 struct vcl *vcl;
4844 struct vd_config *conf = (struct vd_config *) update->buf;
4845 if (posix_memalign(&update->space, 512,
4846 offsetof(struct vcl, conf)
4847 + ddf->conf_rec_len * 512) != 0) {
4848 update->space = NULL;
4849 return;
4850 }
4851 vcl = update->space;
4852 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4853 if (alloc_other_bvds(ddf, vcl) != 0) {
4854 free(update->space);
4855 update->space = NULL;
4856 }
4857 }
4858 }
4859
4860 /*
4861 * Check degraded state of a RAID10.
4862 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4863 */
4864 static int raid10_degraded(struct mdinfo *info)
4865 {
4866 int n_prim, n_bvds;
4867 int i;
4868 struct mdinfo *d;
4869 char *found;
4870 int ret = -1;
4871
4872 n_prim = info->array.layout & ~0x100;
4873 n_bvds = info->array.raid_disks / n_prim;
4874 found = xmalloc(n_bvds);
4875 if (found == NULL)
4876 return ret;
4877 memset(found, 0, n_bvds);
4878 for (d = info->devs; d; d = d->next) {
4879 i = d->disk.raid_disk / n_prim;
4880 if (i >= n_bvds) {
4881 pr_err("%s: BUG: invalid raid disk\n", __func__);
4882 goto out;
4883 }
4884 if (d->state_fd > 0)
4885 found[i]++;
4886 }
4887 ret = 2;
4888 for (i = 0; i < n_bvds; i++)
4889 if (!found[i]) {
4890 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4891 ret = 0;
4892 goto out;
4893 } else if (found[i] < n_prim) {
4894 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4895 n_bvds);
4896 ret = 1;
4897 }
4898 out:
4899 free(found);
4900 return ret;
4901 }
4902
4903 /*
4904 * Check if the array 'a' is degraded but not failed.
4905 * If it is, find as many spares as are available and needed and
4906 * arrange for their inclusion.
4907 * We only choose devices which are not already in the array,
4908 * and prefer those with a spare-assignment to this array.
4909 * Otherwise we choose global spares - assuming always that
4910 * there is enough room.
4911 * For each spare that we assign, we return an 'mdinfo' which
4912 * describes the position for the device in the array.
4913 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4914 * the new phys_refnum and lba_offset values.
4915 *
4916 * Only worry about BVDs at the moment.
4917 */
4918 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4919 struct metadata_update **updates)
4920 {
4921 int working = 0;
4922 struct mdinfo *d;
4923 struct ddf_super *ddf = a->container->sb;
4924 int global_ok = 0;
4925 struct mdinfo *rv = NULL;
4926 struct mdinfo *di;
4927 struct metadata_update *mu;
4928 struct dl *dl;
4929 int i;
4930 unsigned int j;
4931 struct vcl *vcl;
4932 struct vd_config *vc;
4933 unsigned int n_bvd;
4934
4935 for (d = a->info.devs ; d ; d = d->next) {
4936 if ((d->curr_state & DS_FAULTY) &&
4937 d->state_fd >= 0)
4938 /* wait for Removal to happen */
4939 return NULL;
4940 if (d->state_fd >= 0)
4941 working ++;
4942 }
4943
4944 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4945 a->info.array.raid_disks,
4946 a->info.array.level);
4947 if (working == a->info.array.raid_disks)
4948 return NULL; /* array not degraded */
4949 switch (a->info.array.level) {
4950 case 1:
4951 if (working == 0)
4952 return NULL; /* failed */
4953 break;
4954 case 4:
4955 case 5:
4956 if (working < a->info.array.raid_disks - 1)
4957 return NULL; /* failed */
4958 break;
4959 case 6:
4960 if (working < a->info.array.raid_disks - 2)
4961 return NULL; /* failed */
4962 break;
4963 case 10:
4964 if (raid10_degraded(&a->info) < 1)
4965 return NULL;
4966 break;
4967 default: /* concat or stripe */
4968 return NULL; /* failed */
4969 }
4970
4971 /* For each slot, if it is not working, find a spare */
4972 dl = ddf->dlist;
4973 for (i = 0; i < a->info.array.raid_disks; i++) {
4974 for (d = a->info.devs ; d ; d = d->next)
4975 if (d->disk.raid_disk == i)
4976 break;
4977 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4978 if (d && (d->state_fd >= 0))
4979 continue;
4980
4981 /* OK, this device needs recovery. Find a spare */
4982 again:
4983 for ( ; dl ; dl = dl->next) {
4984 unsigned long long esize;
4985 unsigned long long pos;
4986 struct mdinfo *d2;
4987 int is_global = 0;
4988 int is_dedicated = 0;
4989 struct extent *ex;
4990 unsigned int j;
4991 be16 state;
4992
4993 if (dl->pdnum < 0)
4994 continue;
4995 state = ddf->phys->entries[dl->pdnum].state;
4996 if (be16_and(state,
4997 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
4998 !be16_and(state,
4999 cpu_to_be16(DDF_Online)))
5000 continue;
5001
5002 /* If in this array, skip */
5003 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
5004 if (d2->state_fd >= 0 &&
5005 d2->disk.major == dl->major &&
5006 d2->disk.minor == dl->minor) {
5007 dprintf("%x:%x (%08x) already in array\n",
5008 dl->major, dl->minor,
5009 be32_to_cpu(dl->disk.refnum));
5010 break;
5011 }
5012 if (d2)
5013 continue;
5014 if (be16_and(ddf->phys->entries[dl->pdnum].type,
5015 cpu_to_be16(DDF_Spare))) {
5016 /* Check spare assign record */
5017 if (dl->spare) {
5018 if (dl->spare->type & DDF_spare_dedicated) {
5019 /* check spare_ents for guid */
5020 for (j = 0 ;
5021 j < be16_to_cpu
5022 (dl->spare
5023 ->populated);
5024 j++) {
5025 if (memcmp(dl->spare->spare_ents[j].guid,
5026 ddf->virt->entries[a->info.container_member].guid,
5027 DDF_GUID_LEN) == 0)
5028 is_dedicated = 1;
5029 }
5030 } else
5031 is_global = 1;
5032 }
5033 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
5034 cpu_to_be16(DDF_Global_Spare))) {
5035 is_global = 1;
5036 } else if (!be16_and(ddf->phys
5037 ->entries[dl->pdnum].state,
5038 cpu_to_be16(DDF_Failed))) {
5039 /* we can possibly use some of this */
5040 is_global = 1;
5041 }
5042 if ( ! (is_dedicated ||
5043 (is_global && global_ok))) {
5044 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
5045 is_dedicated, is_global);
5046 continue;
5047 }
5048
5049 /* We are allowed to use this device - is there space?
5050 * We need a->info.component_size sectors */
5051 ex = get_extents(ddf, dl);
5052 if (!ex) {
5053 dprintf("cannot get extents\n");
5054 continue;
5055 }
5056 j = 0; pos = 0;
5057 esize = 0;
5058
5059 do {
5060 esize = ex[j].start - pos;
5061 if (esize >= a->info.component_size)
5062 break;
5063 pos = ex[j].start + ex[j].size;
5064 j++;
5065 } while (ex[j-1].size);
5066
5067 free(ex);
5068 if (esize < a->info.component_size) {
5069 dprintf("%x:%x has no room: %llu %llu\n",
5070 dl->major, dl->minor,
5071 esize, a->info.component_size);
5072 /* No room */
5073 continue;
5074 }
5075
5076 /* Cool, we have a device with some space at pos */
5077 di = xcalloc(1, sizeof(*di));
5078 di->disk.number = i;
5079 di->disk.raid_disk = i;
5080 di->disk.major = dl->major;
5081 di->disk.minor = dl->minor;
5082 di->disk.state = 0;
5083 di->recovery_start = 0;
5084 di->data_offset = pos;
5085 di->component_size = a->info.component_size;
5086 di->next = rv;
5087 rv = di;
5088 dprintf("%x:%x (%08x) to be %d at %llu\n",
5089 dl->major, dl->minor,
5090 be32_to_cpu(dl->disk.refnum), i, pos);
5091
5092 break;
5093 }
5094 if (!dl && ! global_ok) {
5095 /* not enough dedicated spares, try global */
5096 global_ok = 1;
5097 dl = ddf->dlist;
5098 goto again;
5099 }
5100 }
5101
5102 if (!rv)
5103 /* No spares found */
5104 return rv;
5105 /* Now 'rv' has a list of devices to return.
5106 * Create a metadata_update record to update the
5107 * phys_refnum and lba_offset values
5108 */
5109 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5110 &n_bvd, &vcl);
5111 if (vc == NULL)
5112 return NULL;
5113
5114 mu = xmalloc(sizeof(*mu));
5115 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5116 free(mu);
5117 mu = NULL;
5118 }
5119
5120 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5121 mu->buf = xmalloc(mu->len);
5122 mu->space = NULL;
5123 mu->space_list = NULL;
5124 mu->next = *updates;
5125 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5126 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5127 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5128 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5129
5130 vc = (struct vd_config*)mu->buf;
5131 for (di = rv ; di ; di = di->next) {
5132 unsigned int i_sec, i_prim;
5133 i_sec = di->disk.raid_disk
5134 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5135 i_prim = di->disk.raid_disk
5136 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5137 vc = (struct vd_config *)(mu->buf
5138 + i_sec * ddf->conf_rec_len * 512);
5139 for (dl = ddf->dlist; dl; dl = dl->next)
5140 if (dl->major == di->disk.major
5141 && dl->minor == di->disk.minor)
5142 break;
5143 if (!dl || dl->pdnum < 0) {
5144 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
5145 __func__, di->disk.raid_disk,
5146 di->disk.major, di->disk.minor);
5147 return NULL;
5148 }
5149 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5150 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5151 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5152 be32_to_cpu(vc->phys_refnum[i_prim]),
5153 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5154 }
5155 *updates = mu;
5156 return rv;
5157 }
5158 #endif /* MDASSEMBLE */
5159
5160 static int ddf_level_to_layout(int level)
5161 {
5162 switch(level) {
5163 case 0:
5164 case 1:
5165 return 0;
5166 case 5:
5167 return ALGORITHM_LEFT_SYMMETRIC;
5168 case 6:
5169 return ALGORITHM_ROTATING_N_CONTINUE;
5170 case 10:
5171 return 0x102;
5172 default:
5173 return UnSet;
5174 }
5175 }
5176
5177 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5178 {
5179 if (level && *level == UnSet)
5180 *level = LEVEL_CONTAINER;
5181
5182 if (level && layout && *layout == UnSet)
5183 *layout = ddf_level_to_layout(*level);
5184 }
5185
/* Metadata handler for the SNIA Common RAID Disk Data Format.
 * Function pointers not listed here are features this handler does
 * not implement.
 */
struct superswitch super_ddf = {
#ifndef MDASSEMBLE
	/* Operations used by the mdadm command-line tool only. */
	.examine_super	= examine_super_ddf,
	.brief_examine_super = brief_examine_super_ddf,
	.brief_examine_subarrays = brief_examine_subarrays_ddf,
	.export_examine_super = export_examine_super_ddf,
	.detail_super	= detail_super_ddf,
	.brief_detail_super = brief_detail_super_ddf,
	.validate_geometry = validate_geometry_ddf,
	.write_init_super = write_init_super_ddf,
	.add_to_super	= add_to_super_ddf,
	.remove_from_super = remove_from_super_ddf,
	.load_container	= load_container_ddf,
	.copy_metadata = copy_metadata_ddf,
	.kill_subarray  = kill_subarray_ddf,
#endif
	/* Core metadata operations shared by all builds. */
	.match_home	= match_home_ddf,
	.uuid_from_super= uuid_from_super_ddf,
	.getinfo_super  = getinfo_super_ddf,
	.update_super	= update_super_ddf,

	.avail_size	= avail_size_ddf,

	.compare_super	= compare_super_ddf,

	.load_super	= load_super_ddf,
	.init_super	= init_super_ddf,
	.store_super	= store_super_ddf,
	.free_super	= free_super_ddf,
	.match_metadata_desc = match_metadata_desc_ddf,
	.container_content = container_content_ddf,
	.default_geometry = default_geometry_ddf,

	.external	= 1,	/* metadata is managed externally (mdmon) */

#ifndef MDASSEMBLE
/* for mdmon */
	.open_new	= ddf_open_new,
	.set_array_state= ddf_set_array_state,
	.set_disk	= ddf_set_disk,
	.sync_metadata	= ddf_sync_metadata,
	.process_update	= ddf_process_update,
	.prepare_update	= ddf_prepare_update,
	.activate_spare = ddf_activate_spare,
#endif
	.name = "ddf",
};