1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2014 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33 #include <stddef.h>
34
35 /* a non-official T10 name for creation GUIDs */
36 static char T10[] = "Linux-MD";
37
38 /* DDF timestamps are based on 1980-01-01, so to convert to Unix
39 * timestamps we must add the number of seconds in the 1970s:
40 * 10 years including 2 leap years (1972 and 1976).
41 */
42 #define DECADE (3600*24*(365*10+2))
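
/* A minimal illustrative helper (the name is ours and it is not used
 * elsewhere in this file): converting between the DDF epoch (1980) and the
 * Unix epoch (1970) is just an offset of DECADE seconds, matching the
 * "time(0) - DECADE" and "+ DECADE" expressions used further down.
 */
static inline time_t ddf_time_to_unix(__u32 ddf_secs)
{
	return (time_t)ddf_secs + DECADE;
}
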
43 unsigned long crc32(
44 unsigned long crc,
45 const unsigned char *buf,
46 unsigned len);
47
48 #define DDF_NOTFOUND (~0U)
49 #define DDF_CONTAINER (DDF_NOTFOUND-1)
50
51 /* Default for safe_mode_delay. Same value as for IMSM.
52 */
53 static const int DDF_SAFE_MODE_DELAY = 4000;
54
55 /* The DDF metadata handling.
56 * DDF metadata lives at the end of the device.
57 * The last 512 byte block provides an 'anchor' which is used to locate
58 * the rest of the metadata which usually lives immediately behind the anchor.
59 *
60 * Note:
61 * - all multibyte numeric fields are bigendian.
62 * - all strings are space padded.
63 *
64 */
65
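/* Illustrative only: because the anchor occupies the last 512-byte block,
 * locating DDF metadata needs nothing but the device size (this is exactly
 * what load_ddf_headers() below does):
 *
 *	unsigned long long dsize;
 *	get_dev_size(fd, NULL, &dsize);
 *	lseek64(fd, dsize - 512, 0);
 *	read(fd, &super->anchor, 512);
 */
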
66 typedef struct __be16 {
67 __u16 _v16;
68 } be16;
69 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
70 #define be16_and(x, y) ((x)._v16 & (y)._v16)
71 #define be16_or(x, y) ((x)._v16 | (y)._v16)
72 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
73 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
74
75 typedef struct __be32 {
76 __u32 _v32;
77 } be32;
78 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
79
80 typedef struct __be64 {
81 __u64 _v64;
82 } be64;
83 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
84
85 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
86 static inline be16 cpu_to_be16(__u16 x)
87 {
88 be16 be = { ._v16 = __cpu_to_be16(x) };
89 return be;
90 }
91
92 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
93 static inline be32 cpu_to_be32(__u32 x)
94 {
95 be32 be = { ._v32 = __cpu_to_be32(x) };
96 return be;
97 }
98
99 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
100 static inline be64 cpu_to_be64(__u64 x)
101 {
102 be64 be = { ._v64 = __cpu_to_be64(x) };
103 return be;
104 }
105
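/* Example usage of the wrappers above (illustrative only): wrapping the
 * on-disk fields in single-member structs means host-order and big-endian
 * values cannot be mixed by accident; every access goes through an
 * explicit helper:
 *
 *	be32 seq = cpu_to_be32(1);		   store big-endian
 *	__u32 host = be32_to_cpu(seq);		   read back in host order
 *	if (be32_eq(seq, cpu_to_be32(1)))	   compare without converting
 *		...
 */
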
106 /* Primary Raid Level (PRL) */
107 #define DDF_RAID0 0x00
108 #define DDF_RAID1 0x01
109 #define DDF_RAID3 0x03
110 #define DDF_RAID4 0x04
111 #define DDF_RAID5 0x05
112 #define DDF_RAID1E 0x11
113 #define DDF_JBOD 0x0f
114 #define DDF_CONCAT 0x1f
115 #define DDF_RAID5E 0x15
116 #define DDF_RAID5EE 0x25
117 #define DDF_RAID6 0x06
118
119 /* Raid Level Qualifier (RLQ) */
120 #define DDF_RAID0_SIMPLE 0x00
121 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
122 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
123 #define DDF_RAID3_0 0x00 /* parity in first extent */
124 #define DDF_RAID3_N 0x01 /* parity in last extent */
125 #define DDF_RAID4_0 0x00 /* parity in first extent */
126 #define DDF_RAID4_N 0x01 /* parity in last extent */
127 /* these apply to raid5e and raid5ee as well */
128 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
129 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
130 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
131 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
132
133 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
134 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
135
136 /* Secondary RAID Level (SRL) */
137 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
138 #define DDF_2MIRRORED 0x01
139 #define DDF_2CONCAT 0x02
140 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
141
142 /* Magic numbers */
143 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
144 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
145 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
146 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
147 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
148 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
149 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
150 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
151 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
152 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
153
154 #define DDF_GUID_LEN 24
155 #define DDF_REVISION_0 "01.00.00"
156 #define DDF_REVISION_2 "01.02.00"
157
158 struct ddf_header {
159 be32 magic; /* DDF_HEADER_MAGIC */
160 be32 crc;
161 char guid[DDF_GUID_LEN];
162 char revision[8]; /* 01.02.00 */
163 be32 seq; /* starts at '1' */
164 be32 timestamp;
165 __u8 openflag;
166 __u8 foreignflag;
167 __u8 enforcegroups;
168 __u8 pad0; /* 0xff */
169 __u8 pad1[12]; /* 12 * 0xff */
170 /* 64 bytes so far */
171 __u8 header_ext[32]; /* reserved: fill with 0xff */
172 be64 primary_lba;
173 be64 secondary_lba;
174 __u8 type;
175 __u8 pad2[3]; /* 0xff */
176 be32 workspace_len; /* sectors for vendor space -
177 * at least 32768 sectors */
178 be64 workspace_lba;
179 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
180 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
181 be16 max_partitions; /* i.e. max num of configuration
182 record entries per disk */
183 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
184 *12/512) */
185 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
186 __u8 pad3[54]; /* 0xff */
187 /* 192 bytes so far */
188 be32 controller_section_offset;
189 be32 controller_section_length;
190 be32 phys_section_offset;
191 be32 phys_section_length;
192 be32 virt_section_offset;
193 be32 virt_section_length;
194 be32 config_section_offset;
195 be32 config_section_length;
196 be32 data_section_offset;
197 be32 data_section_length;
198 be32 bbm_section_offset;
199 be32 bbm_section_length;
200 be32 diag_space_offset;
201 be32 diag_space_length;
202 be32 vendor_offset;
203 be32 vendor_length;
204 /* 256 bytes so far */
205 __u8 pad4[256]; /* 0xff */
206 };
207
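/* Illustrative only: all the *_section_offset/length fields above are in
 * 512-byte sectors and are relative to the LBA of the header itself, so
 * e.g. the physical disk records of the primary copy start at byte
 *
 *	(be64_to_cpu(hdr->primary_lba) +
 *	 be32_to_cpu(hdr->phys_section_offset)) << 9
 *
 * from the start of the device (this is what load_section() below does).
 */
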
208 /* type field */
209 #define DDF_HEADER_ANCHOR 0x00
210 #define DDF_HEADER_PRIMARY 0x01
211 #define DDF_HEADER_SECONDARY 0x02
212
213 /* The content of the 'controller section' - global scope */
214 struct ddf_controller_data {
215 be32 magic; /* DDF_CONTROLLER_MAGIC */
216 be32 crc;
217 char guid[DDF_GUID_LEN];
218 struct controller_type {
219 be16 vendor_id;
220 be16 device_id;
221 be16 sub_vendor_id;
222 be16 sub_device_id;
223 } type;
224 char product_id[16];
225 __u8 pad[8]; /* 0xff */
226 __u8 vendor_data[448];
227 };
228
229 /* The content of phys_section - global scope */
230 struct phys_disk {
231 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
232 be32 crc;
233 be16 used_pdes; /* This is a counter, not a max - the list
234 * of used entries may not be dense */
235 be16 max_pdes;
236 __u8 pad[52];
237 struct phys_disk_entry {
238 char guid[DDF_GUID_LEN];
239 be32 refnum;
240 be16 type;
241 be16 state;
242 be64 config_size; /* DDF structures must be after here */
243 char path[18]; /* Another horrible structure really
244 * but is "used for information
245 * purposes only" */
246 __u8 pad[6];
247 } entries[0];
248 };
249
250 /* phys_disk_entry.type is a bitmap - bigendian remember */
251 #define DDF_Forced_PD_GUID 1
252 #define DDF_Active_in_VD 2
253 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
254 #define DDF_Spare 8 /* overrides Global_spare */
255 #define DDF_Foreign 16
256 #define DDF_Legacy 32 /* no DDF on this device */
257
258 #define DDF_Interface_mask 0xf00
259 #define DDF_Interface_SCSI 0x100
260 #define DDF_Interface_SAS 0x200
261 #define DDF_Interface_SATA 0x300
262 #define DDF_Interface_FC 0x400
263
264 /* phys_disk_entry.state is a bigendian bitmap */
265 #define DDF_Online 1
266 #define DDF_Failed 2 /* overrides 1,4,8 */
267 #define DDF_Rebuilding 4
268 #define DDF_Transition 8
269 #define DDF_SMART 16
270 #define DDF_ReadErrors 32
271 #define DDF_Missing 64
272
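/* Illustrative only: testing one of these big-endian bitmap fields goes
 * through the be16 helpers, e.g.
 *
 *	if (be16_to_cpu(pd->state) & DDF_Failed)
 *		...	disk is failed; Online/Rebuilding/Transition are moot
 */
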
273 /* The content of the virt_section global scope */
274 struct virtual_disk {
275 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
276 be32 crc;
277 be16 populated_vdes;
278 be16 max_vdes;
279 __u8 pad[52];
280 struct virtual_entry {
281 char guid[DDF_GUID_LEN];
282 be16 unit;
283 __u16 pad0; /* 0xffff */
284 be16 guid_crc;
285 be16 type;
286 __u8 state;
287 __u8 init_state;
288 __u8 pad1[14];
289 char name[16];
290 } entries[0];
291 };
292
293 /* virtual_entry.type is a bitmap - bigendian */
294 #define DDF_Shared 1
295 #define DDF_Enforce_Groups 2
296 #define DDF_Unicode 4
297 #define DDF_Owner_Valid 8
298
299 /* virtual_entry.state is a bigendian bitmap */
300 #define DDF_state_mask 0x7
301 #define DDF_state_optimal 0x0
302 #define DDF_state_degraded 0x1
303 #define DDF_state_deleted 0x2
304 #define DDF_state_missing 0x3
305 #define DDF_state_failed 0x4
306 #define DDF_state_part_optimal 0x5
307
308 #define DDF_state_morphing 0x8
309 #define DDF_state_inconsistent 0x10
310
311 /* virtual_entry.init_state is a bigendian bitmap */
312 #define DDF_initstate_mask 0x03
313 #define DDF_init_not 0x00
314 #define DDF_init_quick 0x01 /* initialisation in progress.
315 * i.e. 'state_inconsistent' */
316 #define DDF_init_full 0x02
317
318 #define DDF_access_mask 0xc0
319 #define DDF_access_rw 0x00
320 #define DDF_access_ro 0x80
321 #define DDF_access_blocked 0xc0
322
323 /* The content of the config_section - local scope
324 * It has multiple records each config_record_len sectors
325 * They can be vd_config or spare_assign
326 */
327
328 struct vd_config {
329 be32 magic; /* DDF_VD_CONF_MAGIC */
330 be32 crc;
331 char guid[DDF_GUID_LEN];
332 be32 timestamp;
333 be32 seqnum;
334 __u8 pad0[24];
335 be16 prim_elmnt_count;
336 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
337 __u8 prl;
338 __u8 rlq;
339 __u8 sec_elmnt_count;
340 __u8 sec_elmnt_seq;
341 __u8 srl;
342 be64 blocks; /* blocks per component could be different
343 * on different component devices...(only
344 * for concat I hope) */
345 be64 array_blocks; /* blocks in array */
346 __u8 pad1[8];
347 be32 spare_refs[8]; /* This is used to detect missing spares.
348 * As we don't have an interface for that
349 * the values are ignored.
350 */
351 __u8 cache_pol[8];
352 __u8 bg_rate;
353 __u8 pad2[3];
354 __u8 pad3[52];
355 __u8 pad4[192];
356 __u8 v0[32]; /* reserved- 0xff */
357 __u8 v1[32]; /* reserved- 0xff */
358 __u8 v2[16]; /* reserved- 0xff */
359 __u8 v3[16]; /* reserved- 0xff */
360 __u8 vendor[32];
361 be32 phys_refnum[0]; /* refnum of each disk in sequence */
362 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
363 bvd are always the same size */
364 };
365 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
366
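/* Illustrative only: the starting LBA of member i of a BVD lives in the
 * unnamed be64 array that directly follows the mppe-sized phys_refnum[]
 * table, which is why it is reached via LBA_OFFSET() rather than a named
 * struct field:
 *
 *	unsigned long long start = be64_to_cpu(LBA_OFFSET(ddf, vc)[i]);
 */
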
367 /* vd_config.cache_pol[7] is a bitmap */
368 #define DDF_cache_writeback 1 /* else writethrough */
369 #define DDF_cache_wadaptive 2 /* only applies if writeback */
370 #define DDF_cache_readahead 4
371 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
372 #define DDF_cache_ifnobatt 16 /* write caching allowed even if battery is poor */
373 #define DDF_cache_wallowed 32 /* enable write caching */
374 #define DDF_cache_rallowed 64 /* enable read caching */
375
376 struct spare_assign {
377 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
378 be32 crc;
379 be32 timestamp;
380 __u8 reserved[7];
381 __u8 type;
382 be16 populated; /* SAEs used */
383 be16 max; /* max SAEs */
384 __u8 pad[8];
385 struct spare_assign_entry {
386 char guid[DDF_GUID_LEN];
387 be16 secondary_element;
388 __u8 pad[6];
389 } spare_ents[0];
390 };
391 /* spare_assign.type is a bitmap */
392 #define DDF_spare_dedicated 0x1 /* else global */
393 #define DDF_spare_revertible 0x2 /* else committable */
394 #define DDF_spare_active 0x4 /* else not active */
395 #define DDF_spare_affinity 0x8 /* enclosure affinity */
396
397 /* The data_section contents - local scope */
398 struct disk_data {
399 be32 magic; /* DDF_PHYS_DATA_MAGIC */
400 be32 crc;
401 char guid[DDF_GUID_LEN];
402 be32 refnum; /* crc of some magic drive data ... */
403 __u8 forced_ref; /* set when above was not result of magic */
404 __u8 forced_guid; /* set if guid was forced rather than magic */
405 __u8 vendor[32];
406 __u8 pad[442];
407 };
408
409 /* bbm_section content */
410 struct bad_block_log {
411 be32 magic;
412 be32 crc;
413 be16 entry_count;
414 be32 spare_count;
415 __u8 pad[10];
416 be64 first_spare;
417 struct mapped_block {
418 be64 defective_start;
419 be32 replacement_start;
420 be16 remap_count;
421 __u8 pad[2];
422 } entries[0];
423 };
424
425 /* Struct for internally holding ddf structures */
426 /* The DDF structure stored on each device is potentially
427 * quite different, as some data is global and some is local.
428 * The global data is:
429 * - ddf header
430 * - controller_data
431 * - Physical disk records
432 * - Virtual disk records
433 * The local data is:
434 * - Configuration records
435 * - Physical Disk data section
436 * ( and Bad block and vendor which I don't care about yet).
437 *
438 * The local data is parsed into separate lists as it is read
439 * and reconstructed for writing. This means that we only need
440 * to make config changes once and they are automatically
441 * propagated to all devices.
442 * The global (config and disk data) records are each in a list
443 * of separate data structures. When writing we find the entry
444 * or entries applicable to the particular device.
445 */
446 struct ddf_super {
447 struct ddf_header anchor, primary, secondary;
448 struct ddf_controller_data controller;
449 struct ddf_header *active;
450 struct phys_disk *phys;
451 struct virtual_disk *virt;
452 char *conf;
453 int pdsize, vdsize;
454 unsigned int max_part, mppe, conf_rec_len;
455 int currentdev;
456 int updates_pending;
457 struct vcl {
458 union {
459 char space[512];
460 struct {
461 struct vcl *next;
462 unsigned int vcnum; /* index into ->virt */
463 /* For an array with a secondary level there are
464 * multiple vd_config structures, all with the same
465 * guid but with different sec_elmnt_seq.
466 * One of these structures is in 'conf' below.
467 * The others are in other_bvds, not in any
468 * particular order.
469 */
470 struct vd_config **other_bvds;
471 __u64 *block_sizes; /* NULL if all the same */
472 };
473 };
474 struct vd_config conf;
475 } *conflist, *currentconf;
476 struct dl {
477 union {
478 char space[512];
479 struct {
480 struct dl *next;
481 int major, minor;
482 char *devname;
483 int fd;
484 unsigned long long size; /* sectors */
485 be64 primary_lba; /* sectors */
486 be64 secondary_lba; /* sectors */
487 be64 workspace_lba; /* sectors */
488 int pdnum; /* index in ->phys */
489 struct spare_assign *spare;
490 void *mdupdate; /* hold metadata update */
491
492 /* These fields used by auto-layout */
493 int raiddisk; /* slot to fill in autolayout */
494 __u64 esize;
495 int displayed;
496 };
497 };
498 struct disk_data disk;
499 struct vcl *vlist[0]; /* max_part in size */
500 } *dlist, *add_list;
501 };
502
503 static int load_super_ddf_all(struct supertype *st, int fd,
504 void **sbp, char *devname);
505 static int get_svd_state(const struct ddf_super *, const struct vcl *);
506 static int
507 validate_geometry_ddf_container(struct supertype *st,
508 int level, int layout, int raiddisks,
509 int chunk, unsigned long long size,
510 unsigned long long data_offset,
511 char *dev, unsigned long long *freesize,
512 int verbose);
513
514 static int validate_geometry_ddf_bvd(struct supertype *st,
515 int level, int layout, int raiddisks,
516 int *chunk, unsigned long long size,
517 unsigned long long data_offset,
518 char *dev, unsigned long long *freesize,
519 int verbose);
520
521 static void free_super_ddf(struct supertype *st);
522 static int all_ff(const char *guid);
523 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
524 be32 refnum, unsigned int nmax,
525 const struct vd_config **bvd,
526 unsigned int *idx);
527 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
528 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
529 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
530 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i);
531 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
532 static int init_super_ddf_bvd(struct supertype *st,
533 mdu_array_info_t *info,
534 unsigned long long size,
535 char *name, char *homehost,
536 int *uuid, unsigned long long data_offset);
537
538 #if DEBUG
539 static void pr_state(struct ddf_super *ddf, const char *msg)
540 {
541 unsigned int i;
542 dprintf("%s: ", msg);
543 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
544 if (all_ff(ddf->virt->entries[i].guid))
545 continue;
546 dprintf_cont("%u(s=%02x i=%02x) ", i,
547 ddf->virt->entries[i].state,
548 ddf->virt->entries[i].init_state);
549 }
550 dprintf_cont("\n");
551 }
552 #else
553 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
554 #endif
555
556 static void _ddf_set_updates_pending(struct ddf_super *ddf, struct vd_config *vc,
557 const char *func)
558 {
559 if (vc) {
560 vc->timestamp = cpu_to_be32(time(0)-DECADE);
561 vc->seqnum = cpu_to_be32(be32_to_cpu(vc->seqnum) + 1);
562 }
563 if (ddf->updates_pending)
564 return;
565 ddf->updates_pending = 1;
566 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
567 pr_state(ddf, func);
568 }
569
570 #define ddf_set_updates_pending(x,v) _ddf_set_updates_pending((x), (v), __func__)
571
572 static be32 calc_crc(void *buf, int len)
573 {
574 /* crcs are always at the same place as in the ddf_header */
575 struct ddf_header *ddf = buf;
576 be32 oldcrc = ddf->crc;
577 __u32 newcrc;
578 ddf->crc = cpu_to_be32(0xffffffff);
579
580 newcrc = crc32(0, buf, len);
581 ddf->crc = oldcrc;
582 /* The crc is stored (like everything) bigendian, so convert
583 * here for simplicity
584 */
585 return cpu_to_be32(newcrc);
586 }
587
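/* A small illustrative helper (hypothetical, not used below): a section is
 * intact iff recomputing the CRC over the whole buffer reproduces the
 * stored value; calc_crc() takes care of masking the crc field itself,
 * which sits at the same offset in every section type.
 */
static inline int ddf_section_crc_ok(void *buf, int len)
{
	struct ddf_header *hdr = buf;

	return be32_eq(calc_crc(buf, len), hdr->crc);
}
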
588 #define DDF_INVALID_LEVEL 0xff
589 #define DDF_NO_SECONDARY 0xff
590 static int err_bad_md_layout(const mdu_array_info_t *array)
591 {
592 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
593 array->level, array->layout, array->raid_disks);
594 return -1;
595 }
596
597 static int layout_md2ddf(const mdu_array_info_t *array,
598 struct vd_config *conf)
599 {
600 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
601 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
602 __u8 sec_elmnt_count = 1;
603 __u8 srl = DDF_NO_SECONDARY;
604
605 switch (array->level) {
606 case LEVEL_LINEAR:
607 prl = DDF_CONCAT;
608 break;
609 case 0:
610 rlq = DDF_RAID0_SIMPLE;
611 prl = DDF_RAID0;
612 break;
613 case 1:
614 switch (array->raid_disks) {
615 case 2:
616 rlq = DDF_RAID1_SIMPLE;
617 break;
618 case 3:
619 rlq = DDF_RAID1_MULTI;
620 break;
621 default:
622 return err_bad_md_layout(array);
623 }
624 prl = DDF_RAID1;
625 break;
626 case 4:
627 if (array->layout != 0)
628 return err_bad_md_layout(array);
629 rlq = DDF_RAID4_N;
630 prl = DDF_RAID4;
631 break;
632 case 5:
633 switch (array->layout) {
634 case ALGORITHM_LEFT_ASYMMETRIC:
635 rlq = DDF_RAID5_N_RESTART;
636 break;
637 case ALGORITHM_RIGHT_ASYMMETRIC:
638 rlq = DDF_RAID5_0_RESTART;
639 break;
640 case ALGORITHM_LEFT_SYMMETRIC:
641 rlq = DDF_RAID5_N_CONTINUE;
642 break;
643 case ALGORITHM_RIGHT_SYMMETRIC:
644 /* not mentioned in standard */
645 default:
646 return err_bad_md_layout(array);
647 }
648 prl = DDF_RAID5;
649 break;
650 case 6:
651 switch (array->layout) {
652 case ALGORITHM_ROTATING_N_RESTART:
653 rlq = DDF_RAID5_N_RESTART;
654 break;
655 case ALGORITHM_ROTATING_ZERO_RESTART:
656 rlq = DDF_RAID6_0_RESTART;
657 break;
658 case ALGORITHM_ROTATING_N_CONTINUE:
659 rlq = DDF_RAID5_N_CONTINUE;
660 break;
661 default:
662 return err_bad_md_layout(array);
663 }
664 prl = DDF_RAID6;
665 break;
666 case 10:
667 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
668 rlq = DDF_RAID1_SIMPLE;
669 prim_elmnt_count = cpu_to_be16(2);
670 sec_elmnt_count = array->raid_disks / 2;
671 srl = DDF_2SPANNED;
672 prl = DDF_RAID1;
673 } else if (array->raid_disks % 3 == 0 &&
674 array->layout == 0x103) {
675 rlq = DDF_RAID1_MULTI;
676 prim_elmnt_count = cpu_to_be16(3);
677 sec_elmnt_count = array->raid_disks / 3;
678 srl = DDF_2SPANNED;
679 prl = DDF_RAID1;
680 } else if (array->layout == 0x201) {
681 prl = DDF_RAID1E;
682 rlq = DDF_RAID1E_OFFSET;
683 } else if (array->layout == 0x102) {
684 prl = DDF_RAID1E;
685 rlq = DDF_RAID1E_ADJACENT;
686 } else
687 return err_bad_md_layout(array);
688 break;
689 default:
690 return err_bad_md_layout(array);
691 }
692 conf->prl = prl;
693 conf->prim_elmnt_count = prim_elmnt_count;
694 conf->rlq = rlq;
695 conf->srl = srl;
696 conf->sec_elmnt_count = sec_elmnt_count;
697 return 0;
698 }
699
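/* Illustrative usage of layout_md2ddf() (hypothetical values): an md
 * RAID10 "near=2" array with 4 disks becomes two 2-disk RAID1 BVDs
 * spanned at the secondary level:
 *
 *	mdu_array_info_t array = { .level = 10, .layout = 0x102,
 *				   .raid_disks = 4 };
 *	struct vd_config conf;
 *	if (layout_md2ddf(&array, &conf) == 0)
 *		...	conf.prl == DDF_RAID1, conf.srl == DDF_2SPANNED,
 *			conf.prim_elmnt_count == cpu_to_be16(2),
 *			conf.sec_elmnt_count == 2
 */
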
700 static int err_bad_ddf_layout(const struct vd_config *conf)
701 {
702 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
703 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
704 return -1;
705 }
706
707 static int layout_ddf2md(const struct vd_config *conf,
708 mdu_array_info_t *array)
709 {
710 int level = LEVEL_UNSUPPORTED;
711 int layout = 0;
712 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
713
714 if (conf->sec_elmnt_count > 1) {
715 /* see also check_secondary() */
716 if (conf->prl != DDF_RAID1 ||
717 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
718 pr_err("Unsupported secondary RAID level %u/%u\n",
719 conf->prl, conf->srl);
720 return -1;
721 }
722 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
723 layout = 0x102;
724 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
725 layout = 0x103;
726 else
727 return err_bad_ddf_layout(conf);
728 raiddisks *= conf->sec_elmnt_count;
729 level = 10;
730 goto good;
731 }
732
733 switch (conf->prl) {
734 case DDF_CONCAT:
735 level = LEVEL_LINEAR;
736 break;
737 case DDF_RAID0:
738 if (conf->rlq != DDF_RAID0_SIMPLE)
739 return err_bad_ddf_layout(conf);
740 level = 0;
741 break;
742 case DDF_RAID1:
743 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
744 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
745 return err_bad_ddf_layout(conf);
746 level = 1;
747 break;
748 case DDF_RAID1E:
749 if (conf->rlq == DDF_RAID1E_ADJACENT)
750 layout = 0x102;
751 else if (conf->rlq == DDF_RAID1E_OFFSET)
752 layout = 0x201;
753 else
754 return err_bad_ddf_layout(conf);
755 level = 10;
756 break;
757 case DDF_RAID4:
758 if (conf->rlq != DDF_RAID4_N)
759 return err_bad_ddf_layout(conf);
760 level = 4;
761 break;
762 case DDF_RAID5:
763 switch (conf->rlq) {
764 case DDF_RAID5_N_RESTART:
765 layout = ALGORITHM_LEFT_ASYMMETRIC;
766 break;
767 case DDF_RAID5_0_RESTART:
768 layout = ALGORITHM_RIGHT_ASYMMETRIC;
769 break;
770 case DDF_RAID5_N_CONTINUE:
771 layout = ALGORITHM_LEFT_SYMMETRIC;
772 break;
773 default:
774 return err_bad_ddf_layout(conf);
775 }
776 level = 5;
777 break;
778 case DDF_RAID6:
779 switch (conf->rlq) {
780 case DDF_RAID5_N_RESTART:
781 layout = ALGORITHM_ROTATING_N_RESTART;
782 break;
783 case DDF_RAID6_0_RESTART:
784 layout = ALGORITHM_ROTATING_ZERO_RESTART;
785 break;
786 case DDF_RAID5_N_CONTINUE:
787 layout = ALGORITHM_ROTATING_N_CONTINUE;
788 break;
789 default:
790 return err_bad_ddf_layout(conf);
791 }
792 level = 6;
793 break;
794 default:
795 return err_bad_ddf_layout(conf);
796 };
797
798 good:
799 array->level = level;
800 array->layout = layout;
801 array->raid_disks = raiddisks;
802 return 0;
803 }
804
805 static int load_ddf_header(int fd, unsigned long long lba,
806 unsigned long long size,
807 int type,
808 struct ddf_header *hdr, struct ddf_header *anchor)
809 {
810 /* read a ddf header (primary or secondary) from fd/lba
811 * and check that it is consistent with anchor
812 * Need to check:
813 * magic, crc, guid, rev, and LBA's header_type, and
814 * everything after header_type must be the same
815 */
816 if (lba >= size-1)
817 return 0;
818
819 if (lseek64(fd, lba<<9, 0) < 0)
820 return 0;
821
822 if (read(fd, hdr, 512) != 512)
823 return 0;
824
825 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
826 pr_err("bad header magic\n");
827 return 0;
828 }
829 if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
830 pr_err("bad CRC\n");
831 return 0;
832 }
833 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
834 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
835 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
836 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
837 hdr->type != type ||
838 memcmp(anchor->pad2, hdr->pad2, 512 -
839 offsetof(struct ddf_header, pad2)) != 0) {
840 pr_err("header mismatch\n");
841 return 0;
842 }
843
844 /* Looks good enough to me... */
845 return 1;
846 }
847
848 static void *load_section(int fd, struct ddf_super *super, void *buf,
849 be32 offset_be, be32 len_be, int check)
850 {
851 unsigned long long offset = be32_to_cpu(offset_be);
852 unsigned long long len = be32_to_cpu(len_be);
853 int dofree = (buf == NULL);
854
855 if (check)
856 if (len != 2 && len != 8 && len != 32 &&
857 len != 128 && len != 512)
858 return NULL;
859
860 if (len > 1024)
861 return NULL;
862 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
863 buf = NULL;
864
865 if (!buf)
866 return NULL;
867
868 if (super->active->type == 1)
869 offset += be64_to_cpu(super->active->primary_lba);
870 else
871 offset += be64_to_cpu(super->active->secondary_lba);
872
873 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
874 if (dofree)
875 free(buf);
876 return NULL;
877 }
878 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
879 if (dofree)
880 free(buf);
881 return NULL;
882 }
883 return buf;
884 }
885
886 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
887 {
888 unsigned long long dsize;
889
890 get_dev_size(fd, NULL, &dsize);
891
892 if (lseek64(fd, dsize-512, 0) < 0) {
893 if (devname)
894 pr_err("Cannot seek to anchor block on %s: %s\n",
895 devname, strerror(errno));
896 return 1;
897 }
898 if (read(fd, &super->anchor, 512) != 512) {
899 if (devname)
900 pr_err("Cannot read anchor block on %s: %s\n",
901 devname, strerror(errno));
902 return 1;
903 }
904 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
905 if (devname)
906 pr_err("no DDF anchor found on %s\n",
907 devname);
908 return 2;
909 }
910 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
911 if (devname)
912 pr_err("bad CRC on anchor on %s\n",
913 devname);
914 return 2;
915 }
916 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
917 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
918 if (devname)
919 pr_err("can only support super revision %.8s and earlier, not %.8s on %s\n",
920 DDF_REVISION_2, super->anchor.revision,devname);
921 return 2;
922 }
923 super->active = NULL;
924 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
925 dsize >> 9, 1,
926 &super->primary, &super->anchor) == 0) {
927 if (devname)
928 pr_err("Failed to load primary DDF header on %s\n", devname);
929 } else
930 super->active = &super->primary;
931
932 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
933 dsize >> 9, 2,
934 &super->secondary, &super->anchor)) {
935 if (super->active == NULL ||
936 (be32_to_cpu(super->primary.seq)
937 < be32_to_cpu(super->secondary.seq) &&
938 !super->secondary.openflag) ||
939 (be32_to_cpu(super->primary.seq) ==
940 be32_to_cpu(super->secondary.seq) &&
941 super->primary.openflag && !super->secondary.openflag))
942 super->active = &super->secondary;
943 } else if (devname &&
944 be64_to_cpu(super->anchor.secondary_lba) != ~(__u64)0)
945 pr_err("Failed to load secondary DDF header on %s\n",
946 devname);
947 if (super->active == NULL)
948 return 2;
949 return 0;
950 }
951
952 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
953 {
954 void *ok;
955 ok = load_section(fd, super, &super->controller,
956 super->active->controller_section_offset,
957 super->active->controller_section_length,
958 0);
959 super->phys = load_section(fd, super, NULL,
960 super->active->phys_section_offset,
961 super->active->phys_section_length,
962 1);
963 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
964
965 super->virt = load_section(fd, super, NULL,
966 super->active->virt_section_offset,
967 super->active->virt_section_length,
968 1);
969 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
970 if (!ok ||
971 !super->phys ||
972 !super->virt) {
973 free(super->phys);
974 free(super->virt);
975 super->phys = NULL;
976 super->virt = NULL;
977 return 2;
978 }
979 super->conflist = NULL;
980 super->dlist = NULL;
981
982 super->max_part = be16_to_cpu(super->active->max_partitions);
983 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
984 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
985 return 0;
986 }
987
988 #define DDF_UNUSED_BVD 0xff
989 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
990 {
991 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
992 unsigned int i, vdsize;
993 void *p;
994 if (n_vds == 0) {
995 vcl->other_bvds = NULL;
996 return 0;
997 }
998 vdsize = ddf->conf_rec_len * 512;
999 if (posix_memalign(&p, 512, n_vds *
1000 (vdsize + sizeof(struct vd_config *))) != 0)
1001 return -1;
1002 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
1003 for (i = 0; i < n_vds; i++) {
1004 vcl->other_bvds[i] = p + i * vdsize;
1005 memset(vcl->other_bvds[i], 0, vdsize);
1006 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
1007 }
1008 return 0;
1009 }
1010
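/* Layout of the single allocation made by alloc_other_bvds() above
 * (illustrative):
 *
 *	p
 *	|<-- vdsize -->|<-- vdsize -->| ... |<- n_vds pointers ->|
 *	   other bvd 0    other bvd 1          other_bvds[]
 *
 * other_bvds[i] points back into the same buffer at p + i * vdsize, so
 * freeing other_bvds[0] releases everything (see free_super_ddf()).
 */
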
1011 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
1012 unsigned int len)
1013 {
1014 int i;
1015 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
1016 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
1017 break;
1018
1019 if (i < vcl->conf.sec_elmnt_count-1) {
1020 if (be32_to_cpu(vd->seqnum) <=
1021 be32_to_cpu(vcl->other_bvds[i]->seqnum))
1022 return;
1023 } else {
1024 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
1025 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
1026 break;
1027 if (i == vcl->conf.sec_elmnt_count-1) {
1028 pr_err("no space for sec level config %u, count is %u\n",
1029 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
1030 return;
1031 }
1032 }
1033 memcpy(vcl->other_bvds[i], vd, len);
1034 }
1035
1036 static int load_ddf_local(int fd, struct ddf_super *super,
1037 char *devname, int keep)
1038 {
1039 struct dl *dl;
1040 struct stat stb;
1041 char *conf;
1042 unsigned int i;
1043 unsigned int confsec;
1044 int vnum;
1045 unsigned int max_virt_disks =
1046 be16_to_cpu(super->active->max_vd_entries);
1047 unsigned long long dsize;
1048
1049 /* First the local disk info */
1050 if (posix_memalign((void**)&dl, 512,
1051 sizeof(*dl) +
1052 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
1053 pr_err("could not allocate disk info buffer\n");
1054 return 1;
1055 }
1056
1057 load_section(fd, super, &dl->disk,
1058 super->active->data_section_offset,
1059 super->active->data_section_length,
1060 0);
1061 dl->devname = devname ? xstrdup(devname) : NULL;
1062
1063 fstat(fd, &stb);
1064 dl->major = major(stb.st_rdev);
1065 dl->minor = minor(stb.st_rdev);
1066 dl->next = super->dlist;
1067 dl->fd = keep ? fd : -1;
1068
1069 dl->size = 0;
1070 if (get_dev_size(fd, devname, &dsize))
1071 dl->size = dsize >> 9;
1072 /* If the disks have different sizes, the LBAs will differ
1073 * between phys disks.
1074 * At this point here, the values in super->active must be valid
1075 * for this phys disk. */
1076 dl->primary_lba = super->active->primary_lba;
1077 dl->secondary_lba = super->active->secondary_lba;
1078 dl->workspace_lba = super->active->workspace_lba;
1079 dl->spare = NULL;
1080 for (i = 0 ; i < super->max_part ; i++)
1081 dl->vlist[i] = NULL;
1082 super->dlist = dl;
1083 dl->pdnum = -1;
1084 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1085 if (memcmp(super->phys->entries[i].guid,
1086 dl->disk.guid, DDF_GUID_LEN) == 0)
1087 dl->pdnum = i;
1088
1089 /* Now the config list. */
1090 /* 'conf' is an array of config entries, some of which are
1091 * probably invalid. Those which are good need to be copied into
1092 * the conflist
1093 */
1094
1095 conf = load_section(fd, super, super->conf,
1096 super->active->config_section_offset,
1097 super->active->config_section_length,
1098 0);
1099 super->conf = conf;
1100 vnum = 0;
1101 for (confsec = 0;
1102 confsec < be32_to_cpu(super->active->config_section_length);
1103 confsec += super->conf_rec_len) {
1104 struct vd_config *vd =
1105 (struct vd_config *)((char*)conf + confsec*512);
1106 struct vcl *vcl;
1107
1108 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1109 if (dl->spare)
1110 continue;
1111 if (posix_memalign((void**)&dl->spare, 512,
1112 super->conf_rec_len*512) != 0) {
1113 pr_err("could not allocate spare info buf\n");
1114 return 1;
1115 }
1116
1117 memcpy(dl->spare, vd, super->conf_rec_len*512);
1118 continue;
1119 }
1120 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1121 /* Must be vendor-unique - I cannot handle those */
1122 continue;
1123
1124 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1125 if (memcmp(vcl->conf.guid,
1126 vd->guid, DDF_GUID_LEN) == 0)
1127 break;
1128 }
1129
1130 if (vcl) {
1131 dl->vlist[vnum++] = vcl;
1132 if (vcl->other_bvds != NULL &&
1133 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1134 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1135 continue;
1136 }
1137 if (be32_to_cpu(vd->seqnum) <=
1138 be32_to_cpu(vcl->conf.seqnum))
1139 continue;
1140 } else {
1141 if (posix_memalign((void**)&vcl, 512,
1142 (super->conf_rec_len*512 +
1143 offsetof(struct vcl, conf))) != 0) {
1144 pr_err("could not allocate vcl buf\n");
1145 return 1;
1146 }
1147 vcl->next = super->conflist;
1148 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1149 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1150 if (alloc_other_bvds(super, vcl) != 0) {
1151 pr_err("could not allocate other bvds\n");
1152 free(vcl);
1153 return 1;
1154 };
1155 super->conflist = vcl;
1156 dl->vlist[vnum++] = vcl;
1157 }
1158 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1159 for (i=0; i < max_virt_disks ; i++)
1160 if (memcmp(super->virt->entries[i].guid,
1161 vcl->conf.guid, DDF_GUID_LEN)==0)
1162 break;
1163 if (i < max_virt_disks)
1164 vcl->vcnum = i;
1165 }
1166
1167 return 0;
1168 }
1169
1170 static int load_super_ddf(struct supertype *st, int fd,
1171 char *devname)
1172 {
1173 unsigned long long dsize;
1174 struct ddf_super *super;
1175 int rv;
1176
1177 if (get_dev_size(fd, devname, &dsize) == 0)
1178 return 1;
1179
1180 if (test_partition(fd))
1181 /* DDF is not allowed on partitions */
1182 return 1;
1183
1184 /* 32M is a lower bound */
1185 if (dsize <= 32*1024*1024) {
1186 if (devname)
1187 pr_err("%s is too small for ddf: size is %llu sectors.\n",
1188 devname, dsize>>9);
1189 return 1;
1190 }
1191 if (dsize & 511) {
1192 if (devname)
1193 pr_err("%s is an odd size for ddf: size is %llu bytes.\n",
1194 devname, dsize);
1195 return 1;
1196 }
1197
1198 free_super_ddf(st);
1199
1200 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1201 pr_err("malloc of %zu failed.\n",
1202 sizeof(*super));
1203 return 1;
1204 }
1205 memset(super, 0, sizeof(*super));
1206
1207 rv = load_ddf_headers(fd, super, devname);
1208 if (rv) {
1209 free(super);
1210 return rv;
1211 }
1212
1213 /* Have valid headers and have chosen the best. Let's read in the rest*/
1214
1215 rv = load_ddf_global(fd, super, devname);
1216
1217 if (rv) {
1218 if (devname)
1219 pr_err("Failed to load all information sections on %s\n", devname);
1220 free(super);
1221 return rv;
1222 }
1223
1224 rv = load_ddf_local(fd, super, devname, 0);
1225
1226 if (rv) {
1227 if (devname)
1228 pr_err("Failed to load all information sections on %s\n", devname);
1229 free(super);
1230 return rv;
1231 }
1232
1233 /* Should possibly check the sections .... */
1234
1235 st->sb = super;
1236 if (st->ss == NULL) {
1237 st->ss = &super_ddf;
1238 st->minor_version = 0;
1239 st->max_devs = 512;
1240 }
1241 return 0;
1242
1243 }
1244
1245 static void free_super_ddf(struct supertype *st)
1246 {
1247 struct ddf_super *ddf = st->sb;
1248 if (ddf == NULL)
1249 return;
1250 free(ddf->phys);
1251 free(ddf->virt);
1252 free(ddf->conf);
1253 while (ddf->conflist) {
1254 struct vcl *v = ddf->conflist;
1255 ddf->conflist = v->next;
1256 if (v->block_sizes)
1257 free(v->block_sizes);
1258 if (v->other_bvds)
1259 /*
1260 v->other_bvds[0] points to beginning of buffer,
1261 see alloc_other_bvds()
1262 */
1263 free(v->other_bvds[0]);
1264 free(v);
1265 }
1266 while (ddf->dlist) {
1267 struct dl *d = ddf->dlist;
1268 ddf->dlist = d->next;
1269 if (d->fd >= 0)
1270 close(d->fd);
1271 if (d->spare)
1272 free(d->spare);
1273 free(d);
1274 }
1275 while (ddf->add_list) {
1276 struct dl *d = ddf->add_list;
1277 ddf->add_list = d->next;
1278 if (d->fd >= 0)
1279 close(d->fd);
1280 if (d->spare)
1281 free(d->spare);
1282 free(d);
1283 }
1284 free(ddf);
1285 st->sb = NULL;
1286 }
1287
1288 static struct supertype *match_metadata_desc_ddf(char *arg)
1289 {
1290 /* 'ddf' only supports containers */
1291 struct supertype *st;
1292 if (strcmp(arg, "ddf") != 0 &&
1293 strcmp(arg, "default") != 0
1294 )
1295 return NULL;
1296
1297 st = xcalloc(1, sizeof(*st));
1298 st->ss = &super_ddf;
1299 st->max_devs = 512;
1300 st->minor_version = 0;
1301 st->sb = NULL;
1302 return st;
1303 }
1304
1305 static mapping_t ddf_state[] = {
1306 { "Optimal", 0},
1307 { "Degraded", 1},
1308 { "Deleted", 2},
1309 { "Missing", 3},
1310 { "Failed", 4},
1311 { "Partially Optimal", 5},
1312 { "-reserved-", 6},
1313 { "-reserved-", 7},
1314 { NULL, 0}
1315 };
1316
1317 static mapping_t ddf_init_state[] = {
1318 { "Not Initialised", 0},
1319 { "QuickInit in Progress", 1},
1320 { "Fully Initialised", 2},
1321 { "*UNKNOWN*", 3},
1322 { NULL, 0}
1323 };
1324 static mapping_t ddf_access[] = {
1325 { "Read/Write", 0},
1326 { "Reserved", 1},
1327 { "Read Only", 2},
1328 { "Blocked (no access)", 3},
1329 { NULL ,0}
1330 };
1331
1332 static mapping_t ddf_level[] = {
1333 { "RAID0", DDF_RAID0},
1334 { "RAID1", DDF_RAID1},
1335 { "RAID3", DDF_RAID3},
1336 { "RAID4", DDF_RAID4},
1337 { "RAID5", DDF_RAID5},
1338 { "RAID1E",DDF_RAID1E},
1339 { "JBOD", DDF_JBOD},
1340 { "CONCAT",DDF_CONCAT},
1341 { "RAID5E",DDF_RAID5E},
1342 { "RAID5EE",DDF_RAID5EE},
1343 { "RAID6", DDF_RAID6},
1344 { NULL, 0}
1345 };
1346 static mapping_t ddf_sec_level[] = {
1347 { "Striped", DDF_2STRIPED},
1348 { "Mirrored", DDF_2MIRRORED},
1349 { "Concat", DDF_2CONCAT},
1350 { "Spanned", DDF_2SPANNED},
1351 { NULL, 0}
1352 };
1353
1354 static int all_ff(const char *guid)
1355 {
1356 int i;
1357 for (i = 0; i < DDF_GUID_LEN; i++)
1358 if (guid[i] != (char)0xff)
1359 return 0;
1360 return 1;
1361 }
1362
1363 static const char *guid_str(const char *guid)
1364 {
1365 static char buf[DDF_GUID_LEN*2+1];
1366 int i;
1367 char *p = buf;
1368 for (i = 0; i < DDF_GUID_LEN; i++) {
1369 unsigned char c = guid[i];
1370 if (c >= 32 && c < 127)
1371 p += sprintf(p, "%c", c);
1372 else
1373 p += sprintf(p, "%02x", c);
1374 }
1375 *p = '\0';
1376 return (const char *) buf;
1377 }
1378
1379 static void print_guid(char *guid, int tstamp)
1380 {
1381 /* GUIDs are part (or all) ASCII and part binary.
1382 * They tend to be space padded.
1383 * We print the GUID in HEX, then in parentheses add
1384 * any initial ASCII sequence, and a possible
1385 * time stamp from bytes 16-19
1386 */
1387 int l = DDF_GUID_LEN;
1388 int i;
1389
1390 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1391 if ((i&3)==0 && i != 0) printf(":");
1392 printf("%02X", guid[i]&255);
1393 }
1394
1395 printf("\n (");
1396 while (l && guid[l-1] == ' ')
1397 l--;
1398 for (i=0 ; i<l ; i++) {
1399 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1400 fputc(guid[i], stdout);
1401 else
1402 break;
1403 }
1404 if (tstamp) {
1405 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1406 char tbuf[100];
1407 struct tm *tm;
1408 tm = localtime(&then);
1409 strftime(tbuf, 100, " %D %T",tm);
1410 fputs(tbuf, stdout);
1411 }
1412 printf(")");
1413 }
1414
1415 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1416 {
1417 int crl = sb->conf_rec_len;
1418 struct vcl *vcl;
1419
1420 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1421 unsigned int i;
1422 struct vd_config *vc = &vcl->conf;
1423
1424 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1425 continue;
1426 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1427 continue;
1428
1429 /* Ok, we know about this VD, let's give more details */
1430 printf(" Raid Devices[%d] : %d (", n,
1431 be16_to_cpu(vc->prim_elmnt_count));
1432 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1433 int j;
1434 int cnt = be16_to_cpu(sb->phys->max_pdes);
1435 for (j=0; j<cnt; j++)
1436 if (be32_eq(vc->phys_refnum[i],
1437 sb->phys->entries[j].refnum))
1438 break;
1439 if (i) printf(" ");
1440 if (j < cnt)
1441 printf("%d", j);
1442 else
1443 printf("--");
1444 printf("@%lluK", (unsigned long long) be64_to_cpu(LBA_OFFSET(sb, vc)[i])/2);
1445 }
1446 printf(")\n");
1447 if (vc->chunk_shift != 255)
1448 printf(" Chunk Size[%d] : %d sectors\n", n,
1449 1 << vc->chunk_shift);
1450 printf(" Raid Level[%d] : %s\n", n,
1451 map_num(ddf_level, vc->prl)?:"-unknown-");
1452 if (vc->sec_elmnt_count != 1) {
1453 printf(" Secondary Position[%d] : %d of %d\n", n,
1454 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1455 printf(" Secondary Level[%d] : %s\n", n,
1456 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1457 }
1458 printf(" Device Size[%d] : %llu\n", n,
1459 be64_to_cpu(vc->blocks)/2);
1460 printf(" Array Size[%d] : %llu\n", n,
1461 be64_to_cpu(vc->array_blocks)/2);
1462 }
1463 }
1464
1465 static void examine_vds(struct ddf_super *sb)
1466 {
1467 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1468 unsigned int i;
1469 printf(" Virtual Disks : %d\n", cnt);
1470
1471 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1472 struct virtual_entry *ve = &sb->virt->entries[i];
1473 if (all_ff(ve->guid))
1474 continue;
1475 printf("\n");
1476 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1477 printf("\n");
1478 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1479 printf(" state[%d] : %s, %s%s\n", i,
1480 map_num(ddf_state, ve->state & 7),
1481 (ve->state & DDF_state_morphing) ? "Morphing, ": "",
1482 (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
1483 printf(" init state[%d] : %s\n", i,
1484 map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
1485 printf(" access[%d] : %s\n", i,
1486 map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
1487 printf(" Name[%d] : %.16s\n", i, ve->name);
1488 examine_vd(i, sb, ve->guid);
1489 }
1490 if (cnt) printf("\n");
1491 }
1492
1493 static void examine_pds(struct ddf_super *sb)
1494 {
1495 int cnt = be16_to_cpu(sb->phys->max_pdes);
1496 int i;
1497 struct dl *dl;
1498 int unlisted = 0;
1499 printf(" Physical Disks : %d\n", cnt);
1500 printf(" Number RefNo Size Device Type/State\n");
1501
1502 for (dl = sb->dlist; dl; dl = dl->next)
1503 dl->displayed = 0;
1504
1505 for (i=0 ; i<cnt ; i++) {
1506 struct phys_disk_entry *pd = &sb->phys->entries[i];
1507 int type = be16_to_cpu(pd->type);
1508 int state = be16_to_cpu(pd->state);
1509
1510 if (be32_to_cpu(pd->refnum) == 0xffffffff)
1511 /* Not in use */
1512 continue;
1513 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1514 //printf("\n");
1515 printf(" %3d %08x ", i,
1516 be32_to_cpu(pd->refnum));
1517 printf("%8lluK ",
1518 be64_to_cpu(pd->config_size)>>1);
1519 for (dl = sb->dlist; dl ; dl = dl->next) {
1520 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1521 char *dv = map_dev(dl->major, dl->minor, 0);
1522 if (dv) {
1523 printf("%-15s", dv);
1524 break;
1525 }
1526 }
1527 }
1528 if (!dl)
1529 printf("%15s","");
1530 else
1531 dl->displayed = 1;
1532 printf(" %s%s%s%s%s",
1533 (type&2) ? "active":"",
1534 (type&4) ? "Global-Spare":"",
1535 (type&8) ? "spare" : "",
1536 (type&16)? ", foreign" : "",
1537 (type&32)? "pass-through" : "");
1538 if (state & DDF_Failed)
1539 /* This over-rides these three */
1540 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1541 printf("/%s%s%s%s%s%s%s",
1542 (state&1)? "Online": "Offline",
1543 (state&2)? ", Failed": "",
1544 (state&4)? ", Rebuilding": "",
1545 (state&8)? ", in-transition": "",
1546 (state&16)? ", SMART-errors": "",
1547 (state&32)? ", Unrecovered-Read-Errors": "",
1548 (state&64)? ", Missing" : "");
1549 printf("\n");
1550 }
1551 for (dl = sb->dlist; dl; dl = dl->next) {
1552 char *dv;
1553 if (dl->displayed)
1554 continue;
1555 if (!unlisted)
1556 printf(" Physical disks not in metadata!:\n");
1557 unlisted = 1;
1558 dv = map_dev(dl->major, dl->minor, 0);
1559 printf(" %08x %s\n", be32_to_cpu(dl->disk.refnum),
1560 dv ? dv : "-unknown-");
1561 }
1562 if (unlisted)
1563 printf("\n");
1564 }
1565
1566 static void examine_super_ddf(struct supertype *st, char *homehost)
1567 {
1568 struct ddf_super *sb = st->sb;
1569
1570 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1571 printf(" Version : %.8s\n", sb->anchor.revision);
1572 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1573 printf("\n");
1574 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1575 printf("\n");
1576 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1577 printf(" Redundant hdr : %s\n", (be32_eq(sb->secondary.magic,
1578 DDF_HEADER_MAGIC)
1579 ?"yes" : "no"));
1580 examine_vds(sb);
1581 examine_pds(sb);
1582 }
1583
1584 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1585 {
1586 /*
1587 * Figure out the VD number for this supertype.
1588 * Returns DDF_CONTAINER for the container itself,
1589 * and DDF_NOTFOUND on error.
1590 */
1591 struct ddf_super *ddf = st->sb;
1592 struct mdinfo *sra;
1593 char *sub, *end;
1594 unsigned int vcnum;
1595
1596 if (*st->container_devnm == '\0')
1597 return DDF_CONTAINER;
1598
1599 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1600 if (!sra || sra->array.major_version != -1 ||
1601 sra->array.minor_version != -2 ||
1602 !is_subarray(sra->text_version))
1603 return DDF_NOTFOUND;
1604
1605 sub = strchr(sra->text_version + 1, '/');
1606 if (sub != NULL)
1607 vcnum = strtoul(sub + 1, &end, 10);
1608 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1609 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1610 return DDF_NOTFOUND;
1611
1612 return vcnum;
1613 }
1614
1615 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1616 {
1617 /* We just write a generic DDF ARRAY entry
1618 */
1619 struct mdinfo info;
1620 char nbuf[64];
1621 getinfo_super_ddf(st, &info, NULL);
1622 fname_from_uuid(st, &info, nbuf, ':');
1623
1624 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1625 }
1626
1627 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1628 {
1629 /* We write a DDF ARRAY member entry for each vd, identifying container
1630 * by uuid and member by unit number and uuid.
1631 */
1632 struct ddf_super *ddf = st->sb;
1633 struct mdinfo info;
1634 unsigned int i;
1635 char nbuf[64];
1636 getinfo_super_ddf(st, &info, NULL);
1637 fname_from_uuid(st, &info, nbuf, ':');
1638
1639 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1640 struct virtual_entry *ve = &ddf->virt->entries[i];
1641 struct vcl vcl;
1642 char nbuf1[64];
1643 char namebuf[17];
1644 if (all_ff(ve->guid))
1645 continue;
1646 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1647 ddf->currentconf =&vcl;
1648 vcl.vcnum = i;
1649 uuid_from_super_ddf(st, info.uuid);
1650 fname_from_uuid(st, &info, nbuf1, ':');
1651 _ddf_array_name(namebuf, ddf, i);
1652 printf("ARRAY%s%s container=%s member=%d UUID=%s\n",
1653 namebuf[0] == '\0' ? "" : " /dev/md/", namebuf,
1654 nbuf+5, i, nbuf1+5);
1655 }
1656 }
1657
1658 static void export_examine_super_ddf(struct supertype *st)
1659 {
1660 struct mdinfo info;
1661 char nbuf[64];
1662 getinfo_super_ddf(st, &info, NULL);
1663 fname_from_uuid(st, &info, nbuf, ':');
1664 printf("MD_METADATA=ddf\n");
1665 printf("MD_LEVEL=container\n");
1666 printf("MD_UUID=%s\n", nbuf+5);
1667 printf("MD_DEVICES=%u\n",
1668 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1669 }
1670
1671 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1672 {
1673 void *buf;
1674 unsigned long long dsize, offset;
1675 int bytes;
1676 struct ddf_header *ddf;
1677 int written = 0;
1678
1679 /* The meta consists of an anchor, a primary, and a secondary.
1680 * This all lives at the end of the device.
1681 * So it is easiest to find the earliest of primary and
1682 * secondary, and copy everything from there.
1683 *
1684 * Anchor is 512 from end. It contains primary_lba and secondary_lba
1685 * and we copy from whichever of those is earlier on the device.
1686 */
1687
1688 if (posix_memalign(&buf, 4096, 4096) != 0)
1689 return 1;
1690
1691 if (!get_dev_size(from, NULL, &dsize))
1692 goto err;
1693
1694 if (lseek64(from, dsize-512, 0) < 0)
1695 goto err;
1696 if (read(from, buf, 512) != 512)
1697 goto err;
1698 ddf = buf;
1699 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1700 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1701 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1702 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1703 goto err;
1704
1705 offset = dsize - 512;
1706 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1707 offset = be64_to_cpu(ddf->primary_lba) << 9;
1708 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1709 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1710
1711 bytes = dsize - offset;
1712
1713 if (lseek64(from, offset, 0) < 0 ||
1714 lseek64(to, offset, 0) < 0)
1715 goto err;
1716 while (written < bytes) {
1717 int n = bytes - written;
1718 if (n > 4096)
1719 n = 4096;
1720 if (read(from, buf, n) != n)
1721 goto err;
1722 if (write(to, buf, n) != n)
1723 goto err;
1724 written += n;
1725 }
1726 free(buf);
1727 return 0;
1728 err:
1729 free(buf);
1730 return 1;
1731 }
1732
1733 static void detail_super_ddf(struct supertype *st, char *homehost)
1734 {
1735 struct ddf_super *sb = st->sb;
1736 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1737
1738 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1739 printf("\n");
1740 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1741 printf(" Virtual Disks : %d\n", cnt);
1742 printf("\n");
1743 }
1744
1745 static const char *vendors_with_variable_volume_UUID[] = {
1746 "LSI ",
1747 };
1748
1749 static int volume_id_is_reliable(const struct ddf_super *ddf)
1750 {
1751 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1752 int i;
1753 for (i = 0; i < n; i++)
1754 if (!memcmp(ddf->controller.guid,
1755 vendors_with_variable_volume_UUID[i], 8))
1756 return 0;
1757 return 1;
1758 }
1759
1760 static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
1761 unsigned int vcnum, int uuid[4])
1762 {
1763 char buf[DDF_GUID_LEN+18], sha[20], *p;
1764 struct sha1_ctx ctx;
1765 if (volume_id_is_reliable(ddf)) {
1766 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
1767 return;
1768 }
1769 /*
1770 * Some fake RAID BIOSes (in particular, LSI ones) change the
1771 * VD GUID at every boot. These GUIDs are not suitable for
1772 * identifying an array. Luckily the header GUID appears to
1773 * remain constant.
1774 * We construct a pseudo-UUID from the header GUID and those
1775 * properties of the subarray that we expect to remain constant.
1776 */
1777 memset(buf, 0, sizeof(buf));
1778 p = buf;
1779 memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
1780 p += DDF_GUID_LEN;
1781 memcpy(p, ddf->virt->entries[vcnum].name, 16);
1782 p += 16;
1783 *((__u16 *) p) = vcnum;
1784 sha1_init_ctx(&ctx);
1785 sha1_process_bytes(buf, sizeof(buf), &ctx);
1786 sha1_finish_ctx(&ctx, sha);
1787 memcpy(uuid, sha, 4*4);
1788 }
1789
1790 static void brief_detail_super_ddf(struct supertype *st)
1791 {
1792 struct mdinfo info;
1793 char nbuf[64];
1794 struct ddf_super *ddf = st->sb;
1795 unsigned int vcnum = get_vd_num_of_subarray(st);
1796 if (vcnum == DDF_CONTAINER)
1797 uuid_from_super_ddf(st, info.uuid);
1798 else if (vcnum == DDF_NOTFOUND)
1799 return;
1800 else
1801 uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
1802 fname_from_uuid(st, &info, nbuf,':');
1803 printf(" UUID=%s", nbuf + 5);
1804 }
1805
1806 static int match_home_ddf(struct supertype *st, char *homehost)
1807 {
1808 /* It matches 'this' host if the controller is a
1809 * Linux-MD controller with vendor_data matching
1810 * the hostname. It would be nice if we could
1811 * test against controller found in /sys or somewhere...
1812 */
1813 struct ddf_super *ddf = st->sb;
1814 unsigned int len;
1815
1816 if (!homehost)
1817 return 0;
1818 len = strlen(homehost);
1819
1820 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1821 len < sizeof(ddf->controller.vendor_data) &&
1822 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1823 ddf->controller.vendor_data[len] == 0);
1824 }
1825
1826 static int find_index_in_bvd(const struct ddf_super *ddf,
1827 const struct vd_config *conf, unsigned int n,
1828 unsigned int *n_bvd)
1829 {
1830 /*
1831 * Find the index of the n-th valid physical disk in this BVD.
1832 * Unused entries can be sprinkled in with the used entries,
1833 * but don't count.
1834 */
1835 unsigned int i, j;
1836 for (i = 0, j = 0;
1837 i < ddf->mppe && j < be16_to_cpu(conf->prim_elmnt_count);
1838 i++) {
1839 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1840 if (n == j) {
1841 *n_bvd = i;
1842 return 1;
1843 }
1844 j++;
1845 }
1846 }
1847 dprintf("couldn't find BVD member %u (total %u)\n",
1848 n, be16_to_cpu(conf->prim_elmnt_count));
1849 return 0;
1850 }
1851
1852 /* Given a member array instance number, and a raid disk within that instance,
1853 * find the vd_config structure. The offset of the given disk in the phys_refnum
1854 * table is returned in n_bvd.
1855 * For two-level members with a secondary raid level the vd_config for
1856 * the appropriate BVD is returned.
1857 * The return value is always &vcl->conf, where vcl is returned in the last pointer.
1858 */
1859 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1860 unsigned int n,
1861 unsigned int *n_bvd, struct vcl **vcl)
1862 {
1863 struct vcl *v;
1864
1865 for (v = ddf->conflist; v; v = v->next) {
1866 unsigned int nsec, ibvd = 0;
1867 struct vd_config *conf;
1868 if (inst != v->vcnum)
1869 continue;
1870 conf = &v->conf;
1871 if (conf->sec_elmnt_count == 1) {
1872 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1873 *vcl = v;
1874 return conf;
1875 } else
1876 goto bad;
1877 }
1878 if (v->other_bvds == NULL) {
1879 pr_err("BUG: other_bvds is NULL, nsec=%u\n",
1880 conf->sec_elmnt_count);
1881 goto bad;
1882 }
1883 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1884 if (conf->sec_elmnt_seq != nsec) {
1885 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1886 if (v->other_bvds[ibvd-1]->sec_elmnt_seq ==
1887 nsec)
1888 break;
1889 }
1890 if (ibvd == conf->sec_elmnt_count)
1891 goto bad;
1892 conf = v->other_bvds[ibvd-1];
1893 }
1894 if (!find_index_in_bvd(ddf, conf,
1895 n - nsec*conf->sec_elmnt_count, n_bvd))
1896 goto bad;
1897 dprintf("found disk %u as member %u in bvd %d of array %u\n",
1898 n, *n_bvd, ibvd, inst);
1899 *vcl = v;
1900 return conf;
1901 }
1902 bad:
1903 pr_err("Couldn't find disk %d in array %u\n", n, inst);
1904 return NULL;
1905 }
1906
1907 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1908 {
1909 /* Find the entry in phys_disk which has the given refnum
1910 * and return its index
1911 */
1912 unsigned int i;
1913 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1914 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1915 return i;
1916 return -1;
1917 }
1918
1919 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1920 {
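/* Hash the 24-byte DDF GUID with SHA1 and use the first 16 bytes of the digest as the md uuid. */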
1921 char buf[20];
1922 struct sha1_ctx ctx;
1923 sha1_init_ctx(&ctx);
1924 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1925 sha1_finish_ctx(&ctx, buf);
1926 memcpy(uuid, buf, 4*4);
1927 }
1928
1929 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1930 {
1931 /* The uuid returned here is used for:
1932 * uuid to put into bitmap file (Create, Grow)
1933 * uuid for backup header when saving critical section (Grow)
1934 * comparing uuids when re-adding a device into an array
1935 * In these cases the uuid required is that of the data-array,
1936 * not the device-set.
1937 * uuid to recognise same set when adding a missing device back
1938 * to an array. This is a uuid for the device-set.
1939 *
1940 * For each of these we can make do with a truncated
1941 * or hashed uuid rather than the original, as long as
1942 * everyone agrees.
1943 * In the case of SVD we assume the BVD is of interest,
1944 * though that might not be the case if a bitmap were made for
1945 * a mirrored SVD - worry about that later.
1946 * So we need to find the VD configuration record for the
1947 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1948 * The first 16 bytes of the sha1 of these is used.
1949 */
1950 struct ddf_super *ddf = st->sb;
1951 struct vcl *vcl = ddf->currentconf;
1952
1953 if (vcl)
1954 uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
1955 else
1956 uuid_from_ddf_guid(ddf->anchor.guid, uuid);
1957 }
1958
1959 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1960 {
1961 struct ddf_super *ddf = st->sb;
1962 int map_disks = info->array.raid_disks;
1963 __u32 *cptr;
1964
1965 if (ddf->currentconf) {
1966 getinfo_super_ddf_bvd(st, info, map);
1967 return;
1968 }
1969 memset(info, 0, sizeof(*info));
1970
1971 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1972 info->array.level = LEVEL_CONTAINER;
1973 info->array.layout = 0;
1974 info->array.md_minor = -1;
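/* The creation time is the DDF timestamp stored in bytes 16-19 of the header GUID (see make_header_guid()). */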
1975 cptr = (__u32 *)(ddf->anchor.guid + 16);
1976 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1977
1978 info->array.chunk_size = 0;
1979 info->container_enough = 1;
1980
1981 info->disk.major = 0;
1982 info->disk.minor = 0;
1983 if (ddf->dlist) {
1984 struct phys_disk_entry *pde = NULL;
1985 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1986 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1987
1988 info->data_offset = be64_to_cpu(ddf->phys->
1989 entries[info->disk.raid_disk].
1990 config_size);
1991 info->component_size = ddf->dlist->size - info->data_offset;
1992 if (info->disk.raid_disk >= 0)
1993 pde = ddf->phys->entries + info->disk.raid_disk;
1994 if (pde &&
1995 !(be16_to_cpu(pde->state) & DDF_Failed) &&
1996 !(be16_to_cpu(pde->state) & DDF_Missing))
1997 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1998 else
1999 info->disk.state = 1 << MD_DISK_FAULTY;
2000
2001 } else {
2002 /* There should always be a dlist, but just in case... */
2003 info->disk.number = -1;
2004 info->disk.raid_disk = -1;
2005 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
2006 }
2007 info->events = be32_to_cpu(ddf->active->seq);
2008 info->array.utime = DECADE + be32_to_cpu(ddf->active->timestamp);
2009
2010 info->recovery_start = MaxSector;
2011 info->reshape_active = 0;
2012 info->recovery_blocked = 0;
2013 info->name[0] = 0;
2014
2015 info->array.major_version = -1;
2016 info->array.minor_version = -2;
2017 strcpy(info->text_version, "ddf");
2018 info->safe_mode_delay = 0;
2019
2020 uuid_from_super_ddf(st, info->uuid);
2021
2022 if (map) {
2023 int i, e = 0;
2024 int max = be16_to_cpu(ddf->phys->max_pdes);
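/* Walk the phys_disk table, skipping unused slots (refnum 0xffffffff), and mark failed disks as 0 in the map. */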
2025 for (i = e = 0 ; i < map_disks ; i++, e++) {
2026 while (e < max &&
2027 be32_to_cpu(ddf->phys->entries[e].refnum) == 0xffffffff)
2028 e++;
2029 if (i < info->array.raid_disks && e < max &&
2030 !(be16_to_cpu(ddf->phys->entries[e].state) &
2031 DDF_Failed))
2032 map[i] = 1;
2033 else
2034 map[i] = 0;
2035 }
2036 }
2037 }
2038
2039 /* size of name must be at least 17 bytes! */
2040 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i)
2041 {
2042 int j;
2043 memcpy(name, ddf->virt->entries[i].name, 16);
2044 name[16] = 0;
2045 for(j = 0; j < 16; j++)
2046 if (name[j] == ' ')
2047 name[j] = 0;
2048 }
2049
2050 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
2051 {
2052 struct ddf_super *ddf = st->sb;
2053 struct vcl *vc = ddf->currentconf;
2054 int cd = ddf->currentdev;
2055 int n_prim;
2056 int j;
2057 struct dl *dl = NULL;
2058 int map_disks = info->array.raid_disks;
2059 __u32 *cptr;
2060 struct vd_config *conf;
2061
2062 memset(info, 0, sizeof(*info));
2063 if (layout_ddf2md(&vc->conf, &info->array) == -1)
2064 return;
2065 info->array.md_minor = -1;
2066 cptr = (__u32 *)(vc->conf.guid + 16);
2067 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
2068 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
2069 info->array.chunk_size = 512 << vc->conf.chunk_shift;
2070 info->custom_array_size = be64_to_cpu(vc->conf.array_blocks);
2071
2072 conf = &vc->conf;
2073 n_prim = be16_to_cpu(conf->prim_elmnt_count);
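/* For arrays with a secondary level the current device index spans all BVDs; pick the BVD config that contains it. */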
2074 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
2075 int ibvd = cd / n_prim - 1;
2076 cd %= n_prim;
2077 conf = vc->other_bvds[ibvd];
2078 }
2079
2080 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
2081 info->data_offset =
2082 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
2083 if (vc->block_sizes)
2084 info->component_size = vc->block_sizes[cd];
2085 else
2086 info->component_size = be64_to_cpu(conf->blocks);
2087
2088 for (dl = ddf->dlist; dl ; dl = dl->next)
2089 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
2090 break;
2091 }
2092
2093 info->disk.major = 0;
2094 info->disk.minor = 0;
2095 info->disk.state = 0;
2096 if (dl && dl->pdnum >= 0) {
2097 info->disk.major = dl->major;
2098 info->disk.minor = dl->minor;
2099 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2100 * be16_to_cpu(conf->prim_elmnt_count);
2101 info->disk.number = dl->pdnum;
2102 info->disk.state = 0;
2103 if (info->disk.number >= 0 &&
2104 (be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Online) &&
2105 !(be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Failed))
2106 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2107 info->events = be32_to_cpu(ddf->active->seq);
2108 }
2109
2110 info->container_member = ddf->currentconf->vcnum;
2111
2112 info->recovery_start = MaxSector;
2113 info->resync_start = 0;
2114 info->reshape_active = 0;
2115 info->recovery_blocked = 0;
2116 if (!(ddf->virt->entries[info->container_member].state &
2117 DDF_state_inconsistent) &&
2118 (ddf->virt->entries[info->container_member].init_state &
2119 DDF_initstate_mask) == DDF_init_full)
2120 info->resync_start = MaxSector;
2121
2122 uuid_from_super_ddf(st, info->uuid);
2123
2124 info->array.major_version = -1;
2125 info->array.minor_version = -2;
2126 sprintf(info->text_version, "/%s/%d",
2127 st->container_devnm,
2128 info->container_member);
2129 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2130
2131 _ddf_array_name(info->name, ddf, info->container_member);
2132
2133 if (map)
2134 for (j = 0; j < map_disks; j++) {
2135 map[j] = 0;
2136 if (j < info->array.raid_disks) {
2137 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2138 if (i >= 0 &&
2139 (be16_to_cpu(ddf->phys->entries[i].state)
2140 & DDF_Online) &&
2141 !(be16_to_cpu(ddf->phys->entries[i].state)
2142 & DDF_Failed))
2143 map[i] = 1;
2144 }
2145 }
2146 }
2147
2148 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
2149 char *update,
2150 char *devname, int verbose,
2151 int uuid_set, char *homehost)
2152 {
2153 /* For 'assemble' and 'force' we need to return non-zero if any
2154 * change was made. For others, the return value is ignored.
2155 * Update options are:
2156 * force-one : This device looks a bit old but needs to be included,
2157 * update age info appropriately.
2158 * assemble: clear any 'faulty' flag to allow this device to
2159 * be assembled.
2160 * force-array: Array is degraded but being forced, mark it clean
2161 * if that will be needed to assemble it.
2162 *
2163 * newdev: not used ????
2164 * grow: Array has gained a new device - this is currently for
2165 * linear only
2166 * resync: mark as dirty so a resync will happen.
2167 * uuid: Change the uuid of the array to match what is given
2168 * homehost: update the recorded homehost
2169 * name: update the name - preserving the homehost
2170 * _reshape_progress: record new reshape_progress position.
2171 *
2172 * Following are not relevant for this version:
2173 * sparc2.2 : update from old dodgy metadata
2174 * super-minor: change the preferred_minor number
2175 * summaries: update redundant counters.
2176 */
2177 int rv = 0;
2178 // struct ddf_super *ddf = st->sb;
2179 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
2180 // struct virtual_entry *ve = find_ve(ddf);
2181
2182 /* we don't need to handle "force-*" or "assemble" as
2183 * there is no need to 'trick' the kernel. When the metadata is
2184 * first updated to activate the array, all the implied modifications
2185 * will just happen.
2186 */
2187
2188 if (strcmp(update, "grow") == 0) {
2189 /* FIXME */
2190 } else if (strcmp(update, "resync") == 0) {
2191 // info->resync_checkpoint = 0;
2192 } else if (strcmp(update, "homehost") == 0) {
2193 /* homehost is stored in controller->vendor_data,
2194 * or at least it is when we are the vendor
2195 */
2196 // if (info->vendor_is_local)
2197 // strcpy(ddf->controller.vendor_data, homehost);
2198 rv = -1;
2199 } else if (strcmp(update, "name") == 0) {
2200 /* name is stored in virtual_entry->name */
2201 // memset(ve->name, ' ', 16);
2202 // strncpy(ve->name, info->name, 16);
2203 rv = -1;
2204 } else if (strcmp(update, "_reshape_progress") == 0) {
2205 /* We don't support reshape yet */
2206 } else if (strcmp(update, "assemble") == 0 ) {
2207 /* Do nothing, just succeed */
2208 rv = 0;
2209 } else
2210 rv = -1;
2211
2212 // update_all_csum(ddf);
2213
2214 return rv;
2215 }
2216
2217 static void make_header_guid(char *guid)
2218 {
2219 be32 stamp;
2220 /* Create a DDF Header or Virtual Disk GUID */
2221
2222 /* 24 bytes of fiction required.
2223 * first 8 are a 'vendor-id' - "Linux-MD"
2224 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2225 * Remaining 8 are a timestamp plus a random number
2226 */
2227 memcpy(guid, T10, sizeof(T10));
2228 stamp = cpu_to_be32(0xdeadbeef);
2229 memcpy(guid+8, &stamp, 4);
2230 stamp = cpu_to_be32(0);
2231 memcpy(guid+12, &stamp, 4);
2232 stamp = cpu_to_be32(time(0) - DECADE);
2233 memcpy(guid+16, &stamp, 4);
2234 stamp._v32 = random32();
2235 memcpy(guid+20, &stamp, 4);
2236 }
2237
2238 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2239 {
2240 unsigned int i;
2241 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2242 if (all_ff(ddf->virt->entries[i].guid))
2243 return i;
2244 }
2245 return DDF_NOTFOUND;
2246 }
2247
2248 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2249 const char *name)
2250 {
2251 unsigned int i;
2252 if (name == NULL)
2253 return DDF_NOTFOUND;
2254 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2255 if (all_ff(ddf->virt->entries[i].guid))
2256 continue;
2257 if (!strncmp(name, ddf->virt->entries[i].name,
2258 sizeof(ddf->virt->entries[i].name)))
2259 return i;
2260 }
2261 return DDF_NOTFOUND;
2262 }
2263
2264 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2265 const char *guid)
2266 {
2267 unsigned int i;
2268 if (guid == NULL || all_ff(guid))
2269 return DDF_NOTFOUND;
2270 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2271 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2272 return i;
2273 return DDF_NOTFOUND;
2274 }
2275
2276 static int init_super_ddf(struct supertype *st,
2277 mdu_array_info_t *info,
2278 struct shape *s, char *name, char *homehost,
2279 int *uuid, unsigned long long data_offset)
2280 {
2281 /* This is primarily called by Create when creating a new array.
2282 * We will then get add_to_super called for each component, and then
2283 * write_init_super called to write it out to each device.
2284 * For DDF, Create can create on fresh devices or on a pre-existing
2285 * array.
2286 * To create on a pre-existing array a different method will be called.
2287 * This one is just for fresh drives.
2288 *
2289 * We need to create the entire 'ddf' structure which includes:
2290 * DDF headers - these are easy.
2291 * Controller data - a Sector describing this controller .. not that
2292 * this is a controller exactly.
2293 * Physical Disk Record - one entry per device, so
2294 * leave plenty of space.
2295 * Virtual Disk Records - again, just leave plenty of space.
2296 * This just lists VDs, doesn't give details.
2297 * Config records - describe the VDs that use this disk
2298 * DiskData - describes 'this' device.
2299 * BadBlockManagement - empty
2300 * Diag Space - empty
2301 * Vendor Logs - Could we put bitmaps here?
2302 *
2303 */
2304 struct ddf_super *ddf;
2305 char hostname[17];
2306 int hostlen;
2307 int max_phys_disks, max_virt_disks;
2308 unsigned long long sector;
2309 int clen;
2310 int i;
2311 int pdsize, vdsize;
2312 struct phys_disk *pd;
2313 struct virtual_disk *vd;
2314
2315 if (st->sb)
2316 return init_super_ddf_bvd(st, info, s->size, name, homehost, uuid,
2317 data_offset);
2318
2319 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2320 pr_err("could not allocate superblock\n");
2321 return 0;
2322 }
2323 memset(ddf, 0, sizeof(*ddf));
2324 st->sb = ddf;
2325
2326 if (info == NULL) {
2327 /* zeroing superblock */
2328 return 0;
2329 }
2330
2331 /* At least 32MB *must* be reserved for the ddf. So let's just
2332 * start 32MB from the end, and put the primary header there.
2333 * Don't do secondary for now.
2334 * We don't know exactly where that will be yet as it could be
2335 * different on each device. So just set up the lengths.
2336 */
2337
2338 ddf->anchor.magic = DDF_HEADER_MAGIC;
2339 make_header_guid(ddf->anchor.guid);
2340
2341 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2342 ddf->anchor.seq = cpu_to_be32(1);
2343 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2344 ddf->anchor.openflag = 0xFF;
2345 ddf->anchor.foreignflag = 0;
2346 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2347 ddf->anchor.pad0 = 0xff;
2348 memset(ddf->anchor.pad1, 0xff, 12);
2349 memset(ddf->anchor.header_ext, 0xff, 32);
2350 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2351 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2352 ddf->anchor.type = DDF_HEADER_ANCHOR;
2353 memset(ddf->anchor.pad2, 0xff, 3);
2354 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2355 /* Put this at bottom of 32M reserved.. */
2356 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2357 max_phys_disks = 1023; /* Should be enough, 4095 is also allowed */
2358 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2359 max_virt_disks = 255; /* 15, 63, 255, 1024, 4095 are all allowed */
2360 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks);
2361 ddf->max_part = 64;
2362 ddf->anchor.max_partitions = cpu_to_be16(ddf->max_part);
2363 ddf->mppe = 256; /* 16, 64, 256, 1024, 4096 are all allowed */
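/* One sector of vd_config header plus room for mppe 4-byte phys_refnums and 8-byte LBA offsets. */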
2364 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
2365 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2366 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2367 memset(ddf->anchor.pad3, 0xff, 54);
2368 /* Controller section is one sector long immediately
2369 * after the ddf header */
2370 sector = 1;
2371 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2372 ddf->anchor.controller_section_length = cpu_to_be32(1);
2373 sector += 1;
2374
2375 /* phys is 8 sectors after that */
2376 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2377 sizeof(struct phys_disk_entry)*max_phys_disks,
2378 512);
2379 switch(pdsize/512) {
2380 case 2: case 8: case 32: case 128: case 512: break;
2381 default: abort();
2382 }
2383 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2384 ddf->anchor.phys_section_length =
2385 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2386 sector += pdsize/512;
2387
2388 /* virt is another 32 sectors */
2389 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2390 sizeof(struct virtual_entry) * max_virt_disks,
2391 512);
2392 switch(vdsize/512) {
2393 case 2: case 8: case 32: case 128: case 512: break;
2394 default: abort();
2395 }
2396 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2397 ddf->anchor.virt_section_length =
2398 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2399 sector += vdsize/512;
2400
2401 clen = ddf->conf_rec_len * (ddf->max_part+1);
2402 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2403 ddf->anchor.config_section_length = cpu_to_be32(clen);
2404 sector += clen;
2405
2406 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2407 ddf->anchor.data_section_length = cpu_to_be32(1);
2408 sector += 1;
2409
2410 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2411 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2412 ddf->anchor.diag_space_length = cpu_to_be32(0);
2413 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2414 ddf->anchor.vendor_length = cpu_to_be32(0);
2415 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2416
2417 memset(ddf->anchor.pad4, 0xff, 256);
2418
2419 memcpy(&ddf->primary, &ddf->anchor, 512);
2420 memcpy(&ddf->secondary, &ddf->anchor, 512);
2421
2422 ddf->primary.openflag = 1; /* I guess.. */
2423 ddf->primary.type = DDF_HEADER_PRIMARY;
2424
2425 ddf->secondary.openflag = 1; /* I guess.. */
2426 ddf->secondary.type = DDF_HEADER_SECONDARY;
2427
2428 ddf->active = &ddf->primary;
2429
2430 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2431
2432 /* 24 more bytes of fiction required.
2433 * first 8 are a 'vendor-id' - "Linux-MD"
2434 * Remaining 16 are serial number.... maybe a hostname would do?
2435 */
2436 memcpy(ddf->controller.guid, T10, sizeof(T10));
2437 gethostname(hostname, sizeof(hostname));
2438 hostname[sizeof(hostname) - 1] = 0;
2439 hostlen = strlen(hostname);
2440 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2441 for (i = strlen(T10) ; i+hostlen < 24; i++)
2442 ddf->controller.guid[i] = ' ';
2443
2444 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2445 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2446 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2447 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2448 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2449 memset(ddf->controller.pad, 0xff, 8);
2450 memset(ddf->controller.vendor_data, 0xff, 448);
2451 if (homehost && strlen(homehost) < 440)
2452 strcpy((char*)ddf->controller.vendor_data, homehost);
2453
2454 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2455 pr_err("could not allocate pd\n");
2456 return 0;
2457 }
2458 ddf->phys = pd;
2459 ddf->pdsize = pdsize;
2460
2461 memset(pd, 0xff, pdsize);
2462 memset(pd, 0, sizeof(*pd));
2463 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2464 pd->used_pdes = cpu_to_be16(0);
2465 pd->max_pdes = cpu_to_be16(max_phys_disks);
2466 memset(pd->pad, 0xff, 52);
2467 for (i = 0; i < max_phys_disks; i++)
2468 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2469
2470 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2471 pr_err("could not allocate vd\n");
2472 return 0;
2473 }
2474 ddf->virt = vd;
2475 ddf->vdsize = vdsize;
2476 memset(vd, 0, vdsize);
2477 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2478 vd->populated_vdes = cpu_to_be16(0);
2479 vd->max_vdes = cpu_to_be16(max_virt_disks);
2480 memset(vd->pad, 0xff, 52);
2481
2482 for (i=0; i<max_virt_disks; i++)
2483 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2484
2485 st->sb = ddf;
2486 ddf_set_updates_pending(ddf, NULL);
2487 return 1;
2488 }
2489
2490 static int chunk_to_shift(int chunksize)
2491 {
2492 return ffs(chunksize/512)-1;
2493 }
2494
2495 struct extent {
2496 unsigned long long start, size;
2497 };
2498 static int cmp_extent(const void *av, const void *bv)
2499 {
2500 const struct extent *a = av;
2501 const struct extent *b = bv;
2502 if (a->start < b->start)
2503 return -1;
2504 if (a->start > b->start)
2505 return 1;
2506 return 0;
2507 }
2508
2509 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2510 {
2511 /* Find a list of used extents on the given physical device
2512 * (dl) of the given ddf.
2513 * Return a malloced array of 'struct extent'
2514 */
2515 struct extent *rv;
2516 int n = 0;
2517 unsigned int i;
2518 __u16 state;
2519
2520 if (dl->pdnum < 0)
2521 return NULL;
2522 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2523
2524 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2525 return NULL;
2526
2527 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2528
2529 for (i = 0; i < ddf->max_part; i++) {
2530 const struct vd_config *bvd;
2531 unsigned int ibvd;
2532 struct vcl *v = dl->vlist[i];
2533 if (v == NULL ||
2534 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2535 &bvd, &ibvd) == DDF_NOTFOUND)
2536 continue;
2537 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2538 rv[n].size = be64_to_cpu(bvd->blocks);
2539 n++;
2540 }
2541 qsort(rv, n, sizeof(*rv), cmp_extent);
2542
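/* Terminate the list with a zero-length sentinel at config_size, the end of the space usable for data. */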
2543 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2544 rv[n].size = 0;
2545 return rv;
2546 }
2547
2548 static unsigned long long find_space(
2549 struct ddf_super *ddf, struct dl *dl,
2550 unsigned long long data_offset,
2551 unsigned long long *size)
2552 {
2553 /* Find if the requested amount of space is available.
2554 * If it is, return start.
2555 * If not, set *size to largest space.
2556 * If data_offset != INVALID_SECTORS, then the space must start
2557 * at this location.
2558 */
2559 struct extent *e = get_extents(ddf, dl);
2560 int i = 0;
2561 unsigned long long pos = 0;
2562 unsigned long long max_size = 0;
2563
2564 if (!e) {
2565 *size = 0;
2566 return INVALID_SECTORS;
2567 }
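/* Walk the gaps between used extents; the list is sorted and ends with the zero-size sentinel at config_size. */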
2568 do {
2569 unsigned long long esize = e[i].start - pos;
2570 if (data_offset != INVALID_SECTORS &&
2571 pos <= data_offset &&
2572 e[i].start > data_offset) {
2573 pos = data_offset;
2574 esize = e[i].start - pos;
2575 }
2576 if (data_offset != INVALID_SECTORS &&
2577 pos != data_offset) {
2578 i++;
2579 continue;
2580 }
2581 if (esize >= *size) {
2582 /* Found! */
2583 free(e);
2584 return pos;
2585 }
2586 if (esize > max_size)
2587 max_size = esize;
2588 pos = e[i].start + e[i].size;
2589 i++;
2590 } while (e[i-1].size);
2591 *size = max_size;
2592 free(e);
2593 return INVALID_SECTORS;
2594 }
2595
2596 static int init_super_ddf_bvd(struct supertype *st,
2597 mdu_array_info_t *info,
2598 unsigned long long size,
2599 char *name, char *homehost,
2600 int *uuid, unsigned long long data_offset)
2601 {
2602 /* We are creating a BVD inside a pre-existing container,
2603 * so st->sb is already set.
2604 * We need to create a new vd_config and a new virtual_entry
2605 */
2606 struct ddf_super *ddf = st->sb;
2607 unsigned int venum, i;
2608 struct virtual_entry *ve;
2609 struct vcl *vcl;
2610 struct vd_config *vc;
2611
2612 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2613 pr_err("This ddf already has an array called %s\n", name);
2614 return 0;
2615 }
2616 venum = find_unused_vde(ddf);
2617 if (venum == DDF_NOTFOUND) {
2618 pr_err("Cannot find spare slot for virtual disk\n");
2619 return 0;
2620 }
2621 ve = &ddf->virt->entries[venum];
2622
2623 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2624 * timestamp, random number
2625 */
2626 make_header_guid(ve->guid);
2627 ve->unit = cpu_to_be16(info->md_minor);
2628 ve->pad0 = 0xFFFF;
2629 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2630 DDF_GUID_LEN);
2631 ve->type = cpu_to_be16(0);
2632 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2633 if (info->state & 1) /* clean */
2634 ve->init_state = DDF_init_full;
2635 else
2636 ve->init_state = DDF_init_not;
2637
2638 memset(ve->pad1, 0xff, 14);
2639 memset(ve->name, ' ', 16);
2640 if (name)
2641 strncpy(ve->name, name, 16);
2642 ddf->virt->populated_vdes =
2643 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2644
2645 /* Now create a new vd_config */
2646 if (posix_memalign((void**)&vcl, 512,
2647 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2648 pr_err("could not allocate vd_config\n");
2649 return 0;
2650 }
2651 vcl->vcnum = venum;
2652 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2653 vc = &vcl->conf;
2654
2655 vc->magic = DDF_VD_CONF_MAGIC;
2656 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2657 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2658 vc->seqnum = cpu_to_be32(1);
2659 memset(vc->pad0, 0xff, 24);
2660 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2661 if (layout_md2ddf(info, vc) == -1 ||
2662 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2663 pr_err("unsupported RAID level/layout %d/%d with %d disks\n",
2664 info->level, info->layout, info->raid_disks);
2665 free(vcl);
2666 return 0;
2667 }
2668 vc->sec_elmnt_seq = 0;
2669 if (alloc_other_bvds(ddf, vcl) != 0) {
2670 pr_err("could not allocate other bvds\n");
2671 free(vcl);
2672 return 0;
2673 }
2674 vc->blocks = cpu_to_be64(size * 2);
2675 vc->array_blocks = cpu_to_be64(
2676 calc_array_size(info->level, info->raid_disks, info->layout,
2677 info->chunk_size, size * 2));
2678 memset(vc->pad1, 0xff, 8);
2679 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2680 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2681 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2682 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2683 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2684 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2685 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2686 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2687 memset(vc->cache_pol, 0, 8);
2688 vc->bg_rate = 0x80;
2689 memset(vc->pad2, 0xff, 3);
2690 memset(vc->pad3, 0xff, 52);
2691 memset(vc->pad4, 0xff, 192);
2692 memset(vc->v0, 0xff, 32);
2693 memset(vc->v1, 0xff, 32);
2694 memset(vc->v2, 0xff, 16);
2695 memset(vc->v3, 0xff, 16);
2696 memset(vc->vendor, 0xff, 32);
2697
2698 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2699 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2700
2701 for (i = 1; i < vc->sec_elmnt_count; i++) {
2702 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2703 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2704 }
2705
2706 vcl->next = ddf->conflist;
2707 ddf->conflist = vcl;
2708 ddf->currentconf = vcl;
2709 ddf_set_updates_pending(ddf, NULL);
2710 return 1;
2711 }
2712
2713 static void add_to_super_ddf_bvd(struct supertype *st,
2714 mdu_disk_info_t *dk, int fd, char *devname,
2715 unsigned long long data_offset)
2716 {
2717 /* fd and devname identify a device within the ddf container (st).
2718 * dk identifies a location in the new BVD.
2719 * We need to find suitable free space in that device and update
2720 * the phys_refnum and lba_offset for the newly created vd_config.
2721 * We might also want to update the type in the phys_disk
2722 * section.
2723 *
2724 * Alternatively: fd == -1 and we have already chosen which device to
2725 * use and recorded it in dl->raiddisk.
2726 */
2727 struct dl *dl;
2728 struct ddf_super *ddf = st->sb;
2729 struct vd_config *vc;
2730 unsigned int i;
2731 unsigned long long blocks, pos;
2732 unsigned int raid_disk = dk->raid_disk;
2733
2734 if (fd == -1) {
2735 for (dl = ddf->dlist; dl ; dl = dl->next)
2736 if (dl->raiddisk == dk->raid_disk)
2737 break;
2738 } else {
2739 for (dl = ddf->dlist; dl ; dl = dl->next)
2740 if (dl->major == dk->major &&
2741 dl->minor == dk->minor)
2742 break;
2743 }
2744 if (!dl || dl->pdnum < 0 || ! (dk->state & (1<<MD_DISK_SYNC)))
2745 return;
2746
2747 vc = &ddf->currentconf->conf;
2748 if (vc->sec_elmnt_count > 1) {
2749 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2750 if (raid_disk >= n)
2751 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2752 raid_disk %= n;
2753 }
2754
2755 blocks = be64_to_cpu(vc->blocks);
2756 if (ddf->currentconf->block_sizes)
2757 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2758
2759 pos = find_space(ddf, dl, data_offset, &blocks);
2760 if (pos == INVALID_SECTORS)
2761 return;
2762
2763 ddf->currentdev = dk->raid_disk;
2764 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2765 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2766
2767 for (i = 0; i < ddf->max_part ; i++)
2768 if (dl->vlist[i] == NULL)
2769 break;
2770 if (i == ddf->max_part)
2771 return;
2772 dl->vlist[i] = ddf->currentconf;
2773
2774 if (fd >= 0)
2775 dl->fd = fd;
2776 if (devname)
2777 dl->devname = devname;
2778
2779 /* Check if we can mark array as optimal yet */
2780 i = ddf->currentconf->vcnum;
2781 ddf->virt->entries[i].state =
2782 (ddf->virt->entries[i].state & ~DDF_state_mask)
2783 | get_svd_state(ddf, ddf->currentconf);
2784 be16_clear(ddf->phys->entries[dl->pdnum].type,
2785 cpu_to_be16(DDF_Global_Spare));
2786 be16_set(ddf->phys->entries[dl->pdnum].type,
2787 cpu_to_be16(DDF_Active_in_VD));
2788 dprintf("added disk %d/%08x to VD %d/%s as disk %d\n",
2789 dl->pdnum, be32_to_cpu(dl->disk.refnum),
2790 ddf->currentconf->vcnum, guid_str(vc->guid),
2791 dk->raid_disk);
2792 ddf_set_updates_pending(ddf, vc);
2793 }
2794
2795 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2796 {
2797 unsigned int i;
2798 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2799 if (all_ff(ddf->phys->entries[i].guid))
2800 return i;
2801 }
2802 return DDF_NOTFOUND;
2803 }
2804
2805 static void _set_config_size(struct phys_disk_entry *pde, const struct dl *dl)
2806 {
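/* config_size is the space usable for data: everything below the DDF structures (headers and workspace) at the end of the disk. */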
2807 __u64 cfs, t;
2808 cfs = min(dl->size - 32*1024*2ULL, be64_to_cpu(dl->primary_lba));
2809 t = be64_to_cpu(dl->secondary_lba);
2810 if (t != ~(__u64)0)
2811 cfs = min(cfs, t);
2812 /*
2813 * Some vendor DDF structures interpret workspace_lba
2814 * very differently from the way we do, so sanity-check the value.
2815 */
2816 t = be64_to_cpu(dl->workspace_lba);
2817 if (t < cfs) {
2818 __u64 wsp = cfs - t;
2819 if (wsp > 1024*1024*2ULL && wsp > dl->size / 16) {
2820 pr_err("%x:%x: workspace size 0x%llx too big, ignoring\n",
2821 dl->major, dl->minor, (unsigned long long)wsp);
2822 } else
2823 cfs = t;
2824 }
2825 pde->config_size = cpu_to_be64(cfs);
2826 dprintf("%x:%x config_size %llx, DDF structure is %llx blocks\n",
2827 dl->major, dl->minor,
2828 (unsigned long long)cfs, (unsigned long long)(dl->size-cfs));
2829 }
2830
2831 /* Add a device to a container, either while creating it or while
2832 * expanding a pre-existing container
2833 */
2834 static int add_to_super_ddf(struct supertype *st,
2835 mdu_disk_info_t *dk, int fd, char *devname,
2836 unsigned long long data_offset)
2837 {
2838 struct ddf_super *ddf = st->sb;
2839 struct dl *dd;
2840 time_t now;
2841 struct tm *tm;
2842 unsigned long long size;
2843 struct phys_disk_entry *pde;
2844 unsigned int n, i;
2845 struct stat stb;
2846 __u32 *tptr;
2847
2848 if (ddf->currentconf) {
2849 add_to_super_ddf_bvd(st, dk, fd, devname, data_offset);
2850 return 0;
2851 }
2852
2853 /* This is device numbered dk->number. We need to create
2854 * a phys_disk entry and a more detailed disk_data entry.
2855 */
2856 fstat(fd, &stb);
2857 n = find_unused_pde(ddf);
2858 if (n == DDF_NOTFOUND) {
2859 pr_err("No free slot in array, cannot add disk\n");
2860 return 1;
2861 }
2862 pde = &ddf->phys->entries[n];
2863 get_dev_size(fd, NULL, &size);
2864 if (size <= 32*1024*1024) {
2865 pr_err("device size must be at least 32MB\n");
2866 return 1;
2867 }
2868 size >>= 9;
2869
2870 if (posix_memalign((void**)&dd, 512,
2871 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2872 pr_err("could allocate buffer for new disk, aborting\n");
2873 return 1;
2874 }
2875 dd->major = major(stb.st_rdev);
2876 dd->minor = minor(stb.st_rdev);
2877 dd->devname = devname;
2878 dd->fd = fd;
2879 dd->spare = NULL;
2880
2881 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2882 now = time(0);
2883 tm = localtime(&now);
2884 sprintf(dd->disk.guid, "%8s%04d%02d%02d", T10,
2885 (__u16)tm->tm_year+1900,
2886 (__u8)tm->tm_mon+1, (__u8)tm->tm_mday);
2887 tptr = (__u32 *)(dd->disk.guid + 16);
2888 *tptr++ = random32();
2889 *tptr = random32();
2890
2891 do {
2892 /* Cannot be bothered finding a CRC of some irrelevant details*/
2893 dd->disk.refnum._v32 = random32();
2894 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2895 i > 0; i--)
2896 if (be32_eq(ddf->phys->entries[i-1].refnum,
2897 dd->disk.refnum))
2898 break;
2899 } while (i > 0);
2900
2901 dd->disk.forced_ref = 1;
2902 dd->disk.forced_guid = 1;
2903 memset(dd->disk.vendor, ' ', 32);
2904 memcpy(dd->disk.vendor, "Linux", 5);
2905 memset(dd->disk.pad, 0xff, 442);
2906 for (i = 0; i < ddf->max_part ; i++)
2907 dd->vlist[i] = NULL;
2908
2909 dd->pdnum = n;
2910
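/* With an update_tail, build the new entry in a standalone one-entry phys_disk record (dd->mdupdate) to be queued as a metadata update, rather than editing the in-core table directly. */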
2911 if (st->update_tail) {
2912 int len = (sizeof(struct phys_disk) +
2913 sizeof(struct phys_disk_entry));
2914 struct phys_disk *pd;
2915
2916 pd = xmalloc(len);
2917 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2918 pd->used_pdes = cpu_to_be16(n);
2919 pde = &pd->entries[0];
2920 dd->mdupdate = pd;
2921 } else
2922 ddf->phys->used_pdes = cpu_to_be16(
2923 1 + be16_to_cpu(ddf->phys->used_pdes));
2924
2925 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2926 pde->refnum = dd->disk.refnum;
2927 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2928 pde->state = cpu_to_be16(DDF_Online);
2929 dd->size = size;
2930 /*
2931 * If there is already a device in dlist, try to reserve the same
2932 * amount of workspace. Otherwise, use 32MB.
2933 * We checked disk size above already.
2934 */
2935 #define __calc_lba(new, old, lba, mb) do { \
2936 unsigned long long dif; \
2937 if ((old) != NULL) \
2938 dif = (old)->size - be64_to_cpu((old)->lba); \
2939 else \
2940 dif = (new)->size; \
2941 if ((new)->size > dif) \
2942 (new)->lba = cpu_to_be64((new)->size - dif); \
2943 else \
2944 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2945 } while (0)
2946 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2947 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2948 if (ddf->dlist == NULL ||
2949 be64_to_cpu(ddf->dlist->secondary_lba) != ~(__u64)0)
2950 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2951 _set_config_size(pde, dd);
2952
2953 sprintf(pde->path, "%17.17s", "Information: nil");
2954 memset(pde->pad, 0xff, 6);
2955
2956 if (st->update_tail) {
2957 dd->next = ddf->add_list;
2958 ddf->add_list = dd;
2959 } else {
2960 dd->next = ddf->dlist;
2961 ddf->dlist = dd;
2962 ddf_set_updates_pending(ddf, NULL);
2963 }
2964
2965 return 0;
2966 }
2967
2968 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2969 {
2970 struct ddf_super *ddf = st->sb;
2971 struct dl *dl;
2972
2973 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2974 * disappeared from the container.
2975 * We need to arrange that it disappears from the metadata and
2976 * internal data structures too.
2977 * Most of the work is done by ddf_process_update which edits
2978 * the metadata, closes the file handle and attaches the memory
2979 * to the update so that free_updates will free it.
2980 */
2981 for (dl = ddf->dlist; dl ; dl = dl->next)
2982 if (dl->major == dk->major &&
2983 dl->minor == dk->minor)
2984 break;
2985 if (!dl || dl->pdnum < 0)
2986 return -1;
2987
2988 if (st->update_tail) {
2989 int len = (sizeof(struct phys_disk) +
2990 sizeof(struct phys_disk_entry));
2991 struct phys_disk *pd;
2992
2993 pd = xmalloc(len);
2994 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2995 pd->used_pdes = cpu_to_be16(dl->pdnum);
2996 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2997 append_metadata_update(st, pd, len);
2998 }
2999 return 0;
3000 }
3001
3002 /*
3003 * The following helpers implement the write_init_super method for a
3004 * ddf container. It is called when creating a container or adding
3005 * another device to a container.
3006 */
3007
3008 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
3009 {
3010 unsigned long long sector;
3011 struct ddf_header *header;
3012 int fd, i, n_config, conf_size, buf_size;
3013 int ret = 0;
3014 char *conf;
3015
3016 fd = d->fd;
3017
3018 switch (type) {
3019 case DDF_HEADER_PRIMARY:
3020 header = &ddf->primary;
3021 sector = be64_to_cpu(header->primary_lba);
3022 break;
3023 case DDF_HEADER_SECONDARY:
3024 header = &ddf->secondary;
3025 sector = be64_to_cpu(header->secondary_lba);
3026 break;
3027 default:
3028 return 0;
3029 }
3030 if (sector == ~(__u64)0)
3031 return 0;
3032
3033 header->type = type;
3034 header->openflag = 1;
3035 header->crc = calc_crc(header, 512);
3036
3037 lseek64(fd, sector<<9, 0);
3038 if (write(fd, header, 512) < 0)
3039 goto out;
3040
3041 ddf->controller.crc = calc_crc(&ddf->controller, 512);
3042 if (write(fd, &ddf->controller, 512) < 0)
3043 goto out;
3044
3045 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
3046 if (write(fd, ddf->phys, ddf->pdsize) < 0)
3047 goto out;
3048 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
3049 if (write(fd, ddf->virt, ddf->vdsize) < 0)
3050 goto out;
3051
3052 /* Now write lots of config records. */
3053 n_config = ddf->max_part;
3054 conf_size = ddf->conf_rec_len * 512;
3055 conf = ddf->conf;
3056 buf_size = conf_size * (n_config + 1);
3057 if (!conf) {
3058 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
3059 goto out;
3060 ddf->conf = conf;
3061 }
3062 for (i = 0 ; i <= n_config ; i++) {
3063 struct vcl *c;
3064 struct vd_config *vdc = NULL;
3065 if (i == n_config) {
3066 c = (struct vcl *)d->spare;
3067 if (c)
3068 vdc = &c->conf;
3069 } else {
3070 unsigned int dummy;
3071 c = d->vlist[i];
3072 if (c)
3073 get_pd_index_from_refnum(
3074 c, d->disk.refnum,
3075 ddf->mppe,
3076 (const struct vd_config **)&vdc,
3077 &dummy);
3078 }
3079 if (vdc) {
3080 dprintf("writing conf record %i on disk %08x for %s/%u\n",
3081 i, be32_to_cpu(d->disk.refnum),
3082 guid_str(vdc->guid),
3083 vdc->sec_elmnt_seq);
3084 vdc->crc = calc_crc(vdc, conf_size);
3085 memcpy(conf + i*conf_size, vdc, conf_size);
3086 } else
3087 memset(conf + i*conf_size, 0xff, conf_size);
3088 }
3089 if (write(fd, conf, buf_size) != buf_size)
3090 goto out;
3091
3092 d->disk.crc = calc_crc(&d->disk, 512);
3093 if (write(fd, &d->disk, 512) < 0)
3094 goto out;
3095
3096 ret = 1;
3097 out:
3098 header->openflag = 0;
3099 header->crc = calc_crc(header, 512);
3100
3101 lseek64(fd, sector<<9, 0);
3102 if (write(fd, header, 512) < 0)
3103 ret = 0;
3104
3105 return ret;
3106 }
3107
3108 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
3109 {
3110 unsigned long long size;
3111 int fd = d->fd;
3112 if (fd < 0)
3113 return 0;
3114
3115 /* We need to fill in the primary, (secondary) and workspace
3116 * lba's in the headers and set their checksums.
3117 * Also checksum phys, virt....
3118 *
3119 * Then write everything out; the anchor is written last.
3120 */
3121 get_dev_size(fd, NULL, &size);
3122 size /= 512;
3123 memcpy(&ddf->anchor, ddf->active, 512);
3124 if (be64_to_cpu(d->workspace_lba) != 0ULL)
3125 ddf->anchor.workspace_lba = d->workspace_lba;
3126 else
3127 ddf->anchor.workspace_lba =
3128 cpu_to_be64(size - 32*1024*2);
3129 if (be64_to_cpu(d->primary_lba) != 0ULL)
3130 ddf->anchor.primary_lba = d->primary_lba;
3131 else
3132 ddf->anchor.primary_lba =
3133 cpu_to_be64(size - 16*1024*2);
3134 if (be64_to_cpu(d->secondary_lba) != 0ULL)
3135 ddf->anchor.secondary_lba = d->secondary_lba;
3136 else
3137 ddf->anchor.secondary_lba =
3138 cpu_to_be64(size - 32*1024*2);
3139 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
3140 memcpy(&ddf->primary, &ddf->anchor, 512);
3141 memcpy(&ddf->secondary, &ddf->anchor, 512);
3142
3143 ddf->anchor.type = DDF_HEADER_ANCHOR;
3144 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
3145 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
3146 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
3147
3148 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
3149 return 0;
3150
3151 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
3152 return 0;
3153
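/* The anchor header always occupies the very last sector of the device. */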
3154 lseek64(fd, (size-1)*512, SEEK_SET);
3155 if (write(fd, &ddf->anchor, 512) < 0)
3156 return 0;
3157
3158 return 1;
3159 }
3160
3161 static int __write_init_super_ddf(struct supertype *st)
3162 {
3163 struct ddf_super *ddf = st->sb;
3164 struct dl *d;
3165 int attempts = 0;
3166 int successes = 0;
3167
3168 pr_state(ddf, __func__);
3169
3170 /* try to write updated metadata,
3171 * if we catch a failure move on to the next disk
3172 */
3173 for (d = ddf->dlist; d; d=d->next) {
3174 attempts++;
3175 successes += _write_super_to_disk(ddf, d);
3176 }
3177
3178 return attempts != successes;
3179 }
3180
3181 static int write_init_super_ddf(struct supertype *st)
3182 {
3183 struct ddf_super *ddf = st->sb;
3184 struct vcl *currentconf = ddf->currentconf;
3185
3186 /* We are done with currentconf - reset it so st refers to the container */
3187 ddf->currentconf = NULL;
3188
3189 if (st->update_tail) {
3190 /* queue the virtual_disk and vd_config as metadata updates */
3191 struct virtual_disk *vd;
3192 struct vd_config *vc;
3193 int len, tlen;
3194 unsigned int i;
3195
3196 if (!currentconf) {
3197 /* Must be adding a physical disk to the container */
3198 int len = (sizeof(struct phys_disk) +
3199 sizeof(struct phys_disk_entry));
3200
3201 /* adding a disk to the container. */
3202 if (!ddf->add_list)
3203 return 0;
3204
3205 append_metadata_update(st, ddf->add_list->mdupdate, len);
3206 ddf->add_list->mdupdate = NULL;
3207 return 0;
3208 }
3209
3210 /* Newly created VD */
3211
3212 /* First the virtual disk. We have a slightly fake header */
3213 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3214 vd = xmalloc(len);
3215 *vd = *ddf->virt;
3216 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3217 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3218 append_metadata_update(st, vd, len);
3219
3220 /* Then the vd_config */
3221 len = ddf->conf_rec_len * 512;
3222 tlen = len * currentconf->conf.sec_elmnt_count;
3223 vc = xmalloc(tlen);
3224 memcpy(vc, &currentconf->conf, len);
3225 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3226 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3227 len);
3228 append_metadata_update(st, vc, tlen);
3229
3230 return 0;
3231 } else {
3232 struct dl *d;
3233 if (!currentconf)
3234 for (d = ddf->dlist; d; d=d->next)
3235 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3236 /* Note: we don't close the fd's now, but a subsequent
3237 * ->free_super() will
3238 */
3239 return __write_init_super_ddf(st);
3240 }
3241 }
3242
3243 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3244 unsigned long long data_offset)
3245 {
3246 /* We must reserve the last 32Meg */
3247 if (devsize <= 32*1024*2)
3248 return 0;
3249 return devsize - 32*1024*2;
3250 }
3251
3252 static int reserve_space(struct supertype *st, int raiddisks,
3253 unsigned long long size, int chunk,
3254 unsigned long long data_offset,
3255 unsigned long long *freesize)
3256 {
3257 /* Find 'raiddisks' spare extents at least 'size' big (but
3258 * only caring about multiples of 'chunk') and remember
3259 * them. If size==0, find the largest size possible.
3260 * Report available size in *freesize
3261 * If space cannot be found, fail.
3262 */
3263 struct dl *dl;
3264 struct ddf_super *ddf = st->sb;
3265 int cnt = 0;
3266
3267 for (dl = ddf->dlist; dl ; dl=dl->next) {
3268 dl->raiddisk = -1;
3269 dl->esize = 0;
3270 }
3271 /* Now find largest extent on each device */
3272 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3273 unsigned long long minsize = ULLONG_MAX;
3274
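/* Passing ULLONG_MAX makes find_space() fail and report the largest free extent back in minsize. */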
3275 find_space(ddf, dl, data_offset, &minsize);
3276 if (minsize >= size && minsize >= (unsigned)chunk) {
3277 cnt++;
3278 dl->esize = minsize;
3279 }
3280 }
3281 if (cnt < raiddisks) {
3282 pr_err("not enough devices with space to create array.\n");
3283 return 0; /* Not enough free extents large enough */
3284 }
3285 if (size == 0) {
3286 /* choose the largest size of which there are at least 'raiddisks' */
3287 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3288 struct dl *dl2;
3289 if (dl->esize <= size)
3290 continue;
3291 /* This is bigger than 'size', see if there are enough */
3292 cnt = 0;
3293 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3294 if (dl2->esize >= dl->esize)
3295 cnt++;
3296 if (cnt >= raiddisks)
3297 size = dl->esize;
3298 }
3299 if (chunk) {
3300 size = size / chunk;
3301 size *= chunk;
3302 }
3303 *freesize = size;
3304 if (size < 32) {
3305 pr_err("not enough spare devices to create array.\n");
3306 return 0;
3307 }
3308 }
3309 /* We have a 'size' of which there are enough free extents.
3310 * We simply do a first-fit */
3311 cnt = 0;
3312 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3313 if (dl->esize < size)
3314 continue;
3315
3316 dl->raiddisk = cnt;
3317 cnt++;
3318 }
3319 return 1;
3320 }
3321
3322 static int validate_geometry_ddf(struct supertype *st,
3323 int level, int layout, int raiddisks,
3324 int *chunk, unsigned long long size,
3325 unsigned long long data_offset,
3326 char *dev, unsigned long long *freesize,
3327 int consistency_policy, int verbose)
3328 {
3329 int fd;
3330 struct mdinfo *sra;
3331 int cfd;
3332
3333 /* ddf potentially supports lots of things, but it depends on
3334 * what devices are offered (and maybe kernel version?)
3335 * If given unused devices, we will make a container.
3336 * If given devices in a container, we will make a BVD.
3337 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3338 */
3339
3340 if (*chunk == UnSet)
3341 *chunk = DEFAULT_CHUNK;
3342
3343 if (level == LEVEL_NONE)
3344 level = LEVEL_CONTAINER;
3345 if (level == LEVEL_CONTAINER) {
3346 /* Must be a fresh device to add to a container */
3347 return validate_geometry_ddf_container(st, level, layout,
3348 raiddisks, *chunk,
3349 size, data_offset, dev,
3350 freesize,
3351 verbose);
3352 }
3353
3354 if (!dev) {
3355 mdu_array_info_t array = {
3356 .level = level,
3357 .layout = layout,
3358 .raid_disks = raiddisks
3359 };
3360 struct vd_config conf;
3361 if (layout_md2ddf(&array, &conf) == -1) {
3362 if (verbose)
3363 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3364 level, layout, raiddisks);
3365 return 0;
3366 }
3367 /* Should check layout? etc */
3368
3369 if (st->sb && freesize) {
3370 /* --create was given a container to create in.
3371 * So we need to check that there are enough
3372 * free spaces and return the amount of space.
3373 * We may as well remember which drives were
3374 * chosen so that add_to_super/getinfo_super
3375 * can return them.
3376 */
3377 return reserve_space(st, raiddisks, size, *chunk,
3378 data_offset, freesize);
3379 }
3380 return 1;
3381 }
3382
3383 if (st->sb) {
3384 /* A container has already been opened, so we are
3385 * creating in there. Maybe a BVD, maybe an SVD.
3386 * Should make a distinction one day.
3387 */
3388 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3389 chunk, size, data_offset, dev,
3390 freesize,
3391 verbose);
3392 }
3393 /* This is the first device for the array.
3394 * If it is a container, we read it in and do automagic allocations,
3395 * no other devices should be given.
3396 * Otherwise it must be a member device of a container, and we
3397 * do manual allocation.
3398 * Later we should check for a BVD and make an SVD.
3399 */
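/* O_EXCL (without O_CREAT) on a block device succeeds only if nothing else is using it, so success means this is a bare, unused device. */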
3400 fd = open(dev, O_RDONLY|O_EXCL, 0);
3401 if (fd >= 0) {
3402 close(fd);
3403 /* Just a bare device, no good to us */
3404 if (verbose)
3405 pr_err("ddf: Cannot create this array on device %s - a container is required.\n",
3406 dev);
3407 return 0;
3408 }
3409 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3410 if (verbose)
3411 pr_err("ddf: Cannot open %s: %s\n",
3412 dev, strerror(errno));
3413 return 0;
3414 }
3415 /* Well, it is in use by someone, maybe a 'ddf' container. */
3416 cfd = open_container(fd);
3417 if (cfd < 0) {
3418 close(fd);
3419 if (verbose)
3420 pr_err("ddf: Cannot use %s: %s\n",
3421 dev, strerror(EBUSY));
3422 return 0;
3423 }
3424 sra = sysfs_read(cfd, NULL, GET_VERSION);
3425 close(fd);
3426 if (sra && sra->array.major_version == -1 &&
3427 strcmp(sra->text_version, "ddf") == 0) {
3428 /* This is a member of a ddf container. Load the container
3429 * and try to create a bvd
3430 */
3431 struct ddf_super *ddf;
3432 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3433 st->sb = ddf;
3434 strcpy(st->container_devnm, fd2devnm(cfd));
3435 close(cfd);
3436 return validate_geometry_ddf_bvd(st, level, layout,
3437 raiddisks, chunk, size,
3438 data_offset,
3439 dev, freesize,
3440 verbose);
3441 }
3442 close(cfd);
3443 } else /* device may belong to a different container */
3444 return 0;
3445
3446 return 1;
3447 }
3448
3449 static int
3450 validate_geometry_ddf_container(struct supertype *st,
3451 int level, int layout, int raiddisks,
3452 int chunk, unsigned long long size,
3453 unsigned long long data_offset,
3454 char *dev, unsigned long long *freesize,
3455 int verbose)
3456 {
3457 int fd;
3458 unsigned long long ldsize;
3459
3460 if (level != LEVEL_CONTAINER)
3461 return 0;
3462 if (!dev)
3463 return 1;
3464
3465 fd = open(dev, O_RDONLY|O_EXCL, 0);
3466 if (fd < 0) {
3467 if (verbose)
3468 pr_err("ddf: Cannot open %s: %s\n",
3469 dev, strerror(errno));
3470 return 0;
3471 }
3472 if (!get_dev_size(fd, dev, &ldsize)) {
3473 close(fd);
3474 return 0;
3475 }
3476 close(fd);
3477
3478 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3479 if (*freesize == 0)
3480 return 0;
3481
3482 return 1;
3483 }
3484
3485 static int validate_geometry_ddf_bvd(struct supertype *st,
3486 int level, int layout, int raiddisks,
3487 int *chunk, unsigned long long size,
3488 unsigned long long data_offset,
3489 char *dev, unsigned long long *freesize,
3490 int verbose)
3491 {
3492 dev_t rdev;
3493 struct ddf_super *ddf = st->sb;
3494 struct dl *dl;
3495 unsigned long long maxsize;
3496 /* ddf/bvd supports lots of things, but not containers */
3497 if (level == LEVEL_CONTAINER) {
3498 if (verbose)
3499 pr_err("DDF cannot create a container within an container\n");
3500 return 0;
3501 }
3502 /* We must have the container info already read in. */
3503 if (!ddf)
3504 return 0;
3505
3506 if (!dev) {
3507 /* General test: make sure there is space for
3508 * 'raiddisks' device extents of size 'size'.
3509 */
3510 unsigned long long minsize = size;
3511 int dcnt = 0;
3512 if (minsize == 0)
3513 minsize = 8;
3514 for (dl = ddf->dlist; dl ; dl = dl->next) {
3515 if (find_space(ddf, dl, data_offset, &minsize) !=
3516 INVALID_SECTORS)
3517 dcnt++;
3518 }
3519 if (dcnt < raiddisks) {
3520 if (verbose)
3521 pr_err("ddf: Not enough devices with space for this array (%d < %d)\n",
3522 dcnt, raiddisks);
3523 return 0;
3524 }
3525 return 1;
3526 }
3527 /* This device must be a member of the set */
3528 if (!stat_is_blkdev(dev, &rdev))
3529 return 0;
3530 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3531 if (dl->major == (int)major(rdev) &&
3532 dl->minor == (int)minor(rdev))
3533 break;
3534 }
3535 if (!dl) {
3536 if (verbose)
3537 pr_err("ddf: %s is not in the same DDF set\n",
3538 dev);
3539 return 0;
3540 }
3541 maxsize = ULLONG_MAX;
3542 find_space(ddf, dl, data_offset, &maxsize);
3543 *freesize = maxsize;
3544
3545 return 1;
3546 }
3547
3548 static int load_super_ddf_all(struct supertype *st, int fd,
3549 void **sbp, char *devname)
3550 {
3551 struct mdinfo *sra;
3552 struct ddf_super *super;
3553 struct mdinfo *sd, *best = NULL;
3554 int bestseq = 0;
3555 int seq;
3556 char nm[20];
3557 int dfd;
3558
3559 sra = sysfs_read(fd, NULL, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3560 if (!sra)
3561 return 1;
3562 if (sra->array.major_version != -1 ||
3563 sra->array.minor_version != -2 ||
3564 strcmp(sra->text_version, "ddf") != 0)
3565 return 1;
3566
3567 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3568 return 1;
3569 memset(super, 0, sizeof(*super));
3570
3571 /* first, try each device, and choose the best ddf */
3572 for (sd = sra->devs ; sd ; sd = sd->next) {
3573 int rv;
3574 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3575 dfd = dev_open(nm, O_RDONLY);
3576 if (dfd < 0)
3577 return 2;
3578 rv = load_ddf_headers(dfd, super, NULL);
3579 close(dfd);
3580 if (rv == 0) {
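/* A header left 'open' may not have been written out completely, so rank it just below a cleanly closed header with the same sequence number. */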
3581 seq = be32_to_cpu(super->active->seq);
3582 if (super->active->openflag)
3583 seq--;
3584 if (!best || seq > bestseq) {
3585 bestseq = seq;
3586 best = sd;
3587 }
3588 }
3589 }
3590 if (!best)
3591 return 1;
3592 /* OK, load this ddf */
3593 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3594 dfd = dev_open(nm, O_RDONLY);
3595 if (dfd < 0)
3596 return 1;
3597 load_ddf_headers(dfd, super, NULL);
3598 load_ddf_global(dfd, super, NULL);
3599 close(dfd);
3600 /* Now we need the device-local bits */
3601 for (sd = sra->devs ; sd ; sd = sd->next) {
3602 int rv;
3603
3604 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3605 dfd = dev_open(nm, O_RDWR);
3606 if (dfd < 0)
3607 return 2;
3608 rv = load_ddf_headers(dfd, super, NULL);
3609 if (rv == 0)
3610 rv = load_ddf_local(dfd, super, NULL, 1);
3611 if (rv)
3612 return 1;
3613 }
3614
3615 *sbp = super;
3616 if (st->ss == NULL) {
3617 st->ss = &super_ddf;
3618 st->minor_version = 0;
3619 st->max_devs = 512;
3620 }
3621 strcpy(st->container_devnm, fd2devnm(fd));
3622 return 0;
3623 }
3624
3625 static int load_container_ddf(struct supertype *st, int fd,
3626 char *devname)
3627 {
3628 return load_super_ddf_all(st, fd, &st->sb, devname);
3629 }
3630
3631 static int check_secondary(const struct vcl *vc)
3632 {
3633 const struct vd_config *conf = &vc->conf;
3634 int i;
3635
3636 /* The only DDF secondary RAID level md can support is
3637 * RAID 10, if the stripe sizes and Basic volume sizes
3638 * are all equal.
3639 * Other configurations could in theory be supported by exposing
3640 * the BVDs to user space and using device mapper for the secondary
3641 * mapping. So far we don't support that.
3642 */
3643
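/* Bitmap of the secondary element (BVD) sequence numbers seen so far. */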
3644 __u64 sec_elements[4] = {0, 0, 0, 0};
3645 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1<<((n)&63)))
3646 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1<<((n)&63))) != 0)
3647
3648 if (vc->other_bvds == NULL) {
3649 pr_err("No BVDs for secondary RAID found\n");
3650 return -1;
3651 }
3652 if (conf->prl != DDF_RAID1) {
3653 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3654 return -1;
3655 }
3656 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3657 pr_err("Secondary RAID level %d is unsupported\n",
3658 conf->srl);
3659 return -1;
3660 }
3661 __set_sec_seen(conf->sec_elmnt_seq);
3662 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3663 const struct vd_config *bvd = vc->other_bvds[i];
3664 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3665 continue;
3666 if (bvd->srl != conf->srl) {
3667 pr_err("Inconsistent secondary RAID level across BVDs\n");
3668 return -1;
3669 }
3670 if (bvd->prl != conf->prl) {
3671 pr_err("Different RAID levels for BVDs are unsupported\n");
3672 return -1;
3673 }
3674 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3675 pr_err("All BVDs must have the same number of primary elements\n");
3676 return -1;
3677 }
3678 if (bvd->chunk_shift != conf->chunk_shift) {
3679 pr_err("Different strip sizes for BVDs are unsupported\n");
3680 return -1;
3681 }
3682 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3683 pr_err("Different BVD sizes are unsupported\n");
3684 return -1;
3685 }
3686 __set_sec_seen(bvd->sec_elmnt_seq);
3687 }
3688 for (i = 0; i < conf->sec_elmnt_count; i++) {
3689 if (!__was_sec_seen(i)) {
3690 /* pr_err("BVD %d is missing\n", i); */
3691 return -1;
3692 }
3693 }
3694 return 0;
3695 }
3696
3697 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3698 be32 refnum, unsigned int nmax,
3699 const struct vd_config **bvd,
3700 unsigned int *idx)
3701 {
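/* Return the raid-disk index (across all BVDs) of the physical disk with the given refnum; *bvd and *idx report the BVD config and the slot in its phys_refnum table. */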
3702 unsigned int i, j, n, sec, cnt;
3703
3704 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3705 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3706
3707 for (i = 0, j = 0 ; i < nmax ; i++) {
3708 /* j counts valid entries for this BVD */
3709 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3710 *bvd = &vc->conf;
3711 *idx = i;
3712 return sec * cnt + j;
3713 }
3714 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3715 j++;
3716 }
3717 if (vc->other_bvds == NULL)
3718 goto bad;
3719
3720 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3721 struct vd_config *vd = vc->other_bvds[n-1];
3722 sec = vd->sec_elmnt_seq;
3723 if (sec == DDF_UNUSED_BVD)
3724 continue;
3725 for (i = 0, j = 0 ; i < nmax ; i++) {
3726 if (be32_eq(vd->phys_refnum[i], refnum)) {
3727 *bvd = vd;
3728 *idx = i;
3729 return sec * cnt + j;
3730 }
3731 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3732 j++;
3733 }
3734 }
3735 bad:
3736 *bvd = NULL;
3737 return DDF_NOTFOUND;
3738 }
3739
3740 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3741 {
3742 /* Given a container loaded by load_super_ddf_all,
3743 * extract information about all the arrays into
3744 * an mdinfo tree.
3745 *
3746 * For each vcl in conflist: create an mdinfo, fill it in,
3747 * then look for matching devices (phys_refnum) in dlist
3748 * and create appropriate device mdinfo.
3749 */
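/* If 'subarray' is non-NULL it must be the decimal vcnum of a single
 * VD (the container_member number); only that VD is reported, and
 * ddf->currentconf is left pointing at it for callers such as
 * kill_subarray_ddf.
 */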
3750 struct ddf_super *ddf = st->sb;
3751 struct mdinfo *rest = NULL;
3752 struct vcl *vc;
3753
3754 for (vc = ddf->conflist ; vc ; vc=vc->next) {
3755 unsigned int i;
3756 struct mdinfo *this;
3757 char *ep;
3758 __u32 *cptr;
3759 unsigned int pd;
3760
3761 if (subarray &&
3762 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3763 *ep != '\0'))
3764 continue;
3765
3766 if (vc->conf.sec_elmnt_count > 1) {
3767 if (check_secondary(vc) != 0)
3768 continue;
3769 }
3770
3771 this = xcalloc(1, sizeof(*this));
3772 this->next = rest;
3773 rest = this;
3774
3775 if (layout_ddf2md(&vc->conf, &this->array))
3776 continue;
3777 this->array.md_minor = -1;
3778 this->array.major_version = -1;
3779 this->array.minor_version = -2;
3780 this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
3781 cptr = (__u32 *)(vc->conf.guid + 16);
3782 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3783 this->array.utime = DECADE +
3784 be32_to_cpu(vc->conf.timestamp);
3785 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3786
3787 i = vc->vcnum;
3788 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3789 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3790 DDF_init_full) {
3791 this->array.state = 0;
3792 this->resync_start = 0;
3793 } else {
3794 this->array.state = 1;
3795 this->resync_start = MaxSector;
3796 }
3797 _ddf_array_name(this->name, ddf, i);
3798 memset(this->uuid, 0, sizeof(this->uuid));
3799 this->component_size = be64_to_cpu(vc->conf.blocks);
3800 this->array.size = this->component_size / 2;
3801 this->container_member = i;
3802
3803 ddf->currentconf = vc;
3804 uuid_from_super_ddf(st, this->uuid);
3805 if (!subarray)
3806 ddf->currentconf = NULL;
3807
3808 sprintf(this->text_version, "/%s/%d",
3809 st->container_devnm, this->container_member);
3810
3811 for (pd = 0; pd < be16_to_cpu(ddf->phys->max_pdes); pd++) {
3812 struct mdinfo *dev;
3813 struct dl *d;
3814 const struct vd_config *bvd;
3815 unsigned int iphys;
3816 int stt;
3817
3818 if (be32_to_cpu(ddf->phys->entries[pd].refnum) ==
3819 0xffffffff)
3820 continue;
3821
3822 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3823 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding)) !=
3824 DDF_Online)
3825 continue;
3826
3827 i = get_pd_index_from_refnum(
3828 vc, ddf->phys->entries[pd].refnum,
3829 ddf->mppe, &bvd, &iphys);
3830 if (i == DDF_NOTFOUND)
3831 continue;
3832
3833 this->array.working_disks++;
3834
3835 for (d = ddf->dlist; d ; d=d->next)
3836 if (be32_eq(d->disk.refnum,
3837 ddf->phys->entries[pd].refnum))
3838 break;
3839 if (d == NULL)
3840 /* Haven't found that one yet, maybe there are others */
3841 continue;
3842
3843 dev = xcalloc(1, sizeof(*dev));
3844 dev->next = this->devs;
3845 this->devs = dev;
3846
3847 dev->disk.number = be32_to_cpu(d->disk.refnum);
3848 dev->disk.major = d->major;
3849 dev->disk.minor = d->minor;
3850 dev->disk.raid_disk = i;
3851 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3852 dev->recovery_start = MaxSector;
3853
3854 dev->events = be32_to_cpu(ddf->active->seq);
3855 dev->data_offset =
3856 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3857 dev->component_size = be64_to_cpu(bvd->blocks);
3858 if (d->devname)
3859 strcpy(dev->name, d->devname);
3860 }
3861 }
3862 return rest;
3863 }
3864
3865 static int store_super_ddf(struct supertype *st, int fd)
3866 {
3867 struct ddf_super *ddf = st->sb;
3868 unsigned long long dsize;
3869 void *buf;
3870 int rc;
3871
3872 if (!ddf)
3873 return 1;
3874
3875 if (!get_dev_size(fd, NULL, &dsize))
3876 return 1;
3877
3878 if (ddf->dlist || ddf->conflist) {
3879 struct stat sta;
3880 struct dl *dl;
3881 int ofd, ret;
3882
3883 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3884 pr_err("file descriptor for invalid device\n");
3885 return 1;
3886 }
3887 for (dl = ddf->dlist; dl; dl = dl->next)
3888 if (dl->major == (int)major(sta.st_rdev) &&
3889 dl->minor == (int)minor(sta.st_rdev))
3890 break;
3891 if (!dl) {
3892 pr_err("couldn't find disk %d/%d\n",
3893 (int)major(sta.st_rdev),
3894 (int)minor(sta.st_rdev));
3895 return 1;
3896 }
3897 ofd = dl->fd;
3898 dl->fd = fd;
3899 ret = (_write_super_to_disk(ddf, dl) != 1);
3900 dl->fd = ofd;
3901 return ret;
3902 }
3903
3904 if (posix_memalign(&buf, 512, 512) != 0)
3905 return 1;
3906 memset(buf, 0, 512);
3907
3908 lseek64(fd, dsize-512, 0);
3909 rc = write(fd, buf, 512);
3910 free(buf);
3911 if (rc < 0)
3912 return 1;
3913 return 0;
3914 }
3915
3916 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3917 {
3918 /*
3919 * return:
3920 * 0 same, or first was empty, and second was copied
3921 * 1 second had wrong magic number - but that isn't possible
3922 * 2 wrong uuid
3923 * 3 wrong other info
3924 */
3925 struct ddf_super *first = st->sb;
3926 struct ddf_super *second = tst->sb;
3927 struct dl *dl1, *dl2;
3928 struct vcl *vl1, *vl2;
3929 unsigned int max_vds, max_pds, pd, vd;
3930
3931 if (!first) {
3932 st->sb = tst->sb;
3933 tst->sb = NULL;
3934 return 0;
3935 }
3936
3937 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3938 return 2;
3939
3940 /* It is only OK to compare info in the anchor. Anything else
3941 * could be changing due to a reconfig so must be ignored.
3942 * guid really should be enough anyway.
3943 */
3944
3945 if (!be32_eq(first->active->seq, second->active->seq)) {
3946 dprintf("sequence number mismatch %u<->%u\n",
3947 be32_to_cpu(first->active->seq),
3948 be32_to_cpu(second->active->seq));
3949 return 0;
3950 }
3951
3952 /*
3953 * At this point we are fairly sure that the meta data matches.
3954 * But the new disk may contain additional local data.
3955 * Add it to the super block.
3956 */
3957 max_vds = be16_to_cpu(first->active->max_vd_entries);
3958 max_pds = be16_to_cpu(first->phys->max_pdes);
3959 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3960 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3961 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3962 DDF_GUID_LEN))
3963 break;
3964 if (vl1) {
3965 if (vl1->other_bvds != NULL &&
3966 vl1->conf.sec_elmnt_seq !=
3967 vl2->conf.sec_elmnt_seq) {
3968 dprintf("adding BVD %u\n",
3969 vl2->conf.sec_elmnt_seq);
3970 add_other_bvd(vl1, &vl2->conf,
3971 first->conf_rec_len*512);
3972 }
3973 continue;
3974 }
3975
3976 if (posix_memalign((void **)&vl1, 512,
3977 (first->conf_rec_len*512 +
3978 offsetof(struct vcl, conf))) != 0) {
3979 pr_err("could not allocate vcl buf\n");
3980 return 3;
3981 }
3982
3983 vl1->next = first->conflist;
3984 vl1->block_sizes = NULL;
3985 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
3986 if (alloc_other_bvds(first, vl1) != 0) {
3987 pr_err("could not allocate other bvds\n");
3988 free(vl1);
3989 return 3;
3990 }
3991 for (vd = 0; vd < max_vds; vd++)
3992 if (!memcmp(first->virt->entries[vd].guid,
3993 vl1->conf.guid, DDF_GUID_LEN))
3994 break;
3995 vl1->vcnum = vd;
3996 dprintf("added config for VD %u\n", vl1->vcnum);
3997 first->conflist = vl1;
3998 }
3999
4000 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
4001 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
4002 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
4003 break;
4004 if (dl1)
4005 continue;
4006
4007 if (posix_memalign((void **)&dl1, 512,
4008 sizeof(*dl1) + (first->max_part) *
4009 sizeof(dl1->vlist[0])) != 0) {
4010 pr_err("could not allocate disk info buffer\n");
4011 return 3;
4012 }
4013 memcpy(dl1, dl2, sizeof(*dl1));
4014 dl1->mdupdate = NULL;
4015 dl1->next = first->dlist;
4016 dl1->fd = -1;
4017 for (pd = 0; pd < max_pds; pd++)
4018 if (be32_eq(first->phys->entries[pd].refnum,
4019 dl1->disk.refnum))
4020 break;
4021 dl1->pdnum = pd < max_pds ? (int)pd : -1;
4022 if (dl2->spare) {
4023 if (posix_memalign((void **)&dl1->spare, 512,
4024 first->conf_rec_len*512) != 0) {
4025 pr_err("could not allocate spare info buf\n");
4026 return 3;
4027 }
4028 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
4029 }
4030 for (vd = 0 ; vd < first->max_part ; vd++) {
4031 if (!dl2->vlist[vd]) {
4032 dl1->vlist[vd] = NULL;
4033 continue;
4034 }
4035 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
4036 if (!memcmp(vl1->conf.guid,
4037 dl2->vlist[vd]->conf.guid,
4038 DDF_GUID_LEN))
4039 break;
4040 }
4041 dl1->vlist[vd] = vl1;
4042 }
4043 first->dlist = dl1;
4044 dprintf("added disk %d: %08x\n", dl1->pdnum,
4045 be32_to_cpu(dl1->disk.refnum));
4046 }
4047
4048 return 0;
4049 }
4050
4051 /*
4052 * A new array 'a' has been started which claims to be instance 'inst'
4053 * within container 'c'.
4054 * We need to confirm that the array matches the metadata in 'c' so
4055 * that we don't corrupt any metadata.
4056 */
4057 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
4058 {
4059 struct ddf_super *ddf = c->sb;
4060 int n = atoi(inst);
4061 struct mdinfo *dev;
4062 struct dl *dl;
4063 static const char faulty[] = "faulty";
4064
4065 if (all_ff(ddf->virt->entries[n].guid)) {
4066 pr_err("subarray %d doesn't exist\n", n);
4067 return -ENODEV;
4068 }
4069 dprintf("new subarray %d, GUID: %s\n", n,
4070 guid_str(ddf->virt->entries[n].guid));
4071 for (dev = a->info.devs; dev; dev = dev->next) {
4072 for (dl = ddf->dlist; dl; dl = dl->next)
4073 if (dl->major == dev->disk.major &&
4074 dl->minor == dev->disk.minor)
4075 break;
4076 if (!dl || dl->pdnum < 0) {
4077 pr_err("device %d/%d of subarray %d not found in meta data\n",
4078 dev->disk.major, dev->disk.minor, n);
4079 return -1;
4080 }
4081 if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
4082 (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
4083 pr_err("new subarray %d contains broken device %d/%d (%02x)\n",
4084 n, dl->major, dl->minor,
4085 be16_to_cpu(ddf->phys->entries[dl->pdnum].state));
4086 if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
4087 sizeof(faulty) - 1)
4088 pr_err("Write to state_fd failed\n");
4089 dev->curr_state = DS_FAULTY;
4090 }
4091 }
4092 a->info.container_member = n;
4093 return 0;
4094 }
4095
4096 static void handle_missing(struct ddf_super *ddf, struct active_array *a, int inst)
4097 {
4098 /* This member array is being activated. If any devices
4099 * are missing they must now be marked as failed.
4100 */
4101 struct vd_config *vc;
4102 unsigned int n_bvd;
4103 struct vcl *vcl;
4104 struct dl *dl;
4105 int pd;
4106 int n;
4107 int state;
4108
4109 for (n = 0; ; n++) {
4110 vc = find_vdcr(ddf, inst, n, &n_bvd, &vcl);
4111 if (!vc)
4112 break;
4113 for (dl = ddf->dlist; dl; dl = dl->next)
4114 if (be32_eq(dl->disk.refnum, vc->phys_refnum[n_bvd]))
4115 break;
4116 if (dl)
4117 /* Found this disk, so not missing */
4118 continue;
4119
4120 /* Mark the device as failed/missing. */
4121 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4122 if (pd >= 0 && be16_and(ddf->phys->entries[pd].state,
4123 cpu_to_be16(DDF_Online))) {
4124 be16_clear(ddf->phys->entries[pd].state,
4125 cpu_to_be16(DDF_Online));
4126 be16_set(ddf->phys->entries[pd].state,
4127 cpu_to_be16(DDF_Failed|DDF_Missing));
4128 vc->phys_refnum[n_bvd] = cpu_to_be32(0);
4129 ddf_set_updates_pending(ddf, vc);
4130 }
4131
4132 /* Mark the array as Degraded */
4133 state = get_svd_state(ddf, vcl);
4134 if (ddf->virt->entries[inst].state !=
4135 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4136 | state)) {
4137 ddf->virt->entries[inst].state =
4138 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4139 | state;
4140 a->check_degraded = 1;
4141 ddf_set_updates_pending(ddf, vc);
4142 }
4143 }
4144 }
4145
4146 /*
4147 * The array 'a' is to be marked clean in the metadata.
4148 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4149 * clean up to that point (a sector offset). If that cannot be recorded in
4150 * the metadata, the array is left marked dirty.
4151 *
4152 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4153 * !global! virtual_disk.virtual_entry structure.
4154 */
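/* 'consistent' == 2 is used when the member array is being activated
 * (see handle_missing() above): missing devices are failed first and
 * cleanliness is then derived from the resync position.
 */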
4155 static int ddf_set_array_state(struct active_array *a, int consistent)
4156 {
4157 struct ddf_super *ddf = a->container->sb;
4158 int inst = a->info.container_member;
4159 int old = ddf->virt->entries[inst].state;
4160 if (consistent == 2) {
4161 handle_missing(ddf, a, inst);
4162 consistent = 1;
4163 if (!is_resync_complete(&a->info))
4164 consistent = 0;
4165 }
4166 if (consistent)
4167 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4168 else
4169 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4170 if (old != ddf->virt->entries[inst].state)
4171 ddf_set_updates_pending(ddf, NULL);
4172
4173 old = ddf->virt->entries[inst].init_state;
4174 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4175 if (is_resync_complete(&a->info))
4176 ddf->virt->entries[inst].init_state |= DDF_init_full;
4177 else if (a->info.resync_start == 0)
4178 ddf->virt->entries[inst].init_state |= DDF_init_not;
4179 else
4180 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4181 if (old != ddf->virt->entries[inst].init_state)
4182 ddf_set_updates_pending(ddf, NULL);
4183
4184 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4185 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4186 consistent?"clean":"dirty",
4187 a->info.resync_start);
4188 return consistent;
4189 }
4190
4191 static int get_bvd_state(const struct ddf_super *ddf,
4192 const struct vd_config *vc)
4193 {
4194 unsigned int i, n_bvd, working = 0;
4195 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4196 int pd, st, state;
4197 char *avail = xcalloc(1, n_prim);
4198 mdu_array_info_t array;
4199
4200 layout_ddf2md(vc, &array);
4201
4202 for (i = 0; i < n_prim; i++) {
4203 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4204 continue;
4205 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4206 if (pd < 0)
4207 continue;
4208 st = be16_to_cpu(ddf->phys->entries[pd].state);
4209 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding)) ==
4210 DDF_Online) {
4211 working++;
4212 avail[i] = 1;
4213 }
4214 }
4215
4216 state = DDF_state_degraded;
4217 if (working == n_prim)
4218 state = DDF_state_optimal;
4219 else
4220 switch (vc->prl) {
4221 case DDF_RAID0:
4222 case DDF_CONCAT:
4223 case DDF_JBOD:
4224 state = DDF_state_failed;
4225 break;
4226 case DDF_RAID1:
4227 if (working == 0)
4228 state = DDF_state_failed;
4229 else if (working >= 2)
4230 state = DDF_state_part_optimal;
4231 break;
4232 case DDF_RAID1E:
4233 if (!enough(10, n_prim, array.layout, 1, avail))
4234 state = DDF_state_failed;
4235 break;
4236 case DDF_RAID4:
4237 case DDF_RAID5:
4238 if (working < n_prim - 1)
4239 state = DDF_state_failed;
4240 break;
4241 case DDF_RAID6:
4242 if (working < n_prim - 2)
4243 state = DDF_state_failed;
4244 else if (working == n_prim - 1)
4245 state = DDF_state_part_optimal;
4246 break;
4247 }
4248 return state;
4249 }
4250
4251 static int secondary_state(int state, int other, int seclevel)
4252 {
4253 if (state == DDF_state_optimal && other == DDF_state_optimal)
4254 return DDF_state_optimal;
4255 if (seclevel == DDF_2MIRRORED) {
4256 if (state == DDF_state_optimal || other == DDF_state_optimal)
4257 return DDF_state_part_optimal;
4258 if (state == DDF_state_failed && other == DDF_state_failed)
4259 return DDF_state_failed;
4260 return DDF_state_degraded;
4261 } else {
4262 if (state == DDF_state_failed || other == DDF_state_failed)
4263 return DDF_state_failed;
4264 if (state == DDF_state_degraded || other == DDF_state_degraded)
4265 return DDF_state_degraded;
4266 return DDF_state_part_optimal;
4267 }
4268 }
4269
4270 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4271 {
4272 int state = get_bvd_state(ddf, &vcl->conf);
4273 unsigned int i;
4274 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4275 state = secondary_state(
4276 state,
4277 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4278 vcl->conf.srl);
4279 }
4280 return state;
4281 }
4282
4283 /*
4284 * The state of each disk is stored in the global phys_disk structure
4285 * in phys_disk.entries[n].state.
4286 * This makes various combinations awkward.
4287 * - When a device fails in any array, it must be failed in all arrays
4288 * that include a part of this device.
4289 * - When a component is rebuilding, we cannot include it officially in the
4290 * array unless this is the only array that uses the device.
4291 *
4292 * So: when transitioning:
4293 * Online -> failed, just set failed flag. monitor will propagate
4294 * spare -> online, the device might need to be added to the array.
4295 * spare -> failed, just set failed. Don't worry if in array or not.
4296 */
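/* 'state' is the mdmon disk-state bit mask for slot 'n' (DS_FAULTY,
 * DS_INSYNC, ...); it is translated into DDF_Online/DDF_Failed/
 * DDF_Rebuilding bits on the corresponding phys_disk entry below.
 */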
4297 static void ddf_set_disk(struct active_array *a, int n, int state)
4298 {
4299 struct ddf_super *ddf = a->container->sb;
4300 unsigned int inst = a->info.container_member, n_bvd;
4301 struct vcl *vcl;
4302 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4303 &n_bvd, &vcl);
4304 int pd;
4305 struct mdinfo *mdi;
4306 struct dl *dl;
4307 int update = 0;
4308
4309 dprintf("%d to %x\n", n, state);
4310 if (vc == NULL) {
4311 dprintf("ddf: cannot find instance %d!!\n", inst);
4312 return;
4313 }
4314 /* Find the matching slot in 'info'. */
4315 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4316 if (mdi->disk.raid_disk == n)
4317 break;
4318 if (!mdi) {
4319 pr_err("cannot find raid disk %d\n", n);
4320 return;
4321 }
4322
4323 /* and find the 'dl' entry corresponding to that. */
4324 for (dl = ddf->dlist; dl; dl = dl->next)
4325 if (mdi->state_fd >= 0 &&
4326 mdi->disk.major == dl->major &&
4327 mdi->disk.minor == dl->minor)
4328 break;
4329 if (!dl) {
4330 pr_err("cannot find raid disk %d (%d/%d)\n",
4331 n, mdi->disk.major, mdi->disk.minor);
4332 return;
4333 }
4334
4335 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4336 if (pd < 0 || pd != dl->pdnum) {
4337 /* disk doesn't currently exist or has changed.
4338 * If it is now in_sync, insert it. */
4339 dprintf("phys disk not found for %d: %d/%d ref %08x\n",
4340 dl->pdnum, dl->major, dl->minor,
4341 be32_to_cpu(dl->disk.refnum));
4342 dprintf("array %u disk %u ref %08x pd %d\n",
4343 inst, n_bvd,
4344 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4345 if ((state & DS_INSYNC) && ! (state & DS_FAULTY) &&
4346 dl->pdnum >= 0) {
4347 pd = dl->pdnum;
4348 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4349 LBA_OFFSET(ddf, vc)[n_bvd] =
4350 cpu_to_be64(mdi->data_offset);
4351 be16_clear(ddf->phys->entries[pd].type,
4352 cpu_to_be16(DDF_Global_Spare));
4353 be16_set(ddf->phys->entries[pd].type,
4354 cpu_to_be16(DDF_Active_in_VD));
4355 update = 1;
4356 }
4357 } else {
4358 be16 old = ddf->phys->entries[pd].state;
4359 if (state & DS_FAULTY)
4360 be16_set(ddf->phys->entries[pd].state,
4361 cpu_to_be16(DDF_Failed));
4362 if (state & DS_INSYNC) {
4363 be16_set(ddf->phys->entries[pd].state,
4364 cpu_to_be16(DDF_Online));
4365 be16_clear(ddf->phys->entries[pd].state,
4366 cpu_to_be16(DDF_Rebuilding));
4367 }
4368 if (!be16_eq(old, ddf->phys->entries[pd].state))
4369 update = 1;
4370 }
4371
4372 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4373 be32_to_cpu(dl->disk.refnum), state,
4374 be16_to_cpu(ddf->phys->entries[pd].state));
4375
4376 /* Now we need to check the state of the array and update
4377 * virtual_disk.entries[n].state.
4378 * It needs to be one of "optimal", "degraded", "failed".
4379 * I don't understand 'deleted' or 'missing'.
4380 */
4381 state = get_svd_state(ddf, vcl);
4382
4383 if (ddf->virt->entries[inst].state !=
4384 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4385 | state)) {
4386 ddf->virt->entries[inst].state =
4387 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4388 | state;
4389 update = 1;
4390 }
4391 if (update)
4392 ddf_set_updates_pending(ddf, vc);
4393 }
4394
4395 static void ddf_sync_metadata(struct supertype *st)
4396 {
4397 /*
4398 * Write all data to all devices.
4399 * Later, we might be able to track whether only local changes
4400 * have been made, or whether any global data has been changed,
4401 * but ddf is sufficiently weird that it probably always
4402 * changes global data ....
4403 */
4404 struct ddf_super *ddf = st->sb;
4405 if (!ddf->updates_pending)
4406 return;
4407 ddf->updates_pending = 0;
4408 __write_init_super_ddf(st);
4409 dprintf("ddf: sync_metadata\n");
4410 }
4411
4412 static int del_from_conflist(struct vcl **list, const char *guid)
4413 {
4414 struct vcl **p;
4415 int found = 0;
4416 for (p = list; p && *p; p = &((*p)->next))
4417 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4418 found = 1;
4419 *p = (*p)->next;
4420 }
4421 return found;
4422 }
4423
4424 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4425 {
4426 struct dl *dl;
4427 unsigned int vdnum, i;
4428 vdnum = find_vde_by_guid(ddf, guid);
4429 if (vdnum == DDF_NOTFOUND) {
4430 pr_err("could not find VD %s\n", guid_str(guid));
4431 return -1;
4432 }
4433 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4434 pr_err("could not find conf %s\n", guid_str(guid));
4435 return -1;
4436 }
4437 for (dl = ddf->dlist; dl; dl = dl->next)
4438 for (i = 0; i < ddf->max_part; i++)
4439 if (dl->vlist[i] != NULL &&
4440 !memcmp(dl->vlist[i]->conf.guid, guid,
4441 DDF_GUID_LEN))
4442 dl->vlist[i] = NULL;
4443 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4444 dprintf("deleted %s\n", guid_str(guid));
4445 return 0;
4446 }
4447
4448 static int kill_subarray_ddf(struct supertype *st)
4449 {
4450 struct ddf_super *ddf = st->sb;
4451 /*
4452 * currentconf is set in container_content_ddf,
4453 * called with subarray arg
4454 */
4455 struct vcl *victim = ddf->currentconf;
4456 struct vd_config *conf;
4457 unsigned int vdnum;
4458
4459 ddf->currentconf = NULL;
4460 if (!victim) {
4461 pr_err("nothing to kill\n");
4462 return -1;
4463 }
4464 conf = &victim->conf;
4465 vdnum = find_vde_by_guid(ddf, conf->guid);
4466 if (vdnum == DDF_NOTFOUND) {
4467 pr_err("could not find VD %s\n", guid_str(conf->guid));
4468 return -1;
4469 }
4470 if (st->update_tail) {
4471 struct virtual_disk *vd;
4472 int len = sizeof(struct virtual_disk)
4473 + sizeof(struct virtual_entry);
4474 vd = xmalloc(len);
4475 if (vd == NULL) {
4476 pr_err("failed to allocate %d bytes\n", len);
4477 return -1;
4478 }
4479 memset(vd, 0, len);
4480 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4481 vd->populated_vdes = cpu_to_be16(0);
4482 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4483 /* we use DDF_state_deleted as marker */
4484 vd->entries[0].state = DDF_state_deleted;
4485 append_metadata_update(st, vd, len);
4486 } else {
4487 _kill_subarray_ddf(ddf, conf->guid);
4488 ddf_set_updates_pending(ddf, NULL);
4489 ddf_sync_metadata(st);
4490 }
4491 return 0;
4492 }
4493
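/* Copy the phys_refnum[] array and the LBA offsets that follow it
 * (mppe __u32 refnums plus mppe __u64 offsets) from the BVD in
 * 'update' whose sec_elmnt_seq matches 'conf'.
 */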
4494 static void copy_matching_bvd(struct ddf_super *ddf,
4495 struct vd_config *conf,
4496 const struct metadata_update *update)
4497 {
4498 unsigned int mppe =
4499 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4500 unsigned int len = ddf->conf_rec_len * 512;
4501 char *p;
4502 struct vd_config *vc;
4503 for (p = update->buf; p < update->buf + update->len; p += len) {
4504 vc = (struct vd_config *) p;
4505 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4506 memcpy(conf->phys_refnum, vc->phys_refnum,
4507 mppe * (sizeof(__u32) + sizeof(__u64)));
4508 return;
4509 }
4510 }
4511 pr_err("no match for BVD %d of %s in update\n",
4512 conf->sec_elmnt_seq, guid_str(conf->guid));
4513 }
4514
4515 static void ddf_process_phys_update(struct supertype *st,
4516 struct metadata_update *update)
4517 {
4518 struct ddf_super *ddf = st->sb;
4519 struct phys_disk *pd;
4520 unsigned int ent;
4521
4522 pd = (struct phys_disk*)update->buf;
4523 ent = be16_to_cpu(pd->used_pdes);
4524 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4525 return;
4526 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4527 struct dl **dlp;
4528 /* removing this disk. */
4529 be16_set(ddf->phys->entries[ent].state,
4530 cpu_to_be16(DDF_Missing));
4531 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4532 struct dl *dl = *dlp;
4533 if (dl->pdnum == (signed)ent) {
4534 close(dl->fd);
4535 dl->fd = -1;
4536 *dlp = dl->next;
4537 update->space = dl->devname;
4538 *(void**)dl = update->space_list;
4539 update->space_list = (void**)dl;
4540 break;
4541 }
4542 }
4543 ddf_set_updates_pending(ddf, NULL);
4544 return;
4545 }
4546 if (!all_ff(ddf->phys->entries[ent].guid))
4547 return;
4548 ddf->phys->entries[ent] = pd->entries[0];
4549 ddf->phys->used_pdes = cpu_to_be16
4550 (1 + be16_to_cpu(ddf->phys->used_pdes));
4551 ddf_set_updates_pending(ddf, NULL);
4552 if (ddf->add_list) {
4553 struct active_array *a;
4554 struct dl *al = ddf->add_list;
4555 ddf->add_list = al->next;
4556
4557 al->next = ddf->dlist;
4558 ddf->dlist = al;
4559
4560 /* As a device has been added, we should check
4561 * for any degraded devices that might make
4562 * use of this spare */
4563 for (a = st->arrays ; a; a=a->next)
4564 a->check_degraded = 1;
4565 }
4566 }
4567
4568 static void ddf_process_virt_update(struct supertype *st,
4569 struct metadata_update *update)
4570 {
4571 struct ddf_super *ddf = st->sb;
4572 struct virtual_disk *vd;
4573 unsigned int ent;
4574
4575 vd = (struct virtual_disk*)update->buf;
4576
4577 if (vd->entries[0].state == DDF_state_deleted) {
4578 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4579 return;
4580 } else {
4581 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4582 if (ent != DDF_NOTFOUND) {
4583 dprintf("VD %s exists already in slot %d\n",
4584 guid_str(vd->entries[0].guid),
4585 ent);
4586 return;
4587 }
4588 ent = find_unused_vde(ddf);
4589 if (ent == DDF_NOTFOUND)
4590 return;
4591 ddf->virt->entries[ent] = vd->entries[0];
4592 ddf->virt->populated_vdes =
4593 cpu_to_be16(
4594 1 + be16_to_cpu(
4595 ddf->virt->populated_vdes));
4596 dprintf("added VD %s in slot %d(s=%02x i=%02x)\n",
4597 guid_str(vd->entries[0].guid), ent,
4598 ddf->virt->entries[ent].state,
4599 ddf->virt->entries[ent].init_state);
4600 }
4601 ddf_set_updates_pending(ddf, NULL);
4602 }
4603
4604 static void ddf_remove_failed(struct ddf_super *ddf)
4605 {
4606 /* Now remove any 'Failed' devices that are not part
4607 * of any VD. They will have the Transition flag set.
4608 * Once done, we need to update all dl->pdnum numbers.
4609 */
4610 unsigned int pdnum;
4611 unsigned int pd2 = 0;
4612 struct dl *dl;
4613
4614 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
4615 pdnum++) {
4616 if (be32_to_cpu(ddf->phys->entries[pdnum].refnum) ==
4617 0xFFFFFFFF)
4618 continue;
4619 if (be16_and(ddf->phys->entries[pdnum].state,
4620 cpu_to_be16(DDF_Failed)) &&
4621 be16_and(ddf->phys->entries[pdnum].state,
4622 cpu_to_be16(DDF_Transition))) {
4623 /* skip this one unless in dlist*/
4624 for (dl = ddf->dlist; dl; dl = dl->next)
4625 if (dl->pdnum == (int)pdnum)
4626 break;
4627 if (!dl)
4628 continue;
4629 }
4630 if (pdnum == pd2)
4631 pd2++;
4632 else {
4633 ddf->phys->entries[pd2] =
4634 ddf->phys->entries[pdnum];
4635 for (dl = ddf->dlist; dl; dl = dl->next)
4636 if (dl->pdnum == (int)pdnum)
4637 dl->pdnum = pd2;
4638 pd2++;
4639 }
4640 }
4641 ddf->phys->used_pdes = cpu_to_be16(pd2);
4642 while (pd2 < pdnum) {
4643 memset(ddf->phys->entries[pd2].guid, 0xff,
4644 DDF_GUID_LEN);
4645 pd2++;
4646 }
4647 }
4648
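/* Rebuild dl->vlist[] for one disk: record every configuration record
 * that references this disk's refnum, clear the Transition flag on a
 * failed entry that is still referenced, and set the physical-disk
 * type bits (Active_in_VD, Spare, Global_Spare) to match.
 */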
4649 static void ddf_update_vlist(struct ddf_super *ddf, struct dl *dl)
4650 {
4651 struct vcl *vcl;
4652 unsigned int vn = 0;
4653 int in_degraded = 0;
4654
4655 if (dl->pdnum < 0)
4656 return;
4657 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4658 unsigned int dn, ibvd;
4659 const struct vd_config *conf;
4660 int vstate;
4661 dn = get_pd_index_from_refnum(vcl,
4662 dl->disk.refnum,
4663 ddf->mppe,
4664 &conf, &ibvd);
4665 if (dn == DDF_NOTFOUND)
4666 continue;
4667 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4668 dl->pdnum,
4669 be32_to_cpu(dl->disk.refnum),
4670 guid_str(conf->guid),
4671 conf->sec_elmnt_seq, vn);
4672 /* Clear the Transition flag */
4673 if (be16_and
4674 (ddf->phys->entries[dl->pdnum].state,
4675 cpu_to_be16(DDF_Failed)))
4676 be16_clear(ddf->phys
4677 ->entries[dl->pdnum].state,
4678 cpu_to_be16(DDF_Transition));
4679 dl->vlist[vn++] = vcl;
4680 vstate = ddf->virt->entries[vcl->vcnum].state
4681 & DDF_state_mask;
4682 if (vstate == DDF_state_degraded ||
4683 vstate == DDF_state_part_optimal)
4684 in_degraded = 1;
4685 }
4686 while (vn < ddf->max_part)
4687 dl->vlist[vn++] = NULL;
4688 if (dl->vlist[0]) {
4689 be16_clear(ddf->phys->entries[dl->pdnum].type,
4690 cpu_to_be16(DDF_Global_Spare));
4691 if (!be16_and(ddf->phys
4692 ->entries[dl->pdnum].type,
4693 cpu_to_be16(DDF_Active_in_VD))) {
4694 be16_set(ddf->phys
4695 ->entries[dl->pdnum].type,
4696 cpu_to_be16(DDF_Active_in_VD));
4697 if (in_degraded)
4698 be16_set(ddf->phys
4699 ->entries[dl->pdnum]
4700 .state,
4701 cpu_to_be16
4702 (DDF_Rebuilding));
4703 }
4704 }
4705 if (dl->spare) {
4706 be16_clear(ddf->phys->entries[dl->pdnum].type,
4707 cpu_to_be16(DDF_Global_Spare));
4708 be16_set(ddf->phys->entries[dl->pdnum].type,
4709 cpu_to_be16(DDF_Spare));
4710 }
4711 if (!dl->vlist[0] && !dl->spare) {
4712 be16_set(ddf->phys->entries[dl->pdnum].type,
4713 cpu_to_be16(DDF_Global_Spare));
4714 be16_clear(ddf->phys->entries[dl->pdnum].type,
4715 cpu_to_be16(DDF_Spare));
4716 be16_clear(ddf->phys->entries[dl->pdnum].type,
4717 cpu_to_be16(DDF_Active_in_VD));
4718 }
4719 }
4720
4721 static void ddf_process_conf_update(struct supertype *st,
4722 struct metadata_update *update)
4723 {
4724 struct ddf_super *ddf = st->sb;
4725 struct vd_config *vc;
4726 struct vcl *vcl;
4727 struct dl *dl;
4728 unsigned int ent;
4729 unsigned int pdnum, len;
4730
4731 vc = (struct vd_config*)update->buf;
4732 len = ddf->conf_rec_len * 512;
4733 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4734 pr_err("%s: insufficient data (%d) for %u BVDs\n",
4735 guid_str(vc->guid), update->len,
4736 vc->sec_elmnt_count);
4737 return;
4738 }
4739 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4740 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4741 break;
4742 dprintf("conf update for %s (%s)\n",
4743 guid_str(vc->guid), (vcl ? "old" : "new"));
4744 if (vcl) {
4745 /* An update, just copy the phys_refnum and lba_offset
4746 * fields
4747 */
4748 unsigned int i;
4749 unsigned int k;
4750 copy_matching_bvd(ddf, &vcl->conf, update);
4751 for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
4752 dprintf("BVD %u has %08x at %llu\n", 0,
4753 be32_to_cpu(vcl->conf.phys_refnum[k]),
4754 be64_to_cpu(LBA_OFFSET(ddf,
4755 &vcl->conf)[k]));
4756 for (i = 1; i < vc->sec_elmnt_count; i++) {
4757 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4758 update);
4759 for (k = 0; k < be16_to_cpu(
4760 vc->prim_elmnt_count); k++)
4761 dprintf("BVD %u has %08x at %llu\n", i,
4762 be32_to_cpu
4763 (vcl->other_bvds[i-1]->
4764 phys_refnum[k]),
4765 be64_to_cpu
4766 (LBA_OFFSET
4767 (ddf,
4768 vcl->other_bvds[i-1])[k]));
4769 }
4770 } else {
4771 /* A new VD_CONF */
4772 unsigned int i;
4773 if (!update->space)
4774 return;
4775 vcl = update->space;
4776 update->space = NULL;
4777 vcl->next = ddf->conflist;
4778 memcpy(&vcl->conf, vc, len);
4779 ent = find_vde_by_guid(ddf, vc->guid);
4780 if (ent == DDF_NOTFOUND)
4781 return;
4782 vcl->vcnum = ent;
4783 ddf->conflist = vcl;
4784 for (i = 1; i < vc->sec_elmnt_count; i++)
4785 memcpy(vcl->other_bvds[i-1],
4786 update->buf + len * i, len);
4787 }
4788 /* Set DDF_Transition on all Failed devices - to help
4789 * us detect those that are no longer in use
4790 */
4791 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
4792 pdnum++)
4793 if (be16_and(ddf->phys->entries[pdnum].state,
4794 cpu_to_be16(DDF_Failed)))
4795 be16_set(ddf->phys->entries[pdnum].state,
4796 cpu_to_be16(DDF_Transition));
4797
4798 /* Now make sure vlist is correct for each dl. */
4799 for (dl = ddf->dlist; dl; dl = dl->next)
4800 ddf_update_vlist(ddf, dl);
4801 ddf_remove_failed(ddf);
4802
4803 ddf_set_updates_pending(ddf, vc);
4804 }
4805
4806 static void ddf_process_update(struct supertype *st,
4807 struct metadata_update *update)
4808 {
4809 /* Apply this update to the metadata.
4810 * The first 4 bytes are a DDF_*_MAGIC which guides
4811 * our actions.
4812 * Possible updates are:
4813 * DDF_PHYS_RECORDS_MAGIC
4814 * Add a new physical device or remove an old one.
4815 * Changes to this record only happen implicitly.
4816 * used_pdes is the device number.
4817 * DDF_VIRT_RECORDS_MAGIC
4818 * Add a new VD. Possibly also change the 'access' bits.
4819 * populated_vdes is the entry number.
4820 * DDF_VD_CONF_MAGIC
4821 * New or updated VD. The VIRT_RECORD must already
4822 * exist. For an update, phys_refnum and lba_offset
4823 * (at least) are updated, and the VD_CONF must
4824 * be written to precisely those devices listed with
4825 * a phys_refnum.
4826 * DDF_SPARE_ASSIGN_MAGIC
4827 * replacement Spare Assignment Record... but for which device?
4828 *
4829 * So, e.g.:
4830 * - to create a new array, we send a VIRT_RECORD and
4831 * a VD_CONF. Then assemble and start the array.
4832 * - to activate a spare we send a VD_CONF to add the phys_refnum
4833 * and offset. This will also mark the spare as active with
4834 * a spare-assignment record.
4835 */
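/* For example, spare activation flows through all three pieces:
 * ddf_activate_spare() (called from managemon) builds a
 * DDF_VD_CONF_MAGIC update containing one vd_config per BVD,
 * ddf_prepare_update() allocates the vcl buffer for it, and this
 * function, running in the monitor, merges the new phys_refnum and
 * lba_offset values into the live metadata.
 */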
4836 be32 *magic = (be32 *)update->buf;
4837
4838 dprintf("Process update %x\n", be32_to_cpu(*magic));
4839
4840 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4841 if (update->len == (sizeof(struct phys_disk) +
4842 sizeof(struct phys_disk_entry)))
4843 ddf_process_phys_update(st, update);
4844 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4845 if (update->len == (sizeof(struct virtual_disk) +
4846 sizeof(struct virtual_entry)))
4847 ddf_process_virt_update(st, update);
4848 } else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4849 ddf_process_conf_update(st, update);
4850 }
4851 /* case DDF_SPARE_ASSIGN_MAGIC */
4852 }
4853
4854 static int ddf_prepare_update(struct supertype *st,
4855 struct metadata_update *update)
4856 {
4857 /* This update arrived at managemon.
4858 * We are about to pass it to monitor.
4859 * If a malloc is needed, do it here.
4860 */
4861 struct ddf_super *ddf = st->sb;
4862 be32 *magic;
4863 if (update->len < 4)
4864 return 0;
4865 magic = (be32 *)update->buf;
4866 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4867 struct vcl *vcl;
4868 struct vd_config *conf;
4869 if (update->len < (int)sizeof(*conf))
4870 return 0;
4871 conf = (struct vd_config *) update->buf;
4872 if (posix_memalign(&update->space, 512,
4873 offsetof(struct vcl, conf)
4874 + ddf->conf_rec_len * 512) != 0) {
4875 update->space = NULL;
4876 return 0;
4877 }
4878 vcl = update->space;
4879 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4880 if (alloc_other_bvds(ddf, vcl) != 0) {
4881 free(update->space);
4882 update->space = NULL;
4883 return 0;
4884 }
4885 }
4886 return 1;
4887 }
4888
4889 /*
4890 * Check degraded state of a RAID10.
4891 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4892 */
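/* The md RAID10 layout keeps the number of near copies in its low
 * byte, so e.g. layout 0x102 gives n_prim = 2 copies per mirror set
 * and n_bvds = raid_disks / 2 sets; found[] counts working members
 * per set.
 */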
4893 static int raid10_degraded(struct mdinfo *info)
4894 {
4895 int n_prim, n_bvds;
4896 int i;
4897 struct mdinfo *d;
4898 char *found;
4899 int ret = -1;
4900
4901 n_prim = info->array.layout & ~0x100;
4902 n_bvds = info->array.raid_disks / n_prim;
4903 found = xmalloc(n_bvds);
4904 if (found == NULL)
4905 return ret;
4906 memset(found, 0, n_bvds);
4907 for (d = info->devs; d; d = d->next) {
4908 i = d->disk.raid_disk / n_prim;
4909 if (i >= n_bvds) {
4910 pr_err("BUG: invalid raid disk\n");
4911 goto out;
4912 }
4913 if (d->state_fd > 0)
4914 found[i]++;
4915 }
4916 ret = 2;
4917 for (i = 0; i < n_bvds; i++)
4918 if (!found[i]) {
4919 dprintf("BVD %d/%d failed\n", i, n_bvds);
4920 ret = 0;
4921 goto out;
4922 } else if (found[i] < n_prim) {
4923 dprintf("BVD %d/%d degraded\n", i, n_bvds);
4924 ret = 1;
4925 }
4926 out:
4927 free(found);
4928 return ret;
4929 }
4930
4931 /*
4932 * Check if the array 'a' is degraded but not failed.
4933 * If it is, find as many spares as are available and needed and
4934 * arrange for their inclusion.
4935 * We only choose devices which are not already in the array,
4936 * and prefer those with a spare-assignment to this array.
4937 * Otherwise we choose global spares - assuming always that
4938 * there is enough room.
4939 * For each spare that we assign, we return an 'mdinfo' which
4940 * describes the position for the device in the array.
4941 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4942 * the new phys_refnum and lba_offset values.
4943 *
4944 * Only worry about BVDs at the moment.
4945 */
4946 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4947 struct metadata_update **updates)
4948 {
4949 int working = 0;
4950 struct mdinfo *d;
4951 struct ddf_super *ddf = a->container->sb;
4952 int global_ok = 0;
4953 struct mdinfo *rv = NULL;
4954 struct mdinfo *di;
4955 struct metadata_update *mu;
4956 struct dl *dl;
4957 int i;
4958 unsigned int j;
4959 struct vcl *vcl;
4960 struct vd_config *vc;
4961 unsigned int n_bvd;
4962
4963 for (d = a->info.devs ; d ; d = d->next) {
4964 if ((d->curr_state & DS_FAULTY) &&
4965 d->state_fd >= 0)
4966 /* wait for Removal to happen */
4967 return NULL;
4968 if (d->state_fd >= 0)
4969 working ++;
4970 }
4971
4972 dprintf("working=%d (%d) level=%d\n", working,
4973 a->info.array.raid_disks,
4974 a->info.array.level);
4975 if (working == a->info.array.raid_disks)
4976 return NULL; /* array not degraded */
4977 switch (a->info.array.level) {
4978 case 1:
4979 if (working == 0)
4980 return NULL; /* failed */
4981 break;
4982 case 4:
4983 case 5:
4984 if (working < a->info.array.raid_disks - 1)
4985 return NULL; /* failed */
4986 break;
4987 case 6:
4988 if (working < a->info.array.raid_disks - 2)
4989 return NULL; /* failed */
4990 break;
4991 case 10:
4992 if (raid10_degraded(&a->info) < 1)
4993 return NULL;
4994 break;
4995 default: /* concat or stripe */
4996 return NULL; /* failed */
4997 }
4998
4999 /* For each slot, if it is not working, find a spare */
5000 dl = ddf->dlist;
5001 for (i = 0; i < a->info.array.raid_disks; i++) {
5002 for (d = a->info.devs ; d ; d = d->next)
5003 if (d->disk.raid_disk == i)
5004 break;
5005 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
5006 if (d && (d->state_fd >= 0))
5007 continue;
5008
5009 /* OK, this device needs recovery. Find a spare */
5010 again:
5011 for ( ; dl ; dl = dl->next) {
5012 unsigned long long esize;
5013 unsigned long long pos;
5014 struct mdinfo *d2;
5015 int is_global = 0;
5016 int is_dedicated = 0;
5017 be16 state;
5018
5019 if (dl->pdnum < 0)
5020 continue;
5021 state = ddf->phys->entries[dl->pdnum].state;
5022 if (be16_and(state,
5023 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
5024 !be16_and(state,
5025 cpu_to_be16(DDF_Online)))
5026 continue;
5027
5028 /* If in this array, skip */
5029 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
5030 if (d2->state_fd >= 0 &&
5031 d2->disk.major == dl->major &&
5032 d2->disk.minor == dl->minor) {
5033 dprintf("%x:%x (%08x) already in array\n",
5034 dl->major, dl->minor,
5035 be32_to_cpu(dl->disk.refnum));
5036 break;
5037 }
5038 if (d2)
5039 continue;
5040 if (be16_and(ddf->phys->entries[dl->pdnum].type,
5041 cpu_to_be16(DDF_Spare))) {
5042 /* Check spare assign record */
5043 if (dl->spare) {
5044 if (dl->spare->type & DDF_spare_dedicated) {
5045 /* check spare_ents for guid */
5046 unsigned int j;
5047 for (j = 0 ;
5048 j < be16_to_cpu
5049 (dl->spare
5050 ->populated);
5051 j++) {
5052 if (memcmp(dl->spare->spare_ents[j].guid,
5053 ddf->virt->entries[a->info.container_member].guid,
5054 DDF_GUID_LEN) == 0)
5055 is_dedicated = 1;
5056 }
5057 } else
5058 is_global = 1;
5059 }
5060 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
5061 cpu_to_be16(DDF_Global_Spare))) {
5062 is_global = 1;
5063 } else if (!be16_and(ddf->phys
5064 ->entries[dl->pdnum].state,
5065 cpu_to_be16(DDF_Failed))) {
5066 /* we can possibly use some of this */
5067 is_global = 1;
5068 }
5069 if ( ! (is_dedicated ||
5070 (is_global && global_ok))) {
5071 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
5072 is_dedicated, is_global);
5073 continue;
5074 }
5075
5076 /* We are allowed to use this device - is there space?
5077 * We need a->info.component_size sectors */
5078 esize = a->info.component_size;
5079 pos = find_space(ddf, dl, INVALID_SECTORS, &esize);
5080
5081 if (esize < a->info.component_size) {
5082 dprintf("%x:%x has no room: %llu %llu\n",
5083 dl->major, dl->minor,
5084 esize, a->info.component_size);
5085 /* No room */
5086 continue;
5087 }
5088
5089 /* Cool, we have a device with some space at pos */
5090 di = xcalloc(1, sizeof(*di));
5091 di->disk.number = i;
5092 di->disk.raid_disk = i;
5093 di->disk.major = dl->major;
5094 di->disk.minor = dl->minor;
5095 di->disk.state = 0;
5096 di->recovery_start = 0;
5097 di->data_offset = pos;
5098 di->component_size = a->info.component_size;
5099 di->next = rv;
5100 rv = di;
5101 dprintf("%x:%x (%08x) to be %d at %llu\n",
5102 dl->major, dl->minor,
5103 be32_to_cpu(dl->disk.refnum), i, pos);
5104
5105 break;
5106 }
5107 if (!dl && ! global_ok) {
5108 /* not enough dedicated spares, try global */
5109 global_ok = 1;
5110 dl = ddf->dlist;
5111 goto again;
5112 }
5113 }
5114
5115 if (!rv)
5116 /* No spares found */
5117 return rv;
5118 /* Now 'rv' has a list of devices to return.
5119 * Create a metadata_update record to update the
5120 * phys_refnum and lba_offset values
5121 */
5122 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5123 &n_bvd, &vcl);
5124 if (vc == NULL)
5125 return NULL;
5126
5127 mu = xmalloc(sizeof(*mu));
5128 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5129 free(mu);
5130 return NULL;
5131 }
5132
5133 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5134 mu->buf = xmalloc(mu->len);
5135 mu->space = NULL;
5136 mu->space_list = NULL;
5137 mu->next = *updates;
5138 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5139 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5140 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5141 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5142
5143 vc = (struct vd_config*)mu->buf;
5144 for (di = rv ; di ; di = di->next) {
5145 unsigned int i_sec, i_prim;
5146 i_sec = di->disk.raid_disk
5147 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5148 i_prim = di->disk.raid_disk
5149 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5150 vc = (struct vd_config *)(mu->buf
5151 + i_sec * ddf->conf_rec_len * 512);
5152 for (dl = ddf->dlist; dl; dl = dl->next)
5153 if (dl->major == di->disk.major &&
5154 dl->minor == di->disk.minor)
5155 break;
5156 if (!dl || dl->pdnum < 0) {
5157 pr_err("BUG: can't find disk %d (%d/%d)\n",
5158 di->disk.raid_disk,
5159 di->disk.major, di->disk.minor);
5160 return NULL;
5161 }
5162 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5163 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5164 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5165 be32_to_cpu(vc->phys_refnum[i_prim]),
5166 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5167 }
5168 *updates = mu;
5169 return rv;
5170 }
5171
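/* These are the default md layouts assumed by the DDF mapping:
 * left-symmetric for RAID5, rotating-N-continue for RAID6, and
 * near=2 (0x102) for RAID10.
 */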
5172 static int ddf_level_to_layout(int level)
5173 {
5174 switch(level) {
5175 case 0:
5176 case 1:
5177 return 0;
5178 case 5:
5179 return ALGORITHM_LEFT_SYMMETRIC;
5180 case 6:
5181 return ALGORITHM_ROTATING_N_CONTINUE;
5182 case 10:
5183 return 0x102;
5184 default:
5185 return UnSet;
5186 }
5187 }
5188
5189 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5190 {
5191 if (level && *level == UnSet)
5192 *level = LEVEL_CONTAINER;
5193
5194 if (level && layout && *layout == UnSet)
5195 *layout = ddf_level_to_layout(*level);
5196 }
5197
5198 struct superswitch super_ddf = {
5199 .examine_super = examine_super_ddf,
5200 .brief_examine_super = brief_examine_super_ddf,
5201 .brief_examine_subarrays = brief_examine_subarrays_ddf,
5202 .export_examine_super = export_examine_super_ddf,
5203 .detail_super = detail_super_ddf,
5204 .brief_detail_super = brief_detail_super_ddf,
5205 .validate_geometry = validate_geometry_ddf,
5206 .write_init_super = write_init_super_ddf,
5207 .add_to_super = add_to_super_ddf,
5208 .remove_from_super = remove_from_super_ddf,
5209 .load_container = load_container_ddf,
5210 .copy_metadata = copy_metadata_ddf,
5211 .kill_subarray = kill_subarray_ddf,
5212 .match_home = match_home_ddf,
5213 .uuid_from_super= uuid_from_super_ddf,
5214 .getinfo_super = getinfo_super_ddf,
5215 .update_super = update_super_ddf,
5216
5217 .avail_size = avail_size_ddf,
5218
5219 .compare_super = compare_super_ddf,
5220
5221 .load_super = load_super_ddf,
5222 .init_super = init_super_ddf,
5223 .store_super = store_super_ddf,
5224 .free_super = free_super_ddf,
5225 .match_metadata_desc = match_metadata_desc_ddf,
5226 .container_content = container_content_ddf,
5227 .default_geometry = default_geometry_ddf,
5228
5229 .external = 1,
5230
5231 /* for mdmon */
5232 .open_new = ddf_open_new,
5233 .set_array_state= ddf_set_array_state,
5234 .set_disk = ddf_set_disk,
5235 .sync_metadata = ddf_sync_metadata,
5236 .process_update = ddf_process_update,
5237 .prepare_update = ddf_prepare_update,
5238 .activate_spare = ddf_activate_spare,
5239 .name = "ddf",
5240 };