]> git.ipfire.org Git - thirdparty/mdadm.git/blob - super-ddf.c
DDF: update timestamp in DDF header.
[thirdparty/mdadm.git] / super-ddf.c
1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2014 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33
34 /* a non-official T10 name for creation GUIDs */
35 static char T10[] = "Linux-MD";
36
37 /* DDF timestamps are 1980 based, so we need to add
38 * second-in-decade-of-seventies to convert to linux timestamps.
39 * 10 years with 2 leap years.
40 */
41 #define DECADE (3600*24*(365*10+2))
42 unsigned long crc32(
43 unsigned long crc,
44 const unsigned char *buf,
45 unsigned len);
46
47 #define DDF_NOTFOUND (~0U)
48 #define DDF_CONTAINER (DDF_NOTFOUND-1)
49
50 /* Default for safe_mode_delay. Same value as for IMSM.
51 */
52 static const int DDF_SAFE_MODE_DELAY = 4000;
53
54 /* The DDF metadata handling.
55 * DDF metadata lives at the end of the device.
56 * The last 512 byte block provides an 'anchor' which is used to locate
57 * the rest of the metadata which usually lives immediately behind the anchor.
58 *
59 * Note:
60 * - all multibyte numeric fields are bigendian.
61 * - all strings are space padded.
62 *
63 */
64
/* Wrapper types for big-endian on-disk fields.  Wrapping the raw
 * __u16/__u32/__u64 in single-member structs makes accidentally mixing
 * byte-swapped on-disk values with native CPU values a compile error.
 */
typedef struct __be16 {
	__u16 _v16;
} be16;
/* Bitwise AND/OR/clear/set commute with byte order, so these act on the
 * raw big-endian representation directly (both operands must be be16). */
#define be16_eq(x, y) ((x)._v16 == (y)._v16)
#define be16_and(x, y) ((x)._v16 & (y)._v16)
#define be16_or(x, y) ((x)._v16 | (y)._v16)
#define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
#define be16_set(x, y) ((x)._v16 |= (y)._v16)

typedef struct __be32 {
	__u32 _v32;
} be32;
#define be32_eq(x, y) ((x)._v32 == (y)._v32)

typedef struct __be64 {
	__u64 _v64;
} be64;
#define be64_eq(x, y) ((x)._v64 == (y)._v64)

/* Convert a big-endian wrapper to a native CPU value. */
#define be16_to_cpu(be) __be16_to_cpu((be)._v16)
/* Convert a native CPU value into a big-endian wrapper. */
static inline be16 cpu_to_be16(__u16 x)
{
	be16 be = { ._v16 = __cpu_to_be16(x) };
	return be;
}

#define be32_to_cpu(be) __be32_to_cpu((be)._v32)
static inline be32 cpu_to_be32(__u32 x)
{
	be32 be = { ._v32 = __cpu_to_be32(x) };
	return be;
}

#define be64_to_cpu(be) __be64_to_cpu((be)._v64)
static inline be64 cpu_to_be64(__u64 x)
{
	be64 be = { ._v64 = __cpu_to_be64(x) };
	return be;
}
104
105 /* Primary Raid Level (PRL) */
106 #define DDF_RAID0 0x00
107 #define DDF_RAID1 0x01
108 #define DDF_RAID3 0x03
109 #define DDF_RAID4 0x04
110 #define DDF_RAID5 0x05
111 #define DDF_RAID1E 0x11
112 #define DDF_JBOD 0x0f
113 #define DDF_CONCAT 0x1f
114 #define DDF_RAID5E 0x15
115 #define DDF_RAID5EE 0x25
116 #define DDF_RAID6 0x06
117
118 /* Raid Level Qualifier (RLQ) */
119 #define DDF_RAID0_SIMPLE 0x00
120 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
121 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
122 #define DDF_RAID3_0 0x00 /* parity in first extent */
123 #define DDF_RAID3_N 0x01 /* parity in last extent */
124 #define DDF_RAID4_0 0x00 /* parity in first extent */
125 #define DDF_RAID4_N 0x01 /* parity in last extent */
126 /* these apply to raid5e and raid5ee as well */
127 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
128 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
129 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
130 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
131
132 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
133 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
134
135 /* Secondary RAID Level (SRL) */
136 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
137 #define DDF_2MIRRORED 0x01
138 #define DDF_2CONCAT 0x02
139 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
140
141 /* Magic numbers */
142 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
143 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
144 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
145 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
146 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
147 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
148 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
149 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
150 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
151 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
152
153 #define DDF_GUID_LEN 24
154 #define DDF_REVISION_0 "01.00.00"
155 #define DDF_REVISION_2 "01.02.00"
156
/* On-disk DDF header: exactly one 512-byte sector.  Three copies exist:
 * the anchor (last sector of the device) plus primary and secondary
 * copies located via primary_lba/secondary_lba.  All multi-byte fields
 * are bigendian (see file header comment).
 */
struct ddf_header {
	be32	magic;		/* DDF_HEADER_MAGIC */
	be32	crc;		/* crc of whole sector, computed with this
				 * field set to 0xffffffff - see calc_crc() */
	char	guid[DDF_GUID_LEN];
	char	revision[8];	/* 01.02.00 */
	be32	seq;		/* starts at '1' */
	be32	timestamp;
	__u8	openflag;	/* non-zero while a controller is modifying */
	__u8	foreignflag;
	__u8	enforcegroups;
	__u8	pad0;		/* 0xff */
	__u8	pad1[12];	/* 12 * 0xff */
	/* 64 bytes so far */
	__u8	header_ext[32];	/* reserved: fill with 0xff */
	be64	primary_lba;
	be64	secondary_lba;
	__u8	type;		/* DDF_HEADER_ANCHOR/PRIMARY/SECONDARY */
	__u8	pad2[3];	/* 0xff */
	be32	workspace_len;	/* sectors for vendor space -
				 * at least 32768(sectors) */
	be64	workspace_lba;
	be16	max_pd_entries;	/* one of 15, 63, 255, 1023, 4095 */
	be16	max_vd_entries;	/* 2^(4,6,8,10,12)-1 : i.e. as above */
	be16	max_partitions;	/* i.e. max num of configuration
				   record entries per disk */
	be16	config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
				      *12/512) */
	be16	max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
	__u8	pad3[54];	/* 0xff */
	/* 192 bytes so far */
	/* The following offsets are in sectors, relative to the header's
	 * own LBA; lengths are in sectors. */
	be32	controller_section_offset;
	be32	controller_section_length;
	be32	phys_section_offset;
	be32	phys_section_length;
	be32	virt_section_offset;
	be32	virt_section_length;
	be32	config_section_offset;
	be32	config_section_length;
	be32	data_section_offset;
	be32	data_section_length;
	be32	bbm_section_offset;
	be32	bbm_section_length;
	be32	diag_space_offset;
	be32	diag_space_length;
	be32	vendor_offset;
	be32	vendor_length;
	/* 256 bytes so far */
	__u8	pad4[256];	/* 0xff */
};
206
207 /* type field */
208 #define DDF_HEADER_ANCHOR 0x00
209 #define DDF_HEADER_PRIMARY 0x01
210 #define DDF_HEADER_SECONDARY 0x02
211
212 /* The content of the 'controller section' - global scope */
/* The content of the 'controller section' - global scope.
 * Identifies the controller (for Linux-MD: synthesised values) that last
 * wrote the metadata.  One 512-byte record. */
struct ddf_controller_data {
	be32	magic;			/* DDF_CONTROLLER_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	struct controller_type {
		be16 vendor_id;
		be16 device_id;
		be16 sub_vendor_id;
		be16 sub_device_id;
	} type;
	char	product_id[16];
	__u8	pad[8];		/* 0xff */
	__u8	vendor_data[448];
};
227
228 /* The content of phys_section - global scope */
/* The content of phys_section - global scope.
 * A header followed by one phys_disk_entry per physical disk known to
 * the container; entries are matched to devices via guid/refnum. */
struct phys_disk {
	be32	magic;		/* DDF_PHYS_RECORDS_MAGIC */
	be32	crc;
	be16	used_pdes;	/* This is a counter, not a max - the list
				 * of used entries may not be dense */
	be16	max_pdes;
	__u8	pad[52];
	struct phys_disk_entry {
		char	guid[DDF_GUID_LEN];
		be32	refnum;		/* unique reference, see disk_data */
		be16	type;		/* DDF_Active_in_VD etc - bitmap */
		be16	state;		/* DDF_Online etc - bitmap */
		be64	config_size;	/* DDF structures must be after here */
		char	path[18];	/* Another horrible structure really
					 * but is "used for information
					 * purposes only" */
		__u8	pad[6];
	} entries[0];
};
248
249 /* phys_disk_entry.type is a bitmap - bigendian remember */
250 #define DDF_Forced_PD_GUID 1
251 #define DDF_Active_in_VD 2
252 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
253 #define DDF_Spare 8 /* overrides Global_spare */
254 #define DDF_Foreign 16
255 #define DDF_Legacy 32 /* no DDF on this device */
256
257 #define DDF_Interface_mask 0xf00
258 #define DDF_Interface_SCSI 0x100
259 #define DDF_Interface_SAS 0x200
260 #define DDF_Interface_SATA 0x300
261 #define DDF_Interface_FC 0x400
262
263 /* phys_disk_entry.state is a bigendian bitmap */
264 #define DDF_Online 1
265 #define DDF_Failed 2 /* overrides 1,4,8 */
266 #define DDF_Rebuilding 4
267 #define DDF_Transition 8
268 #define DDF_SMART 16
269 #define DDF_ReadErrors 32
270 #define DDF_Missing 64
271
272 /* The content of the virt_section global scope */
/* The content of the virt_section global scope.
 * A header followed by one virtual_entry per virtual disk (array);
 * unused slots have an all-0xff guid (see all_ff()). */
struct virtual_disk {
	be32	magic;		/* DDF_VIRT_RECORDS_MAGIC */
	be32	crc;
	be16	populated_vdes;	/* number of entries in use */
	be16	max_vdes;
	__u8	pad[52];
	struct virtual_entry {
		char	guid[DDF_GUID_LEN];
		be16	unit;
		__u16	pad0;		/* 0xffff */
		be16	guid_crc;
		be16	type;		/* DDF_Shared etc - bitmap */
		__u8	state;		/* DDF_state_mask etc */
		__u8	init_state;	/* DDF_initstate_mask/DDF_access_mask */
		__u8	pad1[14];
		char	name[16];
	} entries[0];
};
291
292 /* virtual_entry.type is a bitmap - bigendian */
293 #define DDF_Shared 1
294 #define DDF_Enforce_Groups 2
295 #define DDF_Unicode 4
296 #define DDF_Owner_Valid 8
297
298 /* virtual_entry.state is a bigendian bitmap */
299 #define DDF_state_mask 0x7
300 #define DDF_state_optimal 0x0
301 #define DDF_state_degraded 0x1
302 #define DDF_state_deleted 0x2
303 #define DDF_state_missing 0x3
304 #define DDF_state_failed 0x4
305 #define DDF_state_part_optimal 0x5
306
307 #define DDF_state_morphing 0x8
308 #define DDF_state_inconsistent 0x10
309
310 /* virtual_entry.init_state is a bigendian bitmap */
311 #define DDF_initstate_mask 0x03
312 #define DDF_init_not 0x00
313 #define DDF_init_quick 0x01 /* initialisation is progress.
314 * i.e. 'state_inconsistent' */
315 #define DDF_init_full 0x02
316
317 #define DDF_access_mask 0xc0
318 #define DDF_access_rw 0x00
319 #define DDF_access_ro 0x80
320 #define DDF_access_blocked 0xc0
321
322 /* The content of the config_section - local scope
323 * It has multiple records each config_record_len sectors
324 * They can be vd_config or spare_assign
325 */
326
/* One configuration record describing (one BVD of) a virtual disk.
 * The record is config_record_len sectors long; the fixed part below is
 * followed by 'mppe' phys_refnum entries and then the LBA offsets
 * (see LBA_OFFSET()). */
struct vd_config {
	be32	magic;		/* DDF_VD_CONF_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	be32	timestamp;
	be32	seqnum;		/* bumped on update; highest wins */
	__u8	pad0[24];
	be16	prim_elmnt_count;	/* disks in this BVD */
	__u8	chunk_shift;	/* 0 == 512, 1==1024 etc */
	__u8	prl;		/* Primary RAID Level - DDF_RAID* */
	__u8	rlq;		/* RAID Level Qualifier */
	__u8	sec_elmnt_count;	/* number of BVDs (1 == no 2nd level) */
	__u8	sec_elmnt_seq;		/* which BVD this record describes */
	__u8	srl;		/* Secondary RAID Level - DDF_2* */
	be64	blocks;		/* blocks per component could be different
				 * on different component devices...(only
				 * for concat I hope) */
	be64	array_blocks;	/* blocks in array */
	__u8	pad1[8];
	be32	spare_refs[8];
	__u8	cache_pol[8];
	__u8	bg_rate;
	__u8	pad2[3];
	__u8	pad3[52];
	__u8	pad4[192];
	__u8	v0[32];	/* reserved- 0xff */
	__u8	v1[32];	/* reserved- 0xff */
	__u8	v2[16];	/* reserved- 0xff */
	__u8	v3[16];	/* reserved- 0xff */
	__u8	vendor[32];
	be32	phys_refnum[0];	/* refnum of each disk in sequence */
	/*__u64	lba_offset[0];  LBA offset in each phys.  Note extents in a
				bvd are always the same size */
};
/* The lba_offset array starts right after the mppe phys_refnum slots. */
#define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
362
363 /* vd_config.cache_pol[7] is a bitmap */
364 #define DDF_cache_writeback 1 /* else writethrough */
365 #define DDF_cache_wadaptive 2 /* only applies if writeback */
366 #define DDF_cache_readahead 4
367 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
368 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
369 #define DDF_cache_wallowed 32 /* enable write caching */
370 #define DDF_cache_rallowed 64 /* enable read caching */
371
/* A spare-assignment configuration record: lists the virtual disks
 * (by guid) that the containing physical disk is a spare for. */
struct spare_assign {
	be32	magic;		/* DDF_SPARE_ASSIGN_MAGIC */
	be32	crc;
	be32	timestamp;
	__u8	reserved[7];
	__u8	type;		/* DDF_spare_* bitmap */
	be16	populated;	/* SAEs used */
	be16	max;		/* max SAEs */
	__u8	pad[8];
	struct spare_assign_entry {
		char	guid[DDF_GUID_LEN];
		be16	secondary_element;
		__u8	pad[6];
	} spare_ents[0];
};
387 /* spare_assign.type is a bitmap */
388 #define DDF_spare_dedicated 0x1 /* else global */
389 #define DDF_spare_revertible 0x2 /* else committable */
390 #define DDF_spare_active 0x4 /* else not active */
391 #define DDF_spare_affinity 0x8 /* enclosure affinity */
392
393 /* The data_section contents - local scope */
/* The data_section contents - local scope.
 * Identifies this particular physical disk: its guid/refnum must match
 * one of the phys_disk entries. */
struct disk_data {
	be32	magic;		/* DDF_PHYS_DATA_MAGIC */
	be32	crc;
	char	guid[DDF_GUID_LEN];
	be32	refnum;		/* crc of some magic drive data ... */
	__u8	forced_ref;	/* set when above was not result of magic */
	__u8	forced_guid;	/* set if guid was forced rather than magic */
	__u8	vendor[32];
	__u8	pad[442];
};
404
405 /* bbm_section content */
/* bbm_section content - the bad block (remapping) log.
 * Each mapped_block entry remaps remap_count sectors starting at
 * defective_start to the spare area. */
struct bad_block_log {
	be32	magic;		/* DDF_BBM_LOG_MAGIC */
	be32	crc;
	be16	entry_count;
	be32	spare_count;
	__u8	pad[10];
	be64	first_spare;
	struct mapped_block {
		be64	defective_start;
		be32	replacement_start;
		be16	remap_count;
		__u8	pad[2];
	} entries[0];
};
420
421 /* Struct for internally holding ddf structures */
422 /* The DDF structure stored on each device is potentially
423 * quite different, as some data is global and some is local.
424 * The global data is:
425 * - ddf header
426 * - controller_data
427 * - Physical disk records
428 * - Virtual disk records
429 * The local data is:
430 * - Configuration records
431 * - Physical Disk data section
432 * ( and Bad block and vendor which I don't care about yet).
433 *
434 * The local data is parsed into separate lists as it is read
435 * and reconstructed for writing. This means that we only need
436 * to make config changes once and they are automatically
437 * propagated to all devices.
438 * The global (config and disk data) records are each in a list
439 * of separate data structures. When writing we find the entry
440 * or entries applicable to the particular device.
441 */
/* In-memory representation of the DDF metadata for one container.
 * Global sections (headers, controller, phys, virt) are kept verbatim;
 * local sections are parsed into the conflist/dlist lists (see the
 * explanatory comment above). */
struct ddf_super {
	struct ddf_header anchor, primary, secondary;
	struct ddf_controller_data controller;
	struct ddf_header *active;	/* points at primary or secondary */
	struct phys_disk	*phys;
	struct virtual_disk	*virt;
	char			*conf;	/* raw config section buffer */
	int			pdsize, vdsize;	/* bytes in phys/virt */
	unsigned int		max_part, mppe, conf_rec_len;
	int			currentdev;
	int			updates_pending;
	/* One vcl per virtual disk (array) in the container. */
	struct vcl {
		union {
			char space[512];
			struct {
				struct vcl	*next;
				unsigned int	vcnum; /* index into ->virt */
				/* For an array with a secondary level there are
				 * multiple vd_config structures, all with the same
				 * guid but with different sec_elmnt_seq.
				 * One of these structures is in 'conf' below.
				 * The others are in other_bvds, not in any
				 * particular order.
				 */
				struct vd_config **other_bvds;
				__u64		*block_sizes; /* NULL if all the same */
			};
		};
		struct vd_config conf;
	} *conflist, *currentconf;
	/* One dl per physical disk in the container. */
	struct dl {
		union {
			char space[512];
			struct {
				struct dl	*next;
				int major, minor;
				char *devname;
				int fd;
				unsigned long long size; /* sectors */
				be64 primary_lba; /* sectors */
				be64 secondary_lba; /* sectors */
				be64 workspace_lba; /* sectors */
				int pdnum;	/* index in ->phys */
				struct spare_assign *spare;
				void *mdupdate; /* hold metadata update */

				/* These fields used by auto-layout */
				int raiddisk; /* slot to fill in autolayout */
				__u64 esize;
				int displayed;
			};
		};
		struct disk_data disk;
		struct vcl *vlist[0]; /* max_part in size */
	} *dlist, *add_list;
};
498
499 #ifndef MDASSEMBLE
500 static int load_super_ddf_all(struct supertype *st, int fd,
501 void **sbp, char *devname);
502 static int get_svd_state(const struct ddf_super *, const struct vcl *);
503 static int
504 validate_geometry_ddf_container(struct supertype *st,
505 int level, int layout, int raiddisks,
506 int chunk, unsigned long long size,
507 unsigned long long data_offset,
508 char *dev, unsigned long long *freesize,
509 int verbose);
510
511 static int validate_geometry_ddf_bvd(struct supertype *st,
512 int level, int layout, int raiddisks,
513 int *chunk, unsigned long long size,
514 unsigned long long data_offset,
515 char *dev, unsigned long long *freesize,
516 int verbose);
517 #endif
518
519 static void free_super_ddf(struct supertype *st);
520 static int all_ff(const char *guid);
521 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
522 be32 refnum, unsigned int nmax,
523 const struct vd_config **bvd,
524 unsigned int *idx);
525 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
526 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
527 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
528 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i);
529 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
530 static int init_super_ddf_bvd(struct supertype *st,
531 mdu_array_info_t *info,
532 unsigned long long size,
533 char *name, char *homehost,
534 int *uuid, unsigned long long data_offset);
535
536 #ifndef offsetof
537 #define offsetof(t,f) ((size_t)&(((t*)0)->f))
538 #endif
539
#if DEBUG
/* Debug helper: dump state/init_state of every in-use virtual disk entry,
 * prefixed with the calling function's name (passed as 'msg'). */
static void pr_state(struct ddf_super *ddf, const char *msg)
{
	unsigned int i;
	dprintf("%s/%s: ", __func__, msg);
	for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
		/* an all-0xff guid marks an unused slot */
		if (all_ff(ddf->virt->entries[i].guid))
			continue;
		dprintf("%u(s=%02x i=%02x) ", i,
			ddf->virt->entries[i].state,
			ddf->virt->entries[i].init_state);
	}
	dprintf("\n");
}
#else
/* Compiled out when DEBUG is not set. */
static void pr_state(const struct ddf_super *ddf, const char *msg) {}
#endif
557
/* Mark the metadata dirty so it gets written out later.  The active
 * header's sequence number is bumped exactly once per dirty cycle:
 * repeated calls before the write-out return early. */
static void _ddf_set_updates_pending(struct ddf_super *ddf, const char *func)
{
	if (ddf->updates_pending)
		return;
	ddf->updates_pending = 1;
	/* a higher seq marks this copy as the freshest on next load */
	ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
	pr_state(ddf, func);
}

/* Convenience wrapper that records the caller's name for pr_state(). */
#define ddf_set_updates_pending(x) _ddf_set_updates_pending((x), __func__)
568
/* Compute the CRC of a DDF metadata section of 'len' bytes.
 * The crc field sits at the same offset in every section type (as in
 * struct ddf_header); the CRC is defined over the buffer with that field
 * set to 0xffffffff, and the original value is restored before return.
 * The result is returned bigendian, ready to be stored. */
static be32 calc_crc(void *buf, int len)
{
	/* crcs are always at the same place as in the ddf_header */
	struct ddf_header *ddf = buf;
	be32 oldcrc = ddf->crc;
	__u32 newcrc;
	ddf->crc = cpu_to_be32(0xffffffff);

	newcrc = crc32(0, buf, len);
	ddf->crc = oldcrc;
	/* The crc is stored (like everything) bigendian, so convert
	 * here for simplicity
	 */
	return cpu_to_be32(newcrc);
}
584
#define DDF_INVALID_LEVEL 0xff
#define DDF_NO_SECONDARY 0xff
/* Report an md level/layout/disk-count combination with no DDF
 * representation.  Always returns -1, for direct use as error return. */
static int err_bad_md_layout(const mdu_array_info_t *array)
{
	pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
	       array->level, array->layout, array->raid_disks);
	return -1;
}
593
/* Translate an md level/layout description into the DDF PRL/RLQ/SRL
 * convention, filling in conf->prl, rlq, srl, prim_elmnt_count and
 * sec_elmnt_count.  md RAID10 with near-copies (layout 0x102/0x103)
 * becomes DDF RAID1 plexes combined at a secondary level.
 * Returns 0 on success, -1 (via err_bad_md_layout) if the combination
 * cannot be expressed in DDF. */
static int layout_md2ddf(const mdu_array_info_t *array,
			 struct vd_config *conf)
{
	be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
	__u8 prl = DDF_INVALID_LEVEL, rlq = 0;
	__u8 sec_elmnt_count = 1;
	__u8 srl = DDF_NO_SECONDARY;

	switch (array->level) {
	case LEVEL_LINEAR:
		prl = DDF_CONCAT;
		break;
	case 0:
		rlq = DDF_RAID0_SIMPLE;
		prl = DDF_RAID0;
		break;
	case 1:
		/* DDF distinguishes 2-way and 3-way mirrors by RLQ */
		switch (array->raid_disks) {
		case 2:
			rlq = DDF_RAID1_SIMPLE;
			break;
		case 3:
			rlq = DDF_RAID1_MULTI;
			break;
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID1;
		break;
	case 4:
		if (array->layout != 0)
			return err_bad_md_layout(array);
		rlq = DDF_RAID4_N;
		prl = DDF_RAID4;
		break;
	case 5:
		switch (array->layout) {
		case ALGORITHM_LEFT_ASYMMETRIC:
			rlq = DDF_RAID5_N_RESTART;
			break;
		case ALGORITHM_RIGHT_ASYMMETRIC:
			rlq = DDF_RAID5_0_RESTART;
			break;
		case ALGORITHM_LEFT_SYMMETRIC:
			rlq = DDF_RAID5_N_CONTINUE;
			break;
		case ALGORITHM_RIGHT_SYMMETRIC:
			/* not mentioned in standard */
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID5;
		break;
	case 6:
		switch (array->layout) {
		case ALGORITHM_ROTATING_N_RESTART:
			rlq = DDF_RAID5_N_RESTART;
			break;
		case ALGORITHM_ROTATING_ZERO_RESTART:
			rlq = DDF_RAID6_0_RESTART;
			break;
		case ALGORITHM_ROTATING_N_CONTINUE:
			rlq = DDF_RAID5_N_CONTINUE;
			break;
		default:
			return err_bad_md_layout(array);
		}
		prl = DDF_RAID6;
		break;
	case 10:
		/* near-copy RAID10 only: n2 with even disks, n3 with a
		 * multiple of 3; each mirror set becomes one BVD */
		if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
			rlq = DDF_RAID1_SIMPLE;
			prim_elmnt_count =  cpu_to_be16(2);
			sec_elmnt_count = array->raid_disks / 2;
		} else if (array->raid_disks % 3 == 0
			   && array->layout == 0x103) {
			rlq = DDF_RAID1_MULTI;
			prim_elmnt_count = cpu_to_be16(3);
			sec_elmnt_count = array->raid_disks / 3;
		} else
			return err_bad_md_layout(array);
		srl = DDF_2SPANNED;
		prl = DDF_RAID1;
		break;
	default:
		return err_bad_md_layout(array);
	}
	conf->prl = prl;
	conf->prim_elmnt_count = prim_elmnt_count;
	conf->rlq = rlq;
	conf->srl = srl;
	conf->sec_elmnt_count = sec_elmnt_count;
	return 0;
}
688
/* Report a DDF PRL/RLQ/disk-count combination this implementation cannot
 * map to md.  Always returns -1, for direct use as error return. */
static int err_bad_ddf_layout(const struct vd_config *conf)
{
	pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
	       conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
	return -1;
}
695
696 static int layout_ddf2md(const struct vd_config *conf,
697 mdu_array_info_t *array)
698 {
699 int level = LEVEL_UNSUPPORTED;
700 int layout = 0;
701 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
702
703 if (conf->sec_elmnt_count > 1) {
704 /* see also check_secondary() */
705 if (conf->prl != DDF_RAID1 ||
706 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
707 pr_err("Unsupported secondary RAID level %u/%u\n",
708 conf->prl, conf->srl);
709 return -1;
710 }
711 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
712 layout = 0x102;
713 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
714 layout = 0x103;
715 else
716 return err_bad_ddf_layout(conf);
717 raiddisks *= conf->sec_elmnt_count;
718 level = 10;
719 goto good;
720 }
721
722 switch (conf->prl) {
723 case DDF_CONCAT:
724 level = LEVEL_LINEAR;
725 break;
726 case DDF_RAID0:
727 if (conf->rlq != DDF_RAID0_SIMPLE)
728 return err_bad_ddf_layout(conf);
729 level = 0;
730 break;
731 case DDF_RAID1:
732 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
733 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
734 return err_bad_ddf_layout(conf);
735 level = 1;
736 break;
737 case DDF_RAID4:
738 if (conf->rlq != DDF_RAID4_N)
739 return err_bad_ddf_layout(conf);
740 level = 4;
741 break;
742 case DDF_RAID5:
743 switch (conf->rlq) {
744 case DDF_RAID5_N_RESTART:
745 layout = ALGORITHM_LEFT_ASYMMETRIC;
746 break;
747 case DDF_RAID5_0_RESTART:
748 layout = ALGORITHM_RIGHT_ASYMMETRIC;
749 break;
750 case DDF_RAID5_N_CONTINUE:
751 layout = ALGORITHM_LEFT_SYMMETRIC;
752 break;
753 default:
754 return err_bad_ddf_layout(conf);
755 }
756 level = 5;
757 break;
758 case DDF_RAID6:
759 switch (conf->rlq) {
760 case DDF_RAID5_N_RESTART:
761 layout = ALGORITHM_ROTATING_N_RESTART;
762 break;
763 case DDF_RAID6_0_RESTART:
764 layout = ALGORITHM_ROTATING_ZERO_RESTART;
765 break;
766 case DDF_RAID5_N_CONTINUE:
767 layout = ALGORITHM_ROTATING_N_CONTINUE;
768 break;
769 default:
770 return err_bad_ddf_layout(conf);
771 }
772 level = 6;
773 break;
774 default:
775 return err_bad_ddf_layout(conf);
776 };
777
778 good:
779 array->level = level;
780 array->layout = layout;
781 array->raid_disks = raiddisks;
782 return 0;
783 }
784
/* Read one DDF header (primary: type==1, or secondary: type==2) from
 * sector 'lba' of the device (whose size is 'size' sectors) into *hdr,
 * and verify it is consistent with the already-loaded anchor.
 * Returns 1 if the header is valid and matches, 0 otherwise. */
static int load_ddf_header(int fd, unsigned long long lba,
			   unsigned long long size,
			   int type,
			   struct ddf_header *hdr, struct ddf_header *anchor)
{
	/* read a ddf header (primary or secondary) from fd/lba
	 * and check that it is consistent with anchor
	 * Need to check:
	 *   magic, crc, guid, rev, and LBA's header_type, and
	 *  everything after header_type must be the same
	 */
	if (lba >= size-1)
		return 0;

	if (lseek64(fd, lba<<9, 0) < 0)
		return 0;

	if (read(fd, hdr, 512) != 512)
		return 0;

	if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
		pr_err("%s: bad header magic\n", __func__);
		return 0;
	}
	if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
		pr_err("%s: bad CRC\n", __func__);
		return 0;
	}
	/* guid/revision/lba fields must match the anchor, the type must be
	 * the one requested, and everything from pad2 onwards (the section
	 * offsets/lengths etc) must be byte-identical to the anchor. */
	if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
	    memcmp(anchor->revision, hdr->revision, 8) != 0 ||
	    !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
	    !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
	    hdr->type != type ||
	    memcmp(anchor->pad2, hdr->pad2, 512 -
		   offsetof(struct ddf_header, pad2)) != 0) {
		pr_err("%s: header mismatch\n", __func__);
		return 0;
	}

	/* Looks good enough to me... */
	return 1;
}
827
828 static void *load_section(int fd, struct ddf_super *super, void *buf,
829 be32 offset_be, be32 len_be, int check)
830 {
831 unsigned long long offset = be32_to_cpu(offset_be);
832 unsigned long long len = be32_to_cpu(len_be);
833 int dofree = (buf == NULL);
834
835 if (check)
836 if (len != 2 && len != 8 && len != 32
837 && len != 128 && len != 512)
838 return NULL;
839
840 if (len > 1024)
841 return NULL;
842 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
843 buf = NULL;
844
845 if (!buf)
846 return NULL;
847
848 if (super->active->type == 1)
849 offset += be64_to_cpu(super->active->primary_lba);
850 else
851 offset += be64_to_cpu(super->active->secondary_lba);
852
853 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
854 if (dofree)
855 free(buf);
856 return NULL;
857 }
858 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
859 if (dofree)
860 free(buf);
861 return NULL;
862 }
863 return buf;
864 }
865
/* Load the anchor from the last sector of the device, validate it, then
 * load the primary and secondary headers it points to and pick one as
 * super->active.  The secondary is preferred over the primary only when
 * it has a higher sequence number (and is not mid-write, i.e. openflag
 * clear), or equal seq with the primary mid-write and the secondary not.
 * Returns 0 on success, 1 on I/O errors, 2 when no usable DDF metadata
 * is found. */
static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
{
	unsigned long long dsize;

	get_dev_size(fd, NULL, &dsize);

	if (lseek64(fd, dsize-512, 0) < 0) {
		if (devname)
			pr_err("Cannot seek to anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}
	if (read(fd, &super->anchor, 512) != 512) {
		if (devname)
			pr_err("Cannot read anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}
	if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
		if (devname)
			pr_err("no DDF anchor found on %s\n",
			       devname);
		return 2;
	}
	if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
		if (devname)
			pr_err("bad CRC on anchor on %s\n",
			       devname);
		return 2;
	}
	if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
	    memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
		if (devname)
			pr_err("can only support super revision"
			       " %.8s and earlier, not %.8s on %s\n",
			       DDF_REVISION_2, super->anchor.revision,devname);
		return 2;
	}
	super->active = NULL;
	if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
			    dsize >> 9,  1,
			    &super->primary, &super->anchor) == 0) {
		if (devname)
			pr_err("Failed to load primary DDF header "
			       "on %s\n", devname);
	} else
		super->active = &super->primary;

	if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
			    dsize >> 9,  2,
			    &super->secondary, &super->anchor)) {
		if (super->active == NULL
		    || (be32_to_cpu(super->primary.seq)
			< be32_to_cpu(super->secondary.seq) &&
			!super->secondary.openflag)
		    || (be32_to_cpu(super->primary.seq)
			== be32_to_cpu(super->secondary.seq) &&
			super->primary.openflag && !super->secondary.openflag)
			)
			super->active = &super->secondary;
	} else if (devname &&
		   be64_to_cpu(super->anchor.secondary_lba) != ~(__u64)0)
		/* an all-ones secondary_lba means "no secondary" - only
		 * complain when one was supposed to exist */
		pr_err("Failed to load secondary DDF header on %s\n",
		       devname);
	if (super->active == NULL)
		return 2;
	return 0;
}
934
/* Load the global-scope sections (controller, phys disk records, virt
 * disk records) referenced by the active header, and cache the header's
 * size limits in the super.  Returns 0 on success, 2 on failure (with
 * phys/virt freed and reset to NULL). */
static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
{
	void *ok;
	ok = load_section(fd, super, &super->controller,
			  super->active->controller_section_offset,
			  super->active->controller_section_length,
			  0);
	super->phys = load_section(fd, super, NULL,
				   super->active->phys_section_offset,
				   super->active->phys_section_length,
				   1);
	super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;

	super->virt = load_section(fd, super, NULL,
				   super->active->virt_section_offset,
				   super->active->virt_section_length,
				   1);
	super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
	if (!ok ||
	    !super->phys ||
	    !super->virt) {
		/* all-or-nothing: release whatever did load */
		free(super->phys);
		free(super->virt);
		super->phys = NULL;
		super->virt = NULL;
		return 2;
	}
	super->conflist = NULL;
	super->dlist = NULL;

	super->max_part = be16_to_cpu(super->active->max_partitions);
	super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
	super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
	return 0;
}
970
971 #define DDF_UNUSED_BVD 0xff
972 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
973 {
974 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
975 unsigned int i, vdsize;
976 void *p;
977 if (n_vds == 0) {
978 vcl->other_bvds = NULL;
979 return 0;
980 }
981 vdsize = ddf->conf_rec_len * 512;
982 if (posix_memalign(&p, 512, n_vds *
983 (vdsize + sizeof(struct vd_config *))) != 0)
984 return -1;
985 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
986 for (i = 0; i < n_vds; i++) {
987 vcl->other_bvds[i] = p + i * vdsize;
988 memset(vcl->other_bvds[i], 0, vdsize);
989 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
990 }
991 return 0;
992 }
993
/* Record a BVD configuration record ('len' bytes) in vcl->other_bvds.
 * If a record with the same sec_elmnt_seq is already present, it is
 * replaced only by a record with a strictly higher seqnum; otherwise the
 * record goes into the first unused slot (sec_elmnt_seq ==
 * DDF_UNUSED_BVD).  If no slot is free an error is logged and the record
 * is dropped. */
static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
			  unsigned int len)
{
	int i;
	for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
		if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
			break;

	if (i < vcl->conf.sec_elmnt_count-1) {
		/* existing record for this BVD - keep the newer one */
		if (be32_to_cpu(vd->seqnum) <=
		    be32_to_cpu(vcl->other_bvds[i]->seqnum))
			return;
	} else {
		/* first record for this BVD - find a free slot */
		for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
			if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
				break;
		if (i == vcl->conf.sec_elmnt_count-1) {
			pr_err("no space for sec level config %u, count is %u\n",
			       vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
			return;
		}
	}
	memcpy(vcl->other_bvds[i], vd, len);
}
1018
/* Read the per-disk ("local") DDF metadata from 'fd': the disk-data
 * section describing this physical disk, then the configuration-record
 * section listing the virtual disks this disk takes part in, merging
 * new/updated vd_config records into super->conflist.
 * 'keep' != 0 stores fd in the new dl entry and keeps it open.
 * Returns 0 on success, 1 on allocation failure.
 * NOTE(review): the return values of load_section() and fstat() are
 * not checked here - confirm they cannot fail at this point.
 */
static int load_ddf_local(int fd, struct ddf_super *super,
			  char *devname, int keep)
{
	struct dl *dl;
	struct stat stb;
	char *conf;
	unsigned int i;
	unsigned int confsec;
	int vnum;
	unsigned int max_virt_disks =
		be16_to_cpu(super->active->max_vd_entries);
	unsigned long long dsize;

	/* First the local disk info */
	if (posix_memalign((void**)&dl, 512,
			   sizeof(*dl) +
			   (super->max_part) * sizeof(dl->vlist[0])) != 0) {
		pr_err("%s could not allocate disk info buffer\n",
		       __func__);
		return 1;
	}

	load_section(fd, super, &dl->disk,
		     super->active->data_section_offset,
		     super->active->data_section_length,
		     0);
	dl->devname = devname ? xstrdup(devname) : NULL;

	fstat(fd, &stb);
	dl->major = major(stb.st_rdev);
	dl->minor = minor(stb.st_rdev);
	dl->next = super->dlist;
	dl->fd = keep ? fd : -1;

	dl->size = 0;
	if (get_dev_size(fd, devname, &dsize))
		dl->size = dsize >> 9;	/* size in 512-byte sectors */
	/* If the disks have different sizes, the LBAs will differ
	 * between phys disks.
	 * At this point here, the values in super->active must be valid
	 * for this phys disk. */
	dl->primary_lba = super->active->primary_lba;
	dl->secondary_lba = super->active->secondary_lba;
	dl->workspace_lba = super->active->workspace_lba;
	dl->spare = NULL;
	for (i = 0 ; i < super->max_part ; i++)
		dl->vlist[i] = NULL;
	super->dlist = dl;
	dl->pdnum = -1;
	/* Find this disk's slot in the phys table by GUID (-1 if absent). */
	for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
		if (memcmp(super->phys->entries[i].guid,
			   dl->disk.guid, DDF_GUID_LEN) == 0)
			dl->pdnum = i;

	/* Now the config list. */
	/* 'conf' is an array of config entries, some of which are
	 * probably invalid.  Those which are good need to be copied into
	 * the conflist
	 */

	conf = load_section(fd, super, super->conf,
			    super->active->config_section_offset,
			    super->active->config_section_length,
			    0);
	super->conf = conf;
	vnum = 0;
	/* config_section_length is in 512-byte sectors; each record
	 * occupies conf_rec_len sectors. */
	for (confsec = 0;
	     confsec < be32_to_cpu(super->active->config_section_length);
	     confsec += super->conf_rec_len) {
		struct vd_config *vd =
			(struct vd_config *)((char*)conf + confsec*512);
		struct vcl *vcl;

		if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
			/* Spare-assignment record: only the first is kept. */
			if (dl->spare)
				continue;
			if (posix_memalign((void**)&dl->spare, 512,
					   super->conf_rec_len*512) != 0) {
				pr_err("%s could not allocate spare info buf\n",
				       __func__);
				return 1;
			}

			memcpy(dl->spare, vd, super->conf_rec_len*512);
			continue;
		}
		if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
			/* Must be vendor-unique - I cannot handle those */
			continue;

		/* Have we already seen a config record for this VD GUID? */
		for (vcl = super->conflist; vcl; vcl = vcl->next) {
			if (memcmp(vcl->conf.guid,
				   vd->guid, DDF_GUID_LEN) == 0)
				break;
		}

		if (vcl) {
			dl->vlist[vnum++] = vcl;
			if (vcl->other_bvds != NULL &&
			    vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
				/* Secondary BVD of an SVD array. */
				add_other_bvd(vcl, vd, super->conf_rec_len*512);
				continue;
			}
			/* Ignore records no newer than what we hold. */
			if (be32_to_cpu(vd->seqnum) <=
			    be32_to_cpu(vcl->conf.seqnum))
				continue;
		} else {
			if (posix_memalign((void**)&vcl, 512,
					   (super->conf_rec_len*512 +
					    offsetof(struct vcl, conf))) != 0) {
				pr_err("%s could not allocate vcl buf\n",
				       __func__);
				return 1;
			}
			vcl->next = super->conflist;
			vcl->block_sizes = NULL; /* FIXME not for CONCAT */
			vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
			if (alloc_other_bvds(super, vcl) != 0) {
				pr_err("%s could not allocate other bvds\n",
				       __func__);
				free(vcl);
				return 1;
			};
			super->conflist = vcl;
			dl->vlist[vnum++] = vcl;
		}
		memcpy(&vcl->conf, vd, super->conf_rec_len*512);
		/* Map the GUID to a virtual-disk number, if present. */
		for (i=0; i < max_virt_disks ; i++)
			if (memcmp(super->virt->entries[i].guid,
				   vcl->conf.guid, DDF_GUID_LEN)==0)
				break;
		if (i < max_virt_disks)
			vcl->vcnum = i;
	}

	return 0;
}
1156
1157 static int load_super_ddf(struct supertype *st, int fd,
1158 char *devname)
1159 {
1160 unsigned long long dsize;
1161 struct ddf_super *super;
1162 int rv;
1163
1164 if (get_dev_size(fd, devname, &dsize) == 0)
1165 return 1;
1166
1167 if (test_partition(fd))
1168 /* DDF is not allowed on partitions */
1169 return 1;
1170
1171 /* 32M is a lower bound */
1172 if (dsize <= 32*1024*1024) {
1173 if (devname)
1174 pr_err("%s is too small for ddf: "
1175 "size is %llu sectors.\n",
1176 devname, dsize>>9);
1177 return 1;
1178 }
1179 if (dsize & 511) {
1180 if (devname)
1181 pr_err("%s is an odd size for ddf: "
1182 "size is %llu bytes.\n",
1183 devname, dsize);
1184 return 1;
1185 }
1186
1187 free_super_ddf(st);
1188
1189 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1190 pr_err("malloc of %zu failed.\n",
1191 sizeof(*super));
1192 return 1;
1193 }
1194 memset(super, 0, sizeof(*super));
1195
1196 rv = load_ddf_headers(fd, super, devname);
1197 if (rv) {
1198 free(super);
1199 return rv;
1200 }
1201
1202 /* Have valid headers and have chosen the best. Let's read in the rest*/
1203
1204 rv = load_ddf_global(fd, super, devname);
1205
1206 if (rv) {
1207 if (devname)
1208 pr_err("Failed to load all information "
1209 "sections on %s\n", devname);
1210 free(super);
1211 return rv;
1212 }
1213
1214 rv = load_ddf_local(fd, super, devname, 0);
1215
1216 if (rv) {
1217 if (devname)
1218 pr_err("Failed to load all information "
1219 "sections on %s\n", devname);
1220 free(super);
1221 return rv;
1222 }
1223
1224 /* Should possibly check the sections .... */
1225
1226 st->sb = super;
1227 if (st->ss == NULL) {
1228 st->ss = &super_ddf;
1229 st->minor_version = 0;
1230 st->max_devs = 512;
1231 }
1232 return 0;
1233
1234 }
1235
1236 static void free_super_ddf(struct supertype *st)
1237 {
1238 struct ddf_super *ddf = st->sb;
1239 if (ddf == NULL)
1240 return;
1241 free(ddf->phys);
1242 free(ddf->virt);
1243 free(ddf->conf);
1244 while (ddf->conflist) {
1245 struct vcl *v = ddf->conflist;
1246 ddf->conflist = v->next;
1247 if (v->block_sizes)
1248 free(v->block_sizes);
1249 if (v->other_bvds)
1250 /*
1251 v->other_bvds[0] points to beginning of buffer,
1252 see alloc_other_bvds()
1253 */
1254 free(v->other_bvds[0]);
1255 free(v);
1256 }
1257 while (ddf->dlist) {
1258 struct dl *d = ddf->dlist;
1259 ddf->dlist = d->next;
1260 if (d->fd >= 0)
1261 close(d->fd);
1262 if (d->spare)
1263 free(d->spare);
1264 free(d);
1265 }
1266 while (ddf->add_list) {
1267 struct dl *d = ddf->add_list;
1268 ddf->add_list = d->next;
1269 if (d->fd >= 0)
1270 close(d->fd);
1271 if (d->spare)
1272 free(d->spare);
1273 free(d);
1274 }
1275 free(ddf);
1276 st->sb = NULL;
1277 }
1278
1279 static struct supertype *match_metadata_desc_ddf(char *arg)
1280 {
1281 /* 'ddf' only supports containers */
1282 struct supertype *st;
1283 if (strcmp(arg, "ddf") != 0 &&
1284 strcmp(arg, "default") != 0
1285 )
1286 return NULL;
1287
1288 st = xcalloc(1, sizeof(*st));
1289 st->ss = &super_ddf;
1290 st->max_devs = 512;
1291 st->minor_version = 0;
1292 st->sb = NULL;
1293 return st;
1294 }
1295
1296 #ifndef MDASSEMBLE
1297
/* Human-readable names for virtual-disk state values (low 3 bits of
 * virtual_entry->state). */
static mapping_t ddf_state[] = {
	{ "Optimal", 0},
	{ "Degraded", 1},
	{ "Deleted", 2},
	{ "Missing", 3},
	{ "Failed", 4},
	{ "Partially Optimal", 5},
	{ "-reserved-", 6},
	{ "-reserved-", 7},
	{ NULL, 0}
};

/* Names for the initialisation-state field of a virtual entry. */
static mapping_t ddf_init_state[] = {
	{ "Not Initialised", 0},
	{ "QuickInit in Progress", 1},
	{ "Fully Initialised", 2},
	{ "*UNKNOWN*", 3},
	{ NULL, 0}
};
/* Names for the access-mode bits of a virtual entry's init_state. */
static mapping_t ddf_access[] = {
	{ "Read/Write", 0},
	{ "Reserved", 1},
	{ "Read Only", 2},
	{ "Blocked (no access)", 3},
	{ NULL ,0}
};

/* Primary RAID level names for the DDF PRL codes. */
static mapping_t ddf_level[] = {
	{ "RAID0", DDF_RAID0},
	{ "RAID1", DDF_RAID1},
	{ "RAID3", DDF_RAID3},
	{ "RAID4", DDF_RAID4},
	{ "RAID5", DDF_RAID5},
	{ "RAID1E",DDF_RAID1E},
	{ "JBOD", DDF_JBOD},
	{ "CONCAT",DDF_CONCAT},
	{ "RAID5E",DDF_RAID5E},
	{ "RAID5EE",DDF_RAID5EE},
	{ "RAID6", DDF_RAID6},
	{ NULL, 0}
};
/* Secondary RAID level names for the DDF SRL codes. */
static mapping_t ddf_sec_level[] = {
	{ "Striped", DDF_2STRIPED},
	{ "Mirrored", DDF_2MIRRORED},
	{ "Concat", DDF_2CONCAT},
	{ "Spanned", DDF_2SPANNED},
	{ NULL, 0}
};
1346 #endif
1347
1348 static int all_ff(const char *guid)
1349 {
1350 int i;
1351 for (i = 0; i < DDF_GUID_LEN; i++)
1352 if (guid[i] != (char)0xff)
1353 return 0;
1354 return 1;
1355 }
1356
1357 static const char *guid_str(const char *guid)
1358 {
1359 static char buf[DDF_GUID_LEN*2+1];
1360 int i;
1361 char *p = buf;
1362 for (i = 0; i < DDF_GUID_LEN; i++) {
1363 unsigned char c = guid[i];
1364 if (c >= 32 && c < 127)
1365 p += sprintf(p, "%c", c);
1366 else
1367 p += sprintf(p, "%02x", c);
1368 }
1369 *p = '\0';
1370 return (const char *) buf;
1371 }
1372
1373 #ifndef MDASSEMBLE
/* Print a GUID to stdout: first as ':'-separated hex groups, then in
 * parentheses any initial ASCII sequence and, when 'tstamp' is set,
 * the timestamp stored in bytes 16-19 (seconds since 1980, hence the
 * DECADE adjustment).  GUIDs are part (or all) ASCII and part binary,
 * and tend to be space padded.
 */
static void print_guid(char *guid, int tstamp)
{
	int l = DDF_GUID_LEN;
	int i;

	/* Hex dump, 4 bytes per group. */
	for (i=0 ; i<DDF_GUID_LEN ; i++) {
		if ((i&3)==0 && i != 0) printf(":");
		printf("%02X", guid[i]&255);	/* &255: mask sign extension */
	}

	printf("\n (");
	/* Strip trailing space padding. */
	while (l && guid[l-1] == ' ')
		l--;
	/* Print the initial run of printable ASCII. */
	for (i=0 ; i<l ; i++) {
		if (guid[i] >= 0x20 && guid[i] < 0x7f)
			fputc(guid[i], stdout);
		else
			break;
	}
	if (tstamp) {
		time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
		char tbuf[100];
		struct tm *tm;
		tm = localtime(&then);
		strftime(tbuf, 100, " %D %T",tm);
		fputs(tbuf, stdout);
	}
	printf(")");
}
1409
/* Print detail lines for virtual disk 'n' (identified by 'guid') from
 * every matching, CRC-valid configuration record: member disks, chunk
 * size, RAID level, secondary level/position and sizes. */
static void examine_vd(int n, struct ddf_super *sb, char *guid)
{
	int crl = sb->conf_rec_len;
	struct vcl *vcl;

	for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
		unsigned int i;
		struct vd_config *vc = &vcl->conf;

		/* Skip records with a bad CRC or for a different VD. */
		if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
			continue;
		if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
			continue;

		/* Ok, we know about this VD, let's give more details */
		printf(" Raid Devices[%d] : %d (", n,
		       be16_to_cpu(vc->prim_elmnt_count));
		/* For each member, print its index in the phys table,
		 * or "--" when the refnum is not found there. */
		for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
			int j;
			int cnt = be16_to_cpu(sb->phys->max_pdes);
			for (j=0; j<cnt; j++)
				if (be32_eq(vc->phys_refnum[i],
					    sb->phys->entries[j].refnum))
					break;
			if (i) printf(" ");
			if (j < cnt)
				printf("%d", j);
			else
				printf("--");
		}
		printf(")\n");
		/* chunk_shift == 255 appears to mean "no chunk size"
		 * - TODO confirm against the DDF spec. */
		if (vc->chunk_shift != 255)
			printf(" Chunk Size[%d] : %d sectors\n", n,
			       1 << vc->chunk_shift);
		printf(" Raid Level[%d] : %s\n", n,
		       map_num(ddf_level, vc->prl)?:"-unknown-");
		if (vc->sec_elmnt_count != 1) {
			printf(" Secondary Position[%d] : %d of %d\n", n,
			       vc->sec_elmnt_seq, vc->sec_elmnt_count);
			printf(" Secondary Level[%d] : %s\n", n,
			       map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
		}
		/* /2: presumably sectors -> KiB */
		printf(" Device Size[%d] : %llu\n", n,
		       be64_to_cpu(vc->blocks)/2);
		printf(" Array Size[%d] : %llu\n", n,
		       be64_to_cpu(vc->array_blocks)/2);
	}
}
1458
/* Print a summary of every populated virtual-disk entry, followed by
 * per-VD configuration details from examine_vd(). */
static void examine_vds(struct ddf_super *sb)
{
	int cnt = be16_to_cpu(sb->virt->populated_vdes);
	unsigned int i;
	printf(" Virtual Disks : %d\n", cnt);

	for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
		struct virtual_entry *ve = &sb->virt->entries[i];
		/* An all-0xff GUID marks an unused slot. */
		if (all_ff(ve->guid))
			continue;
		printf("\n");
		printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
		printf("\n");
		printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
		printf(" state[%d] : %s, %s%s\n", i,
		       map_num(ddf_state, ve->state & 7),
		       (ve->state & DDF_state_morphing) ? "Morphing, ": "",
		       (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
		printf(" init state[%d] : %s\n", i,
		       map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
		printf(" access[%d] : %s\n", i,
		       map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
		printf(" Name[%d] : %.16s\n", i, ve->name);
		examine_vd(i, sb, ve->guid);
	}
	if (cnt) printf("\n");
}
1486
/* Print the physical-disk table: one line per used entry with refnum,
 * size, resolved device node and type/state flags, followed by any
 * disks we loaded that have no entry in the metadata. */
static void examine_pds(struct ddf_super *sb)
{
	int cnt = be16_to_cpu(sb->phys->max_pdes);
	int i;
	struct dl *dl;
	int unlisted = 0;
	printf(" Physical Disks : %d\n", cnt);
	printf(" Number RefNo Size Device Type/State\n");

	for (dl = sb->dlist; dl; dl = dl->next)
		dl->displayed = 0;

	for (i=0 ; i<cnt ; i++) {
		struct phys_disk_entry *pd = &sb->phys->entries[i];
		int type = be16_to_cpu(pd->type);
		int state = be16_to_cpu(pd->state);

		if (be32_to_cpu(pd->refnum) == 0xffffffff)
			/* Not in use */
			continue;
		//printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
		//printf("\n");
		printf(" %3d %08x ", i,
		       be32_to_cpu(pd->refnum));
		/* >>1: presumably sectors -> KiB, matching the K suffix */
		printf("%8lluK ",
		       be64_to_cpu(pd->config_size)>>1);
		/* Resolve the refnum to a device node, if we have it. */
		for (dl = sb->dlist; dl ; dl = dl->next) {
			if (be32_eq(dl->disk.refnum, pd->refnum)) {
				char *dv = map_dev(dl->major, dl->minor, 0);
				if (dv) {
					printf("%-15s", dv);
					break;
				}
			}
		}
		if (!dl)
			printf("%15s","");
		else
			dl->displayed = 1;
		printf(" %s%s%s%s%s",
		       (type&2) ? "active":"",
		       (type&4) ? "Global-Spare":"",
		       (type&8) ? "spare" : "",
		       (type&16)? ", foreign" : "",
		       (type&32)? "pass-through" : "");
		if (state & DDF_Failed)
			/* This over-rides these three */
			state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
		printf("/%s%s%s%s%s%s%s",
		       (state&1)? "Online": "Offline",
		       (state&2)? ", Failed": "",
		       (state&4)? ", Rebuilding": "",
		       (state&8)? ", in-transition": "",
		       (state&16)? ", SMART-errors": "",
		       (state&32)? ", Unrecovered-Read-Errors": "",
		       (state&64)? ", Missing" : "");
		printf("\n");
	}
	/* Disks seen on the system but absent from the phys table. */
	for (dl = sb->dlist; dl; dl = dl->next) {
		char *dv;
		if (dl->displayed)
			continue;
		if (!unlisted)
			printf(" Physical disks not in metadata!:\n");
		unlisted = 1;
		dv = map_dev(dl->major, dl->minor, 0);
		printf(" %08x %s\n", be32_to_cpu(dl->disk.refnum),
		       dv ? dv : "-unknown-");
	}
	if (unlisted)
		printf("\n");
}
1559
/* Top-level "--examine" output for a DDF container: anchor/header
 * fields, then the virtual- and physical-disk tables.
 * 'homehost' is unused here. */
static void examine_super_ddf(struct supertype *st, char *homehost)
{
	struct ddf_super *sb = st->sb;

	printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
	printf(" Version : %.8s\n", sb->anchor.revision);
	printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
	printf("\n");
	printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
	printf("\n");
	printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
	/* A valid magic in the secondary header means a redundant copy
	 * of the metadata exists. */
	printf(" Redundant hdr : %s\n", (be32_eq(sb->secondary.magic,
						 DDF_HEADER_MAGIC)
					 ?"yes" : "no"));
	examine_vds(sb);
	examine_pds(sb);
}
1577
static unsigned int get_vd_num_of_subarray(struct supertype *st)
{
	/*
	 * Figure out the VD number for this supertype.
	 * Returns DDF_CONTAINER for the container itself,
	 * and DDF_NOTFOUND on error.
	 */
	struct ddf_super *ddf = st->sb;
	struct mdinfo *sra;
	char *sub, *end;
	unsigned int vcnum;

	if (*st->container_devnm == '\0')
		return DDF_CONTAINER;

	/* NOTE(review): 'sra' is not freed on any path below - looks
	 * like a leak; confirm whether sysfs_free() should be called
	 * before returning. */
	sra = sysfs_read(-1, st->devnm, GET_VERSION);
	if (!sra || sra->array.major_version != -1 ||
	    sra->array.minor_version != -2 ||
	    !is_subarray(sra->text_version))
		return DDF_NOTFOUND;

	/* text_version appears to be "/<container>/<vcnum>"; parse the
	 * number after the second '/'. */
	sub = strchr(sra->text_version + 1, '/');
	if (sub != NULL)
		vcnum = strtoul(sub + 1, &end, 10);
	/* short-circuit keeps 'vcnum' and 'end' unread when sub == NULL */
	if (sub == NULL || *sub == '\0' || *end != '\0' ||
	    vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
		return DDF_NOTFOUND;

	return vcnum;
}
1608
/* Print a one-line "ARRAY" entry for the container ('verbose' is
 * unused). */
static void brief_examine_super_ddf(struct supertype *st, int verbose)
{
	/* We just write a generic DDF ARRAY entry
	 */
	struct mdinfo info;
	char nbuf[64];
	getinfo_super_ddf(st, &info, NULL);
	fname_from_uuid(st, &info, nbuf, ':');

	/* nbuf + 5 skips the 5-character prefix added by
	 * fname_from_uuid() - presumably the metadata tag; the same
	 * idiom is used throughout this file. */
	printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
}
1620
static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
{
	/* We write a DDF ARRAY member entry for each vd, identifying container
	 * by uuid and member by unit number and uuid.
	 */
	struct ddf_super *ddf = st->sb;
	struct mdinfo info;
	unsigned int i;
	char nbuf[64];
	getinfo_super_ddf(st, &info, NULL);
	fname_from_uuid(st, &info, nbuf, ':');

	for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
		struct virtual_entry *ve = &ddf->virt->entries[i];
		/* Temporary on-stack vcl so uuid_from_super_ddf() can
		 * compute this entry's subarray uuid. */
		struct vcl vcl;
		char nbuf1[64];
		char namebuf[17];
		if (all_ff(ve->guid))
			continue;
		memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
		/* NOTE(review): currentconf is left pointing at the
		 * on-stack vcl after this function returns - confirm
		 * no later caller dereferences it. */
		ddf->currentconf =&vcl;
		vcl.vcnum = i;
		uuid_from_super_ddf(st, info.uuid);
		fname_from_uuid(st, &info, nbuf1, ':');
		_ddf_array_name(namebuf, ddf, i);
		printf("ARRAY%s%s container=%s member=%d UUID=%s\n",
		       namebuf[0] == '\0' ? "" : " /dev/md/", namebuf,
		       nbuf+5, i, nbuf1+5);
	}
}
1651
1652 static void export_examine_super_ddf(struct supertype *st)
1653 {
1654 struct mdinfo info;
1655 char nbuf[64];
1656 getinfo_super_ddf(st, &info, NULL);
1657 fname_from_uuid(st, &info, nbuf, ':');
1658 printf("MD_METADATA=ddf\n");
1659 printf("MD_LEVEL=container\n");
1660 printf("MD_UUID=%s\n", nbuf+5);
1661 printf("MD_DEVICES=%u\n",
1662 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1663 }
1664
/* Copy all DDF metadata (anchor plus primary and secondary headers and
 * their sections) from device 'from' to the same offsets on 'to'.
 * Returns 0 on success, 1 on any error. */
static int copy_metadata_ddf(struct supertype *st, int from, int to)
{
	void *buf;
	unsigned long long dsize, offset;
	int bytes;
	struct ddf_header *ddf;
	int written = 0;

	/* The meta consists of an anchor, a primary, and a secondary.
	 * This all lives at the end of the device.
	 * So it is easiest to find the earliest of primary and
	 * secondary, and copy everything from there.
	 *
	 * Anchor is 512 from end.  It contains primary_lba and secondary_lba
	 * we choose one of those
	 */

	if (posix_memalign(&buf, 4096, 4096) != 0)
		return 1;

	if (!get_dev_size(from, NULL, &dsize))
		goto err;

	/* Read and validate the anchor header in the last sector. */
	if (lseek64(from, dsize-512, 0) < 0)
		goto err;
	if (read(from, buf, 512) != 512)
		goto err;
	ddf = buf;
	if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
	    !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
	    (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
	     memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
		goto err;

	/* Start at the lowest of anchor, primary, secondary. */
	offset = dsize - 512;
	if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
		offset = be64_to_cpu(ddf->primary_lba) << 9;
	if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
		offset = be64_to_cpu(ddf->secondary_lba) << 9;

	bytes = dsize - offset;

	if (lseek64(from, offset, 0) < 0 ||
	    lseek64(to, offset, 0) < 0)
		goto err;
	/* Copy in chunks of at most 4k. */
	while (written < bytes) {
		int n = bytes - written;
		if (n > 4096)
			n = 4096;
		if (read(from, buf, n) != n)
			goto err;
		if (write(to, buf, n) != n)
			goto err;
		written += n;
	}
	free(buf);
	return 0;
err:
	free(buf);
	return 1;
}
1726
/* "--detail" output for DDF: currently unimplemented (no-op). */
static void detail_super_ddf(struct supertype *st, char *homehost)
{
	/* FIXME later
	 * Could print DDF GUID
	 * Need to find which array
	 * If whole, briefly list all arrays
	 * If one, give name
	 */
}
1736
/* Controller-GUID prefixes (8-byte T10 vendor IDs) whose firmware is
 * known to rewrite VD GUIDs on every boot; see
 * volume_id_is_reliable(). */
static const char *vendors_with_variable_volume_UUID[] = {
	"LSI ",
};
1740
1741 static int volume_id_is_reliable(const struct ddf_super *ddf)
1742 {
1743 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1744 int i;
1745 for (i = 0; i < n; i++)
1746 if (!memcmp(ddf->controller.guid,
1747 vendors_with_variable_volume_UUID[i], 8))
1748 return 0;
1749 return 1;
1750 }
1751
/* Compute the md UUID for subarray 'vcnum'.  When the vendor's VD
 * GUIDs are stable, hash the VD GUID directly; otherwise derive a
 * pseudo-UUID from properties expected to remain constant (see the
 * comment below). */
static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
				 unsigned int vcnum, int uuid[4])
{
	char buf[DDF_GUID_LEN+18], sha[20], *p;
	struct sha1_ctx ctx;
	if (volume_id_is_reliable(ddf)) {
		uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
		return;
	}
	/*
	 * Some fake RAID BIOSes (in particular, LSI ones) change the
	 * VD GUID at every boot. These GUIDs are not suitable for
	 * identifying an array. Luckily the header GUID appears to
	 * remain constant.
	 * We construct a pseudo-UUID from the header GUID and those
	 * properties of the subarray that we expect to remain constant.
	 */
	memset(buf, 0, sizeof(buf));
	p = buf;
	/* Hash input: header GUID + 16-byte VD name + vcnum. */
	memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
	p += DDF_GUID_LEN;
	memcpy(p, ddf->virt->entries[vcnum].name, 16);
	p += 16;
	*((__u16 *) p) = vcnum;
	sha1_init_ctx(&ctx);
	sha1_process_bytes(buf, sizeof(buf), &ctx);
	sha1_finish_ctx(&ctx, sha);
	/* First 16 bytes of the digest become the UUID. */
	memcpy(uuid, sha, 4*4);
}
1781
/* Print " UUID=..." for the container, or for the specific subarray
 * when st refers to one; print nothing when the VD cannot be
 * determined. */
static void brief_detail_super_ddf(struct supertype *st)
{
	struct mdinfo info;
	char nbuf[64];
	struct ddf_super *ddf = st->sb;
	unsigned int vcnum = get_vd_num_of_subarray(st);
	if (vcnum == DDF_CONTAINER)
		uuid_from_super_ddf(st, info.uuid);
	else if (vcnum == DDF_NOTFOUND)
		return;
	else
		uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
	fname_from_uuid(st, &info, nbuf,':');
	printf(" UUID=%s", nbuf + 5);
}
1797 #endif
1798
1799 static int match_home_ddf(struct supertype *st, char *homehost)
1800 {
1801 /* It matches 'this' host if the controller is a
1802 * Linux-MD controller with vendor_data matching
1803 * the hostname. It would be nice if we could
1804 * test against controller found in /sys or somewhere...
1805 */
1806 struct ddf_super *ddf = st->sb;
1807 unsigned int len;
1808
1809 if (!homehost)
1810 return 0;
1811 len = strlen(homehost);
1812
1813 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1814 len < sizeof(ddf->controller.vendor_data) &&
1815 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1816 ddf->controller.vendor_data[len] == 0);
1817 }
1818
1819 #ifndef MDASSEMBLE
static int find_index_in_bvd(const struct ddf_super *ddf,
			     const struct vd_config *conf, unsigned int n,
			     unsigned int *n_bvd)
{
	/*
	 * Find the index of the n-th valid physical disk in this BVD.
	 * Unused entries can be sprinkled in with the used entries,
	 * but don't count.
	 * Returns 1 and stores the index in *n_bvd on success; returns
	 * 0 when fewer than n+1 valid entries exist.
	 */
	unsigned int i, j;
	/* i walks the phys_refnum table, j counts valid entries seen. */
	for (i = 0, j = 0;
	     i < ddf->mppe && j < be16_to_cpu(conf->prim_elmnt_count);
	     i++) {
		/* 0xffffffff marks an unused slot. */
		if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
			if (n == j) {
				*n_bvd = i;
				return 1;
			}
			j++;
		}
	}
	dprintf("%s: couldn't find BVD member %u (total %u)\n",
		__func__, n, be16_to_cpu(conf->prim_elmnt_count));
	return 0;
}
1845
1846 /* Given a member array instance number, and a raid disk within that instance,
1847 * find the vd_config structure. The offset of the given disk in the phys_refnum
1848 * table is returned in n_bvd.
1849 * For two-level members with a secondary raid level the vd_config for
1850 * the appropriate BVD is returned.
1851 * The return value is always &vlc->conf, where vlc is returned in last pointer.
1852 */
1853 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1854 unsigned int n,
1855 unsigned int *n_bvd, struct vcl **vcl)
1856 {
1857 struct vcl *v;
1858
1859 for (v = ddf->conflist; v; v = v->next) {
1860 unsigned int nsec, ibvd = 0;
1861 struct vd_config *conf;
1862 if (inst != v->vcnum)
1863 continue;
1864 conf = &v->conf;
1865 if (conf->sec_elmnt_count == 1) {
1866 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1867 *vcl = v;
1868 return conf;
1869 } else
1870 goto bad;
1871 }
1872 if (v->other_bvds == NULL) {
1873 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1874 __func__, conf->sec_elmnt_count);
1875 goto bad;
1876 }
1877 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1878 if (conf->sec_elmnt_seq != nsec) {
1879 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1880 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1881 == nsec)
1882 break;
1883 }
1884 if (ibvd == conf->sec_elmnt_count)
1885 goto bad;
1886 conf = v->other_bvds[ibvd-1];
1887 }
1888 if (!find_index_in_bvd(ddf, conf,
1889 n - nsec*conf->sec_elmnt_count, n_bvd))
1890 goto bad;
1891 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1892 , __func__, n, *n_bvd, ibvd, inst);
1893 *vcl = v;
1894 return conf;
1895 }
1896 bad:
1897 pr_err("%s: Could't find disk %d in array %u\n", __func__, n, inst);
1898 return NULL;
1899 }
1900 #endif
1901
1902 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1903 {
1904 /* Find the entry in phys_disk which has the given refnum
1905 * and return it's index
1906 */
1907 unsigned int i;
1908 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1909 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1910 return i;
1911 return -1;
1912 }
1913
1914 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1915 {
1916 char buf[20];
1917 struct sha1_ctx ctx;
1918 sha1_init_ctx(&ctx);
1919 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1920 sha1_finish_ctx(&ctx, buf);
1921 memcpy(uuid, buf, 4*4);
1922 }
1923
static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
{
	/* The uuid returned here is used for:
	 *  uuid to put into bitmap file (Create, Grow)
	 *  uuid for backup header when saving critical section (Grow)
	 *  comparing uuids when re-adding a device into an array
	 *    In these cases the uuid required is that of the data-array,
	 *    not the device-set.
	 *  uuid to recognise same set when adding a missing device back
	 *    to an array.   This is a uuid for the device-set.
	 *
	 * For each of these we can make do with a truncated
	 * or hashed uuid rather than the original, as long as
	 * everyone agrees.
	 * In the case of SVD we assume the BVD is of interest,
	 * though that might be the case if a bitmap were made for
	 * a mirrored SVD - worry about that later.
	 * So we need to find the VD configuration record for the
	 * relevant BVD and extract the GUID and Secondary_Element_Seq.
	 * The first 16 bytes of the sha1 of these is used.
	 */
	struct ddf_super *ddf = st->sb;
	struct vcl *vcl = ddf->currentconf;

	/* currentconf selects a subarray; otherwise the container's
	 * anchor GUID identifies the whole device-set. */
	if (vcl)
		uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
	else
		uuid_from_ddf_guid(ddf->anchor.guid, uuid);
}
1953
1954 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1955 {
1956 struct ddf_super *ddf = st->sb;
1957 int map_disks = info->array.raid_disks;
1958 __u32 *cptr;
1959
1960 if (ddf->currentconf) {
1961 getinfo_super_ddf_bvd(st, info, map);
1962 return;
1963 }
1964 memset(info, 0, sizeof(*info));
1965
1966 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1967 info->array.level = LEVEL_CONTAINER;
1968 info->array.layout = 0;
1969 info->array.md_minor = -1;
1970 cptr = (__u32 *)(ddf->anchor.guid + 16);
1971 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1972
1973 info->array.utime = 0;
1974 info->array.chunk_size = 0;
1975 info->container_enough = 1;
1976
1977 info->disk.major = 0;
1978 info->disk.minor = 0;
1979 if (ddf->dlist) {
1980 struct phys_disk_entry *pde = NULL;
1981 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
1982 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
1983
1984 info->data_offset = be64_to_cpu(ddf->phys->
1985 entries[info->disk.raid_disk].
1986 config_size);
1987 info->component_size = ddf->dlist->size - info->data_offset;
1988 if (info->disk.raid_disk >= 0)
1989 pde = ddf->phys->entries + info->disk.raid_disk;
1990 if (pde &&
1991 !(be16_to_cpu(pde->state) & DDF_Failed))
1992 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
1993 else
1994 info->disk.state = 1 << MD_DISK_FAULTY;
1995
1996 info->events = be32_to_cpu(ddf->active->seq);
1997 } else {
1998 info->disk.number = -1;
1999 info->disk.raid_disk = -1;
2000 // info->disk.raid_disk = find refnum in the table and use index;
2001 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
2002 }
2003
2004 info->recovery_start = MaxSector;
2005 info->reshape_active = 0;
2006 info->recovery_blocked = 0;
2007 info->name[0] = 0;
2008
2009 info->array.major_version = -1;
2010 info->array.minor_version = -2;
2011 strcpy(info->text_version, "ddf");
2012 info->safe_mode_delay = 0;
2013
2014 uuid_from_super_ddf(st, info->uuid);
2015
2016 if (map) {
2017 int i;
2018 for (i = 0 ; i < map_disks; i++) {
2019 if (i < info->array.raid_disks &&
2020 !(be16_to_cpu(ddf->phys->entries[i].state)
2021 & DDF_Failed))
2022 map[i] = 1;
2023 else
2024 map[i] = 0;
2025 }
2026 }
2027 }
2028
2029 /* size of name must be at least 17 bytes! */
2030 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i)
2031 {
2032 int j;
2033 memcpy(name, ddf->virt->entries[i].name, 16);
2034 name[16] = 0;
2035 for(j = 0; j < 16; j++)
2036 if (name[j] == ' ')
2037 name[j] = 0;
2038 }
2039
2040 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
2041 {
2042 struct ddf_super *ddf = st->sb;
2043 struct vcl *vc = ddf->currentconf;
2044 int cd = ddf->currentdev;
2045 int n_prim;
2046 int j;
2047 struct dl *dl;
2048 int map_disks = info->array.raid_disks;
2049 __u32 *cptr;
2050 struct vd_config *conf;
2051
2052 memset(info, 0, sizeof(*info));
2053 if (layout_ddf2md(&vc->conf, &info->array) == -1)
2054 return;
2055 info->array.md_minor = -1;
2056 cptr = (__u32 *)(vc->conf.guid + 16);
2057 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
2058 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
2059 info->array.chunk_size = 512 << vc->conf.chunk_shift;
2060 info->custom_array_size = 0;
2061
2062 conf = &vc->conf;
2063 n_prim = be16_to_cpu(conf->prim_elmnt_count);
2064 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
2065 int ibvd = cd / n_prim - 1;
2066 cd %= n_prim;
2067 conf = vc->other_bvds[ibvd];
2068 }
2069
2070 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
2071 info->data_offset =
2072 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
2073 if (vc->block_sizes)
2074 info->component_size = vc->block_sizes[cd];
2075 else
2076 info->component_size = be64_to_cpu(conf->blocks);
2077
2078 for (dl = ddf->dlist; dl ; dl = dl->next)
2079 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
2080 break;
2081 }
2082
2083 info->disk.major = 0;
2084 info->disk.minor = 0;
2085 info->disk.state = 0;
2086 if (dl && dl->pdnum >= 0) {
2087 info->disk.major = dl->major;
2088 info->disk.minor = dl->minor;
2089 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2090 * be16_to_cpu(conf->prim_elmnt_count);
2091 info->disk.number = dl->pdnum;
2092 info->disk.state = 0;
2093 if (info->disk.number >= 0 &&
2094 (be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Online) &&
2095 !(be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Failed))
2096 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2097 info->events = be32_to_cpu(ddf->active->seq);
2098 }
2099
2100 info->container_member = ddf->currentconf->vcnum;
2101
2102 info->recovery_start = MaxSector;
2103 info->resync_start = 0;
2104 info->reshape_active = 0;
2105 info->recovery_blocked = 0;
2106 if (!(ddf->virt->entries[info->container_member].state
2107 & DDF_state_inconsistent) &&
2108 (ddf->virt->entries[info->container_member].init_state
2109 & DDF_initstate_mask)
2110 == DDF_init_full)
2111 info->resync_start = MaxSector;
2112
2113 uuid_from_super_ddf(st, info->uuid);
2114
2115 info->array.major_version = -1;
2116 info->array.minor_version = -2;
2117 sprintf(info->text_version, "/%s/%d",
2118 st->container_devnm,
2119 info->container_member);
2120 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2121
2122 _ddf_array_name(info->name, ddf, info->container_member);
2123
2124 if (map)
2125 for (j = 0; j < map_disks; j++) {
2126 map[j] = 0;
2127 if (j < info->array.raid_disks) {
2128 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2129 if (i >= 0 &&
2130 (be16_to_cpu(ddf->phys->entries[i].state)
2131 & DDF_Online) &&
2132 !(be16_to_cpu(ddf->phys->entries[i].state)
2133 & DDF_Failed))
2134 map[i] = 1;
2135 }
2136 }
2137 }
2138
static int update_super_ddf(struct supertype *st, struct mdinfo *info,
			    char *update,
			    char *devname, int verbose,
			    int uuid_set, char *homehost)
{
	/* Apply a named metadata update to this superblock.
	 * For 'assemble' and 'force' a non-zero return means "something
	 * changed"; other callers ignore the return value.
	 *
	 * Recognised update strings:
	 *  force-one / assemble / force-array:
	 *	nothing to do for DDF - no age/faulty trickery is needed,
	 *	the implied changes happen when the metadata is next
	 *	written to activate the array.
	 *  grow:		FIXME - not implemented yet.
	 *  resync:		mark dirty so a resync happens - currently a no-op.
	 *  _reshape_progress:	reshape is not supported - no-op.
	 *  homehost:		would live in controller->vendor_data - unsupported.
	 *  name:		would live in virtual_entry->name - unsupported.
	 *
	 * Not relevant for DDF at all: sparc2.2, super-minor, summaries.
	 *
	 * Returns 0 on success/no-op, -1 for unsupported or unknown updates.
	 */
	if (strcmp(update, "grow") == 0)
		return 0;	/* FIXME: growing not implemented yet */

	if (strcmp(update, "resync") == 0)
		return 0;	/* nothing to record here yet */

	if (strcmp(update, "_reshape_progress") == 0)
		return 0;	/* reshape not supported */

	if (strcmp(update, "assemble") == 0)
		return 0;	/* nothing to do, just succeed */

	/* "homehost" and "name" are understood but not yet implemented;
	 * everything else is unknown.  All of these report failure.
	 */
	return -1;
}
2207
2208 static void make_header_guid(char *guid)
2209 {
2210 be32 stamp;
2211 /* Create a DDF Header of Virtual Disk GUID */
2212
2213 /* 24 bytes of fiction required.
2214 * first 8 are a 'vendor-id' - "Linux-MD"
2215 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2216 * Remaining 8 random number plus timestamp
2217 */
2218 memcpy(guid, T10, sizeof(T10));
2219 stamp = cpu_to_be32(0xdeadbeef);
2220 memcpy(guid+8, &stamp, 4);
2221 stamp = cpu_to_be32(0);
2222 memcpy(guid+12, &stamp, 4);
2223 stamp = cpu_to_be32(time(0) - DECADE);
2224 memcpy(guid+16, &stamp, 4);
2225 stamp._v32 = random32();
2226 memcpy(guid+20, &stamp, 4);
2227 }
2228
2229 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2230 {
2231 unsigned int i;
2232 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2233 if (all_ff(ddf->virt->entries[i].guid))
2234 return i;
2235 }
2236 return DDF_NOTFOUND;
2237 }
2238
2239 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2240 const char *name)
2241 {
2242 unsigned int i;
2243 if (name == NULL)
2244 return DDF_NOTFOUND;
2245 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2246 if (all_ff(ddf->virt->entries[i].guid))
2247 continue;
2248 if (!strncmp(name, ddf->virt->entries[i].name,
2249 sizeof(ddf->virt->entries[i].name)))
2250 return i;
2251 }
2252 return DDF_NOTFOUND;
2253 }
2254
2255 #ifndef MDASSEMBLE
2256 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2257 const char *guid)
2258 {
2259 unsigned int i;
2260 if (guid == NULL || all_ff(guid))
2261 return DDF_NOTFOUND;
2262 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2263 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2264 return i;
2265 return DDF_NOTFOUND;
2266 }
2267 #endif
2268
static int init_super_ddf(struct supertype *st,
			  mdu_array_info_t *info,
			  unsigned long long size, char *name, char *homehost,
			  int *uuid, unsigned long long data_offset)
{
	/* This is primarily called by Create when creating a new array.
	 * We will then get add_to_super called for each component, and then
	 * write_init_super called to write it out to each device.
	 * For DDF, Create can create on fresh devices or on a pre-existing
	 * array.
	 * To create on a pre-existing array a different method will be called.
	 * This one is just for fresh drives.
	 *
	 * We need to create the entire 'ddf' structure which includes:
	 *  DDF headers - these are easy.
	 *  Controller data - a Sector describing this controller .. not that
	 *                  this is a controller exactly.
	 *  Physical Disk Record - one entry per device, so
	 *			leave plenty of space.
	 *  Virtual Disk Records - again, just leave plenty of space.
	 *			This just lists VDs, doesn't give details.
	 *  Config records - describe the VDs that use this disk
	 *  DiskData  - describes 'this' device.
	 *  BadBlockManagement - empty
	 *  Diag Space - empty
	 *  Vendor Logs - Could we put bitmaps here?
	 *
	 * Returns 1 on success, 0 on failure (caller treats 0 as error,
	 * except for the zeroing case where 0 is expected).
	 */
	struct ddf_super *ddf;
	char hostname[17];
	int hostlen;
	int max_phys_disks, max_virt_disks;
	unsigned long long sector;
	int clen;
	int i;
	int pdsize, vdsize;
	struct phys_disk *pd;
	struct virtual_disk *vd;

	/* DDF places metadata at a fixed location relative to the end
	 * of the device; a user-specified data offset cannot be honoured. */
	if (data_offset != INVALID_SECTORS) {
		pr_err("data-offset not supported by DDF\n");
		return 0;
	}

	/* An existing superblock means we are creating a BVD inside a
	 * pre-existing container - delegate. */
	if (st->sb)
		return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
					  data_offset);

	if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
		pr_err("%s could not allocate superblock\n", __func__);
		return 0;
	}
	memset(ddf, 0, sizeof(*ddf));
	st->sb = ddf;

	if (info == NULL) {
		/* zeroing superblock */
		return 0;
	}

	/* At least 32MB *must* be reserved for the ddf.  So let's just
	 * start 32MB from the end, and put the primary header there.
	 * Don't do secondary for now.
	 * We don't know exactly where that will be yet as it could be
	 * different on each device.  So just set up the lengths.
	 */

	/* --- Anchor header: template for primary/secondary headers.
	 * Unknown LBAs are left all-0xff and filled in at write time. --- */
	ddf->anchor.magic = DDF_HEADER_MAGIC;
	make_header_guid(ddf->anchor.guid);

	memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
	ddf->anchor.seq = cpu_to_be32(1);
	ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
	ddf->anchor.openflag = 0xFF;
	ddf->anchor.foreignflag = 0;
	ddf->anchor.enforcegroups = 0; /* Is this best?? */
	ddf->anchor.pad0 = 0xff;
	memset(ddf->anchor.pad1, 0xff, 12);
	memset(ddf->anchor.header_ext, 0xff, 32);
	ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
	ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
	ddf->anchor.type = DDF_HEADER_ANCHOR;
	memset(ddf->anchor.pad2, 0xff, 3);
	ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
	/* Put this at bottom of 32M reserved.. */
	ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
	max_phys_disks = 1023;   /* Should be enough, 4095 is also allowed */
	ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
	max_virt_disks = 255; /* 15, 63, 255, 1024, 4095 are all allowed */
	ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks);
	ddf->max_part = 64;
	ddf->anchor.max_partitions = cpu_to_be16(ddf->max_part);
	ddf->mppe = 256; /* 16, 64, 256, 1024, 4096 are all allowed */
	/* one header sector plus refnums (4 bytes) and LBA offsets (8 bytes)
	 * per primary element, rounded up to whole sectors */
	ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
	ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
	ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
	memset(ddf->anchor.pad3, 0xff, 54);

	/* --- Section layout: sections are laid out consecutively,
	 * 'sector' tracks the running offset from the header. --- */

	/* Controller section is one sector long immediately
	 * after the ddf header */
	sector = 1;
	ddf->anchor.controller_section_offset = cpu_to_be32(sector);
	ddf->anchor.controller_section_length = cpu_to_be32(1);
	sector += 1;

	/* phys is 8 sectors after that */
	pdsize = ROUND_UP(sizeof(struct phys_disk) +
			  sizeof(struct phys_disk_entry)*max_phys_disks,
			  512);
	switch(pdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();	/* spec only allows these section sizes */
	}
	ddf->anchor.phys_section_offset = cpu_to_be32(sector);
	ddf->anchor.phys_section_length =
		cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
	sector += pdsize/512;

	/* virt is another 32 sectors */
	vdsize = ROUND_UP(sizeof(struct virtual_disk) +
			  sizeof(struct virtual_entry) * max_virt_disks,
			  512);
	switch(vdsize/512) {
	case 2: case 8: case 32: case 128: case 512: break;
	default: abort();	/* spec only allows these section sizes */
	}
	ddf->anchor.virt_section_offset = cpu_to_be32(sector);
	ddf->anchor.virt_section_length =
		cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
	sector += vdsize/512;

	/* one config record per partition, plus one for a spare record */
	clen = ddf->conf_rec_len * (ddf->max_part+1);
	ddf->anchor.config_section_offset = cpu_to_be32(sector);
	ddf->anchor.config_section_length = cpu_to_be32(clen);
	sector += clen;

	ddf->anchor.data_section_offset = cpu_to_be32(sector);
	ddf->anchor.data_section_length = cpu_to_be32(1);
	sector += 1;

	/* Unused sections: zero length, offset all-0xff */
	ddf->anchor.bbm_section_length = cpu_to_be32(0);
	ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.diag_space_length = cpu_to_be32(0);
	ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
	ddf->anchor.vendor_length = cpu_to_be32(0);
	ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);

	memset(ddf->anchor.pad4, 0xff, 256);

	/* Primary and secondary headers start as copies of the anchor */
	memcpy(&ddf->primary, &ddf->anchor, 512);
	memcpy(&ddf->secondary, &ddf->anchor, 512);

	ddf->primary.openflag = 1; /* I guess.. */
	ddf->primary.type = DDF_HEADER_PRIMARY;

	ddf->secondary.openflag = 1; /* I guess.. */
	ddf->secondary.type = DDF_HEADER_SECONDARY;

	ddf->active = &ddf->primary;

	/* --- Controller section --- */
	ddf->controller.magic = DDF_CONTROLLER_MAGIC;

	/* 24 more bytes of fiction required.
	 * first 8 are a 'vendor-id'  - "Linux-MD"
	 * Remaining 16 are serial number.... maybe a hostname would do?
	 */
	memcpy(ddf->controller.guid, T10, sizeof(T10));
	gethostname(hostname, sizeof(hostname));
	hostname[sizeof(hostname) - 1] = 0;
	hostlen = strlen(hostname);
	/* right-align hostname in the guid, space-fill the gap */
	memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
	for (i = strlen(T10) ; i+hostlen < 24; i++)
		ddf->controller.guid[i] = ' ';

	ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
	ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
	ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
	ddf->controller.type.sub_device_id = cpu_to_be16(0);
	memcpy(ddf->controller.product_id, "What Is My PID??", 16);
	memset(ddf->controller.pad, 0xff, 8);
	memset(ddf->controller.vendor_data, 0xff, 448);
	if (homehost && strlen(homehost) < 440)
		strcpy((char*)ddf->controller.vendor_data, homehost);

	/* --- Physical disk records: all entries marked unused (0xff guid) --- */
	if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
		pr_err("%s could not allocate pd\n", __func__);
		return 0;
	}
	ddf->phys = pd;
	ddf->pdsize = pdsize;

	memset(pd, 0xff, pdsize);
	memset(pd, 0, sizeof(*pd));
	pd->magic = DDF_PHYS_RECORDS_MAGIC;
	pd->used_pdes = cpu_to_be16(0);
	pd->max_pdes = cpu_to_be16(max_phys_disks);
	memset(pd->pad, 0xff, 52);
	for (i = 0; i < max_phys_disks; i++)
		memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);

	/* --- Virtual disk records: all entries marked unused --- */
	if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
		pr_err("%s could not allocate vd\n", __func__);
		return 0;
	}
	ddf->virt = vd;
	ddf->vdsize = vdsize;
	memset(vd, 0, vdsize);
	vd->magic = DDF_VIRT_RECORDS_MAGIC;
	vd->populated_vdes = cpu_to_be16(0);
	vd->max_vdes = cpu_to_be16(max_virt_disks);
	memset(vd->pad, 0xff, 52);

	for (i=0; i<max_virt_disks; i++)
		memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));

	st->sb = ddf;
	ddf_set_updates_pending(ddf);
	return 1;
}
2487
static int chunk_to_shift(int chunksize)
{
	/* Convert a chunk size in bytes into the DDF 'chunk shift':
	 * log2 of the chunk size expressed in 512-byte sectors
	 * (i.e. the position of the lowest set bit).  Returns -1 for
	 * chunk sizes below one sector, matching ffs(0)-1.
	 */
	int sectors = chunksize / 512;
	int shift = 0;

	if (sectors == 0)
		return -1;
	while (!(sectors & 1)) {
		sectors >>= 1;
		shift++;
	}
	return shift;
}
2492
2493 #ifndef MDASSEMBLE
struct extent {
	unsigned long long start, size;
};

/* qsort comparator: order extents by ascending start sector. */
static int cmp_extent(const void *av, const void *bv)
{
	const struct extent *x = av;
	const struct extent *y = bv;

	if (x->start == y->start)
		return 0;
	return x->start < y->start ? -1 : 1;
}
2507
static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
{
	/* Find a list of used extents on the given physical device
	 * (dl) of the given ddf.
	 * Return a malloced array of 'struct extent', sorted by start,
	 * terminated by a sentinel entry with size == 0 whose start is
	 * the device's config_size (i.e. the end of usable space).
	 * Caller must free the result.
	 * Returns NULL if the disk has no valid pdnum or is not
	 * cleanly online.
	 */
	struct extent *rv;
	int n = 0;
	unsigned int i;
	__u16 state;

	if (dl->pdnum < 0)
		return NULL;
	state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);

	/* only a disk that is Online and neither Failed nor Missing
	 * has meaningful extents */
	if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
		return NULL;

	/* max_part possible extents plus sentinel (and one spare slot) */
	rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));

	for (i = 0; i < ddf->max_part; i++) {
		const struct vd_config *bvd;
		unsigned int ibvd;
		struct vcl *v = dl->vlist[i];
		/* skip empty slots and VDs this disk isn't a member of */
		if (v == NULL ||
		    get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
					     &bvd, &ibvd) == DDF_NOTFOUND)
			continue;
		rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
		rv[n].size = be64_to_cpu(bvd->blocks);
		n++;
	}
	qsort(rv, n, sizeof(*rv), cmp_extent);

	/* sentinel: marks end of usable space on the device */
	rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
	rv[n].size = 0;
	return rv;
}
2546 #endif
2547
static int init_super_ddf_bvd(struct supertype *st,
			      mdu_array_info_t *info,
			      unsigned long long size,
			      char *name, char *homehost,
			      int *uuid, unsigned long long data_offset)
{
	/* We are creating a BVD inside a pre-existing container.
	 * so st->sb is already set.
	 * We need to create a new vd_config and a new virtual_entry
	 *
	 * Returns 1 on success, 0 on failure (duplicate name, no free
	 * virtual-disk slot, allocation failure, or unsupported
	 * level/layout).
	 */
	struct ddf_super *ddf = st->sb;
	unsigned int venum, i;
	struct virtual_entry *ve;
	struct vcl *vcl;
	struct vd_config *vc;

	if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
		pr_err("This ddf already has an array called %s\n", name);
		return 0;
	}
	venum = find_unused_vde(ddf);
	if (venum == DDF_NOTFOUND) {
		pr_err("Cannot find spare slot for virtual disk\n");
		return 0;
	}
	ve = &ddf->virt->entries[venum];

	/* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
	 * timestamp, random number
	 */
	make_header_guid(ve->guid);
	ve->unit = cpu_to_be16(info->md_minor);
	ve->pad0 = 0xFFFF;
	/* CRC of the container GUID ties this VD to its container */
	ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
				  DDF_GUID_LEN);
	ve->type = cpu_to_be16(0);
	ve->state = DDF_state_degraded; /* Will be modified as devices are added */
	if (info->state & 1) /* clean */
		ve->init_state = DDF_init_full;
	else
		ve->init_state = DDF_init_not;

	memset(ve->pad1, 0xff, 14);
	/* name field is space-padded, not NUL-terminated */
	memset(ve->name, ' ', 16);
	if (name)
		strncpy(ve->name, name, 16);
	ddf->virt->populated_vdes =
		cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);

	/* Now create a new vd_config */
	if (posix_memalign((void**)&vcl, 512,
			   (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
		pr_err("%s could not allocate vd_config\n", __func__);
		return 0;
	}
	vcl->vcnum = venum;
	vcl->block_sizes = NULL; /* FIXME not for CONCAT */
	vc = &vcl->conf;

	vc->magic = DDF_VD_CONF_MAGIC;
	memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
	vc->timestamp = cpu_to_be32(time(0)-DECADE);
	vc->seqnum = cpu_to_be32(1);
	memset(vc->pad0, 0xff, 24);
	vc->chunk_shift = chunk_to_shift(info->chunk_size);
	/* translate md level/layout to the DDF equivalents; this also
	 * sets prim_elmnt_count and sec_elmnt_count */
	if (layout_md2ddf(info, vc) == -1 ||
		be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
		pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
		       __func__, info->level, info->layout, info->raid_disks);
		free(vcl);
		return 0;
	}
	vc->sec_elmnt_seq = 0;
	if (alloc_other_bvds(ddf, vcl) != 0) {
		pr_err("%s could not allocate other bvds\n",
		       __func__);
		free(vcl);
		return 0;
	}
	vc->blocks = cpu_to_be64(info->size * 2);	/* KiB -> sectors */
	vc->array_blocks = cpu_to_be64(
		calc_array_size(info->level, info->raid_disks, info->layout,
				info->chunk_size, info->size*2));
	memset(vc->pad1, 0xff, 8);
	/* no spares assigned yet */
	vc->spare_refs[0] = cpu_to_be32(0xffffffff);
	vc->spare_refs[1] = cpu_to_be32(0xffffffff);
	vc->spare_refs[2] = cpu_to_be32(0xffffffff);
	vc->spare_refs[3] = cpu_to_be32(0xffffffff);
	vc->spare_refs[4] = cpu_to_be32(0xffffffff);
	vc->spare_refs[5] = cpu_to_be32(0xffffffff);
	vc->spare_refs[6] = cpu_to_be32(0xffffffff);
	vc->spare_refs[7] = cpu_to_be32(0xffffffff);
	memset(vc->cache_pol, 0, 8);
	vc->bg_rate = 0x80;
	memset(vc->pad2, 0xff, 3);
	memset(vc->pad3, 0xff, 52);
	memset(vc->pad4, 0xff, 192);
	memset(vc->v0, 0xff, 32);
	memset(vc->v1, 0xff, 32);
	memset(vc->v2, 0xff, 16);
	memset(vc->v3, 0xff, 16);
	memset(vc->vendor, 0xff, 32);

	/* refnums all-0xff = slot unused; LBA offsets zeroed */
	memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
	memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);

	/* secondary BVDs are copies differing only in sec_elmnt_seq */
	for (i = 1; i < vc->sec_elmnt_count; i++) {
		memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
		vcl->other_bvds[i-1]->sec_elmnt_seq = i;
	}

	vcl->next = ddf->conflist;
	ddf->conflist = vcl;
	ddf->currentconf = vcl;
	ddf_set_updates_pending(ddf);
	return 1;
}
2665
2666 #ifndef MDASSEMBLE
static void add_to_super_ddf_bvd(struct supertype *st,
				 mdu_disk_info_t *dk, int fd, char *devname)
{
	/* fd and devname identify a device within the ddf container (st).
	 * dk identifies a location in the new BVD.
	 * We need to find suitable free space in that device and update
	 * the phys_refnum and lba_offset for the newly created vd_config.
	 * We might also want to update the type in the phys_disk
	 * section.
	 *
	 * Alternately: fd == -1 and we have already chosen which device to
	 * use and recorded in dlist->raid_disk;
	 *
	 * Silently returns without effect if the device cannot be found,
	 * is not in-sync, or has no large enough free extent.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	struct vd_config *vc;
	unsigned int i;
	unsigned long long blocks, pos, esize;
	struct extent *ex;
	unsigned int raid_disk = dk->raid_disk;

	if (fd == -1) {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->raiddisk == dk->raid_disk)
				break;
	} else {
		for (dl = ddf->dlist; dl ; dl = dl->next)
			if (dl->major == dk->major &&
			    dl->minor == dk->minor)
				break;
	}
	if (!dl || dl->pdnum < 0 || ! (dk->state & (1<<MD_DISK_SYNC)))
		return;

	/* For multi-level (secondary) layouts, pick the BVD this slot
	 * belongs to and convert raid_disk to an index within it. */
	vc = &ddf->currentconf->conf;
	if (vc->sec_elmnt_count > 1) {
		unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
		if (raid_disk >= n)
			vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
		raid_disk %= n;
	}

	ex = get_extents(ddf, dl);
	if (!ex)
		return;

	i = 0; pos = 0;
	blocks = be64_to_cpu(vc->blocks);
	if (ddf->currentconf->block_sizes)
		blocks = ddf->currentconf->block_sizes[dk->raid_disk];

	/* First-fit: find the first gap between used extents that is
	 * big enough.  The sentinel extent (size 0) ends the search. */
	do {
		esize = ex[i].start - pos;
		if (esize >= blocks)
			break;
		pos = ex[i].start + ex[i].size;
		i++;
	} while (ex[i-1].size);

	free(ex);
	if (esize < blocks)
		return;

	ddf->currentdev = dk->raid_disk;
	vc->phys_refnum[raid_disk] = dl->disk.refnum;
	LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);

	/* record this VD in the disk's partition list */
	for (i = 0; i < ddf->max_part ; i++)
		if (dl->vlist[i] == NULL)
			break;
	if (i == ddf->max_part)
		return;
	dl->vlist[i] = ddf->currentconf;

	if (fd >= 0)
		dl->fd = fd;
	if (devname)
		dl->devname = devname;

	/* Check if we can mark array as optimal yet */
	i = ddf->currentconf->vcnum;
	ddf->virt->entries[i].state =
		(ddf->virt->entries[i].state & ~DDF_state_mask)
		| get_svd_state(ddf, ddf->currentconf);
	/* the disk is now an array member, not a global spare */
	be16_clear(ddf->phys->entries[dl->pdnum].type,
		   cpu_to_be16(DDF_Global_Spare));
	be16_set(ddf->phys->entries[dl->pdnum].type,
		 cpu_to_be16(DDF_Active_in_VD));
	dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
		__func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
		ddf->currentconf->vcnum, guid_str(vc->guid),
		dk->raid_disk);
	ddf_set_updates_pending(ddf);
}
2762
2763 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2764 {
2765 unsigned int i;
2766 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2767 if (all_ff(ddf->phys->entries[i].guid))
2768 return i;
2769 }
2770 return DDF_NOTFOUND;
2771 }
2772
static void _set_config_size(struct phys_disk_entry *pde, const struct dl *dl)
{
	/* Compute and store the usable data size (config_size) for a
	 * physical disk: everything below the DDF metadata structures.
	 * Start from the lower of (device size minus 32MB reserved) and
	 * the primary header LBA, then clamp by the secondary header LBA
	 * if one exists.
	 */
	__u64 cfs, t;
	cfs = min(dl->size - 32*1024*2ULL, be64_to_cpu(dl->primary_lba));
	t = be64_to_cpu(dl->secondary_lba);
	if (t != ~(__u64)0)
		cfs = min(cfs, t);
	/*
	 * Some vendor DDF structures interpret workspace_lba
	 * very differently than we do: Make a sanity check on the value.
	 */
	t = be64_to_cpu(dl->workspace_lba);
	if (t < cfs) {
		__u64 wsp = cfs - t;
		/* a workspace bigger than 1MB and 1/16 of the disk is
		 * implausible - ignore it rather than shrink config_size */
		if (wsp > 1024*1024*2ULL && wsp > dl->size / 16) {
			pr_err("%s: %x:%x: workspace size 0x%llx too big, ignoring\n",
			       __func__, dl->major, dl->minor, wsp);
		} else
			cfs = t;
	}
	pde->config_size = cpu_to_be64(cfs);
	dprintf("%s: %x:%x config_size %llx, DDF structure is %llx blocks\n",
		__func__, dl->major, dl->minor, cfs, dl->size-cfs);
}
2797
/* Add a device to a container, either while creating it or while
 * expanding a pre-existing container.
 * Returns 0 on success, 1 on failure (no free slot, device too small,
 * or allocation failure).
 */
static int add_to_super_ddf(struct supertype *st,
			    mdu_disk_info_t *dk, int fd, char *devname,
			    unsigned long long data_offset)
{
	struct ddf_super *ddf = st->sb;
	struct dl *dd;
	time_t now;
	struct tm *tm;
	unsigned long long size;
	struct phys_disk_entry *pde;
	unsigned int n, i;
	struct stat stb;
	__u32 *tptr;

	/* A VD is currently being created inside this container:
	 * delegate to the BVD variant instead of adding a new
	 * physical disk. */
	if (ddf->currentconf) {
		add_to_super_ddf_bvd(st, dk, fd, devname);
		return 0;
	}

	/* This is device numbered dk->number. We need to create
	 * a phys_disk entry and a more detailed disk_data entry.
	 */
	fstat(fd, &stb);
	n = find_unused_pde(ddf);
	if (n == DDF_NOTFOUND) {
		pr_err("%s: No free slot in array, cannot add disk\n",
		       __func__);
		return 1;
	}
	pde = &ddf->phys->entries[n];
	get_dev_size(fd, NULL, &size);
	/* DDF requires at least 32MB of reserved metadata space */
	if (size <= 32*1024*1024) {
		pr_err("%s: device size must be at least 32MB\n",
		       __func__);
		return 1;
	}
	size >>= 9;	/* bytes -> 512-byte sectors */

	if (posix_memalign((void**)&dd, 512,
			   sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
		pr_err("%s could allocate buffer for new disk, aborting\n",
		       __func__);
		return 1;
	}
	dd->major = major(stb.st_rdev);
	dd->minor = minor(stb.st_rdev);
	dd->devname = devname;
	dd->fd = fd;
	dd->spare = NULL;

	dd->disk.magic = DDF_PHYS_DATA_MAGIC;
	now = time(0);
	tm = localtime(&now);
	/* GUID: 8-char vendor id + creation date + 8 random bytes */
	sprintf(dd->disk.guid, "%8s%04d%02d%02d",
		T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
	tptr = (__u32 *)(dd->disk.guid + 16);
	*tptr++ = random32();
	*tptr = random32();

	/* pick a random refnum, retrying until it is unique among
	 * the existing physical-disk entries */
	do {
		/* Cannot be bothered finding a CRC of some irrelevant details*/
		dd->disk.refnum._v32 = random32();
		for (i = be16_to_cpu(ddf->active->max_pd_entries);
		     i > 0; i--)
			if (be32_eq(ddf->phys->entries[i-1].refnum,
				    dd->disk.refnum))
				break;
	} while (i > 0);

	dd->disk.forced_ref = 1;
	dd->disk.forced_guid = 1;
	memset(dd->disk.vendor, ' ', 32);
	memcpy(dd->disk.vendor, "Linux", 5);
	memset(dd->disk.pad, 0xff, 442);
	for (i = 0; i < ddf->max_part ; i++)
		dd->vlist[i] = NULL;

	dd->pdnum = n;

	if (st->update_tail) {
		/* mdmon context: queue a single-entry phys_disk record
		 * as a metadata update instead of editing in place.
		 * used_pdes here carries the slot number, not a count. */
		int len = (sizeof(struct phys_disk) +
			   sizeof(struct phys_disk_entry));
		struct phys_disk *pd;

		pd = xmalloc(len);
		pd->magic = DDF_PHYS_RECORDS_MAGIC;
		pd->used_pdes = cpu_to_be16(n);
		pde = &pd->entries[0];
		dd->mdupdate = pd;
	} else
		ddf->phys->used_pdes = cpu_to_be16(
			1 + be16_to_cpu(ddf->phys->used_pdes));

	memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
	pde->refnum = dd->disk.refnum;
	pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
	pde->state = cpu_to_be16(DDF_Online);
	dd->size = size;
	/*
	 * If there is already a device in dlist, try to reserve the same
	 * amount of workspace. Otherwise, use 32MB.
	 * We checked disk size above already.
	 */
#define __calc_lba(new, old, lba, mb) do { \
		unsigned long long dif; \
		if ((old) != NULL) \
			dif = (old)->size - be64_to_cpu((old)->lba); \
		else \
			dif = (new)->size; \
		if ((new)->size > dif) \
			(new)->lba = cpu_to_be64((new)->size - dif); \
		else \
			(new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
	} while (0)
	__calc_lba(dd, ddf->dlist, workspace_lba, 32);
	__calc_lba(dd, ddf->dlist, primary_lba, 16);
	if (ddf->dlist == NULL ||
	    be64_to_cpu(ddf->dlist->secondary_lba) != ~(__u64)0)
		__calc_lba(dd, ddf->dlist, secondary_lba, 32);
	_set_config_size(pde, dd);

	sprintf(pde->path, "%17.17s","Information: nil") ;
	memset(pde->pad, 0xff, 6);

	if (st->update_tail) {
		/* defer: actual insertion happens when the update is applied */
		dd->next = ddf->add_list;
		ddf->add_list = dd;
	} else {
		dd->next = ddf->dlist;
		ddf->dlist = dd;
		ddf_set_updates_pending(ddf);
	}

	return 0;
}
2936
2937 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2938 {
2939 struct ddf_super *ddf = st->sb;
2940 struct dl *dl;
2941
2942 /* mdmon has noticed that this disk (dk->major/dk->minor) has
2943 * disappeared from the container.
2944 * We need to arrange that it disappears from the metadata and
2945 * internal data structures too.
2946 * Most of the work is done by ddf_process_update which edits
2947 * the metadata and closes the file handle and attaches the memory
2948 * where free_updates will free it.
2949 */
2950 for (dl = ddf->dlist; dl ; dl = dl->next)
2951 if (dl->major == dk->major &&
2952 dl->minor == dk->minor)
2953 break;
2954 if (!dl || dl->pdnum < 0)
2955 return -1;
2956
2957 if (st->update_tail) {
2958 int len = (sizeof(struct phys_disk) +
2959 sizeof(struct phys_disk_entry));
2960 struct phys_disk *pd;
2961
2962 pd = xmalloc(len);
2963 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2964 pd->used_pdes = cpu_to_be16(dl->pdnum);
2965 pd->entries[0].state = cpu_to_be16(DDF_Missing);
2966 append_metadata_update(st, pd, len);
2967 }
2968 return 0;
2969 }
2970 #endif
2971
2972 /*
2973 * This is the write_init_super method for a ddf container. It is
2974 * called when creating a container or adding another device to a
2975 * container.
2976 */
2977
static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
{
	/* Write one complete copy of the DDF structure (header,
	 * controller, phys, virt, config records, disk data) to disk d,
	 * anchored at either the primary or secondary header LBA
	 * depending on 'type'.
	 * The header is first written with openflag=1, then re-written
	 * with openflag=0 after all sections are out, so a crash
	 * mid-write leaves the copy marked as open/invalid.
	 * Returns 1 on success, 0 on failure or when there is nothing
	 * to write (unknown type, or LBA not set).
	 */
	unsigned long long sector;
	struct ddf_header *header;
	int fd, i, n_config, conf_size, buf_size;
	int ret = 0;
	char *conf;

	fd = d->fd;

	switch (type) {
	case DDF_HEADER_PRIMARY:
		header = &ddf->primary;
		sector = be64_to_cpu(header->primary_lba);
		break;
	case DDF_HEADER_SECONDARY:
		header = &ddf->secondary;
		sector = be64_to_cpu(header->secondary_lba);
		break;
	default:
		return 0;
	}
	/* all-0xff LBA means this copy doesn't exist (e.g. no secondary) */
	if (sector == ~(__u64)0)
		return 0;

	header->type = type;
	header->openflag = 1;
	header->crc = calc_crc(header, 512);

	lseek64(fd, sector<<9, 0);
	if (write(fd, header, 512) < 0)
		goto out;

	/* Sections follow the header consecutively, so sequential
	 * writes land at the offsets recorded in the header. */
	ddf->controller.crc = calc_crc(&ddf->controller, 512);
	if (write(fd, &ddf->controller, 512) < 0)
		goto out;

	ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
	if (write(fd, ddf->phys, ddf->pdsize) < 0)
		goto out;
	ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
	if (write(fd, ddf->virt, ddf->vdsize) < 0)
		goto out;

	/* Now write lots of config records. */
	n_config = ddf->max_part;
	conf_size = ddf->conf_rec_len * 512;
	conf = ddf->conf;
	buf_size = conf_size * (n_config + 1);
	if (!conf) {
		/* lazily allocate (and cache) the staging buffer */
		if (posix_memalign((void**)&conf, 512, buf_size) != 0)
			goto out;
		ddf->conf = conf;
	}
	for (i = 0 ; i <= n_config ; i++) {
		struct vcl *c;
		struct vd_config *vdc = NULL;
		if (i == n_config) {
			/* last slot is reserved for the spare record */
			c = (struct vcl *)d->spare;
			if (c)
				vdc = &c->conf;
		} else {
			unsigned int dummy;
			c = d->vlist[i];
			if (c)
				get_pd_index_from_refnum(
					c, d->disk.refnum,
					ddf->mppe,
					(const struct vd_config **)&vdc,
					&dummy);
		}
		if (vdc) {
			dprintf("writing conf record %i on disk %08x for %s/%u\n",
				i, be32_to_cpu(d->disk.refnum),
				guid_str(vdc->guid),
				vdc->sec_elmnt_seq);
			vdc->seqnum = header->seq;
			vdc->crc = calc_crc(vdc, conf_size);
			memcpy(conf + i*conf_size, vdc, conf_size);
		} else
			/* unused config slots are filled with 0xff */
			memset(conf + i*conf_size, 0xff, conf_size);
	}
	if (write(fd, conf, buf_size) != buf_size)
		goto out;

	d->disk.crc = calc_crc(&d->disk, 512);
	if (write(fd, &d->disk, 512) < 0)
		goto out;

	ret = 1;
out:
	/* close the header: rewrite with openflag=0 to mark the copy valid */
	header->openflag = 0;
	header->crc = calc_crc(header, 512);

	lseek64(fd, sector<<9, 0);
	if (write(fd, header, 512) < 0)
		ret = 0;

	return ret;
}
3078
static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
{
	/* Write the full DDF metadata (primary and secondary structures
	 * plus the anchor in the last sector) to one member disk.
	 * Returns 1 on success, 0 on failure or if the disk has no
	 * open file descriptor.
	 */
	unsigned long long size;
	int fd = d->fd;
	if (fd < 0)
		return 0;

	/* We need to fill in the primary, (secondary) and workspace
	 * lba's in the headers, set their checksums,
	 * Also checksum phys, virt....
	 *
	 * Then write everything out, finally the anchor is written.
	 */
	get_dev_size(fd, NULL, &size);
	size /= 512;	/* bytes -> sectors */
	/* Per-disk LBAs override the defaults; otherwise place the
	 * workspace 32MB and the primary header 16MB from the end. */
	if (be64_to_cpu(d->workspace_lba) != 0ULL)
		ddf->anchor.workspace_lba = d->workspace_lba;
	else
		ddf->anchor.workspace_lba =
			cpu_to_be64(size - 32*1024*2);
	if (be64_to_cpu(d->primary_lba) != 0ULL)
		ddf->anchor.primary_lba = d->primary_lba;
	else
		ddf->anchor.primary_lba =
			cpu_to_be64(size - 16*1024*2);
	if (be64_to_cpu(d->secondary_lba) != 0ULL)
		ddf->anchor.secondary_lba = d->secondary_lba;
	else
		ddf->anchor.secondary_lba =
			cpu_to_be64(size - 32*1024*2);
	ddf->anchor.seq = ddf->active->seq;
	/* refresh the DDF (1980-based) timestamp on every write */
	ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
	memcpy(&ddf->primary, &ddf->anchor, 512);
	memcpy(&ddf->secondary, &ddf->anchor, 512);

	ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
	ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
	ddf->anchor.crc = calc_crc(&ddf->anchor, 512);

	if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
		return 0;

	if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
		return 0;

	/* anchor lives in the very last sector of the device */
	lseek64(fd, (size-1)*512, SEEK_SET);
	if (write(fd, &ddf->anchor, 512) < 0)
		return 0;

	return 1;
}
3130
3131 #ifndef MDASSEMBLE
3132 static int __write_init_super_ddf(struct supertype *st)
3133 {
3134 struct ddf_super *ddf = st->sb;
3135 struct dl *d;
3136 int attempts = 0;
3137 int successes = 0;
3138
3139 pr_state(ddf, __func__);
3140
3141 /* try to write updated metadata,
3142 * if we catch a failure move on to the next disk
3143 */
3144 for (d = ddf->dlist; d; d=d->next) {
3145 attempts++;
3146 successes += _write_super_to_disk(ddf, d);
3147 }
3148
3149 return attempts != successes;
3150 }
3151
static int write_init_super_ddf(struct supertype *st)
{
	/* Commit newly created metadata, either by queueing it as
	 * mdmon metadata updates (when st->update_tail is set) or by
	 * writing it to the member disks directly.
	 * Returns 0 on success (or when the update was merely queued).
	 */
	struct ddf_super *ddf = st->sb;
	struct vcl *currentconf = ddf->currentconf;

	/* We are done with currentconf - reset it so st refers to the container */
	ddf->currentconf = NULL;

	if (st->update_tail) {
		/* queue the virtual_disk and vd_config as metadata updates */
		struct virtual_disk *vd;
		struct vd_config *vc;
		int len, tlen;
		unsigned int i;

		if (!currentconf) {
			/* Must be adding a physical disk to the container */
			int len = (sizeof(struct phys_disk) +
				   sizeof(struct phys_disk_entry));

			/* adding a disk to the container. */
			if (!ddf->add_list)
				return 0;

			/* Hand the pre-built update over to mdmon;
			 * ownership of mdupdate transfers with it. */
			append_metadata_update(st, ddf->add_list->mdupdate, len);
			ddf->add_list->mdupdate = NULL;
			return 0;
		}

		/* Newly created VD */

		/* First the virtual disk. We have a slightly fake header */
		len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
		vd = xmalloc(len);
		*vd = *ddf->virt;
		/* Only the entry for the new VD is sent; populated_vdes
		 * carries its index so the receiver can place it. */
		vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
		vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
		append_metadata_update(st, vd, len);

		/* Then the vd_config */
		len = ddf->conf_rec_len * 512;
		/* One conf record per secondary element (BVD). */
		tlen = len * currentconf->conf.sec_elmnt_count;
		vc = xmalloc(tlen);
		memcpy(vc, &currentconf->conf, len);
		for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
			memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
			       len);
		append_metadata_update(st, vc, tlen);

		/* FIXME I need to close the fds! */
		return 0;
	} else {
		struct dl *d;
		/* Creating a container: scrub any old metadata from the
		 * devices before writing ours (Kill returns 0 while it
		 * keeps finding something to remove). */
		if (!currentconf)
			for (d = ddf->dlist; d; d=d->next)
				while (Kill(d->devname, NULL, 0, -1, 1) == 0);
		return __write_init_super_ddf(st);
	}
}
3211
3212 #endif
3213
3214 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3215 unsigned long long data_offset)
3216 {
3217 /* We must reserve the last 32Meg */
3218 if (devsize <= 32*1024*2)
3219 return 0;
3220 return devsize - 32*1024*2;
3221 }
3222
3223 #ifndef MDASSEMBLE
3224
static int reserve_space(struct supertype *st, int raiddisks,
			 unsigned long long size, int chunk,
			 unsigned long long *freesize)
{
	/* Find 'raiddisks' spare extents at least 'size' big (but
	 * only caring about multiples of 'chunk') and remember
	 * them.  If size==0, find the largest size possible.
	 * Report available size in *freesize
	 * If space cannot be found, fail.
	 * On success the chosen devices have dl->raiddisk set to
	 * their slot number; all others keep raiddisk == -1.
	 */
	struct dl *dl;
	struct ddf_super *ddf = st->sb;
	int cnt = 0;

	/* Reset per-device scratch state before scanning. */
	for (dl = ddf->dlist; dl ; dl=dl->next) {
		dl->raiddisk = -1;
		dl->esize = 0;
	}
	/* Now find largest extent on each device */
	for (dl = ddf->dlist ; dl ; dl=dl->next) {
		struct extent *e = get_extents(ddf, dl);
		unsigned long long pos = 0;
		int i = 0;
		int found = 0;
		/* 'minsize' starts as the threshold and, once an
		 * extent qualifies, tracks the largest gap seen. */
		unsigned long long minsize = size;

		if (size == 0)
			minsize = chunk;

		if (!e)
			continue;
		/* Walk the sorted extent list; the gap before each
		 * extent (and after the last) is free space. */
		do {
			unsigned long long esize;
			esize = e[i].start - pos;
			if (esize >= minsize) {
				found = 1;
				minsize = esize;
			}
			pos = e[i].start + e[i].size;
			i++;
		} while (e[i-1].size);
		if (found) {
			cnt++;
			dl->esize = minsize;
		}
		free(e);
	}
	if (cnt < raiddisks) {
		pr_err("not enough devices with space to create array.\n");
		return 0; /* No enough free spaces large enough */
	}
	if (size == 0) {
		/* choose the largest size of which there are at least 'raiddisk' */
		for (dl = ddf->dlist ; dl ; dl=dl->next) {
			struct dl *dl2;
			if (dl->esize <= size)
				continue;
			/* This is bigger than 'size', see if there are enough */
			cnt = 0;
			for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
				if (dl2->esize >= dl->esize)
					cnt++;
			if (cnt >= raiddisks)
				size = dl->esize;
		}
		/* Round down to a whole number of chunks. */
		if (chunk) {
			size = size / chunk;
			size *= chunk;
		}
		*freesize = size;
		/* 32 sectors (16KiB) is the minimum useful size. */
		if (size < 32) {
			pr_err("not enough spare devices to create array.\n");
			return 0;
		}
	}
	/* We have a 'size' of which there are enough spaces.
	 * We simply do a first-fit */
	cnt = 0;
	for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
		if (dl->esize < size)
			continue;

		dl->raiddisk = cnt;
		cnt++;
	}
	return 1;
}
3312
3313 static int validate_geometry_ddf(struct supertype *st,
3314 int level, int layout, int raiddisks,
3315 int *chunk, unsigned long long size,
3316 unsigned long long data_offset,
3317 char *dev, unsigned long long *freesize,
3318 int verbose)
3319 {
3320 int fd;
3321 struct mdinfo *sra;
3322 int cfd;
3323
3324 /* ddf potentially supports lots of things, but it depends on
3325 * what devices are offered (and maybe kernel version?)
3326 * If given unused devices, we will make a container.
3327 * If given devices in a container, we will make a BVD.
3328 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3329 */
3330
3331 if (*chunk == UnSet)
3332 *chunk = DEFAULT_CHUNK;
3333
3334 if (level == LEVEL_NONE)
3335 level = LEVEL_CONTAINER;
3336 if (level == LEVEL_CONTAINER) {
3337 /* Must be a fresh device to add to a container */
3338 return validate_geometry_ddf_container(st, level, layout,
3339 raiddisks, *chunk,
3340 size, data_offset, dev,
3341 freesize,
3342 verbose);
3343 }
3344
3345 if (!dev) {
3346 mdu_array_info_t array = {
3347 .level = level,
3348 .layout = layout,
3349 .raid_disks = raiddisks
3350 };
3351 struct vd_config conf;
3352 if (layout_md2ddf(&array, &conf) == -1) {
3353 if (verbose)
3354 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3355 level, layout, raiddisks);
3356 return 0;
3357 }
3358 /* Should check layout? etc */
3359
3360 if (st->sb && freesize) {
3361 /* --create was given a container to create in.
3362 * So we need to check that there are enough
3363 * free spaces and return the amount of space.
3364 * We may as well remember which drives were
3365 * chosen so that add_to_super/getinfo_super
3366 * can return them.
3367 */
3368 return reserve_space(st, raiddisks, size, *chunk, freesize);
3369 }
3370 return 1;
3371 }
3372
3373 if (st->sb) {
3374 /* A container has already been opened, so we are
3375 * creating in there. Maybe a BVD, maybe an SVD.
3376 * Should make a distinction one day.
3377 */
3378 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3379 chunk, size, data_offset, dev,
3380 freesize,
3381 verbose);
3382 }
3383 /* This is the first device for the array.
3384 * If it is a container, we read it in and do automagic allocations,
3385 * no other devices should be given.
3386 * Otherwise it must be a member device of a container, and we
3387 * do manual allocation.
3388 * Later we should check for a BVD and make an SVD.
3389 */
3390 fd = open(dev, O_RDONLY|O_EXCL, 0);
3391 if (fd >= 0) {
3392 sra = sysfs_read(fd, NULL, GET_VERSION);
3393 close(fd);
3394 if (sra && sra->array.major_version == -1 &&
3395 strcmp(sra->text_version, "ddf") == 0) {
3396 /* load super */
3397 /* find space for 'n' devices. */
3398 /* remember the devices */
3399 /* Somehow return the fact that we have enough */
3400 }
3401
3402 if (verbose)
3403 pr_err("ddf: Cannot create this array "
3404 "on device %s - a container is required.\n",
3405 dev);
3406 return 0;
3407 }
3408 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3409 if (verbose)
3410 pr_err("ddf: Cannot open %s: %s\n",
3411 dev, strerror(errno));
3412 return 0;
3413 }
3414 /* Well, it is in use by someone, maybe a 'ddf' container. */
3415 cfd = open_container(fd);
3416 if (cfd < 0) {
3417 close(fd);
3418 if (verbose)
3419 pr_err("ddf: Cannot use %s: %s\n",
3420 dev, strerror(EBUSY));
3421 return 0;
3422 }
3423 sra = sysfs_read(cfd, NULL, GET_VERSION);
3424 close(fd);
3425 if (sra && sra->array.major_version == -1 &&
3426 strcmp(sra->text_version, "ddf") == 0) {
3427 /* This is a member of a ddf container. Load the container
3428 * and try to create a bvd
3429 */
3430 struct ddf_super *ddf;
3431 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3432 st->sb = ddf;
3433 strcpy(st->container_devnm, fd2devnm(cfd));
3434 close(cfd);
3435 return validate_geometry_ddf_bvd(st, level, layout,
3436 raiddisks, chunk, size,
3437 data_offset,
3438 dev, freesize,
3439 verbose);
3440 }
3441 close(cfd);
3442 } else /* device may belong to a different container */
3443 return 0;
3444
3445 return 1;
3446 }
3447
static int
validate_geometry_ddf_container(struct supertype *st,
				int level, int layout, int raiddisks,
				int chunk, unsigned long long size,
				unsigned long long data_offset,
				char *dev, unsigned long long *freesize,
				int verbose)
{
	/* Check whether 'dev' can become a DDF container member and
	 * report the usable data size in *freesize (in sectors).
	 * Returns 1 on success, 0 on failure.
	 * Only 'level', 'dev' and 'freesize' are examined; the other
	 * geometry parameters do not apply to containers.
	 */
	int fd;
	unsigned long long ldsize;

	if (level != LEVEL_CONTAINER)
		return 0;
	if (!dev)
		return 1;

	/* O_EXCL ensures the device is not already in use. */
	fd = open(dev, O_RDONLY|O_EXCL, 0);
	if (fd < 0) {
		if (verbose)
			pr_err("ddf: Cannot open %s: %s\n",
			       dev, strerror(errno));
		return 0;
	}
	if (!get_dev_size(fd, dev, &ldsize)) {
		close(fd);
		return 0;
	}
	close(fd);

	/* ldsize is bytes; avail_size_ddf wants sectors. */
	*freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
	if (*freesize == 0)
		return 0;

	return 1;
}
3483
3484 static int validate_geometry_ddf_bvd(struct supertype *st,
3485 int level, int layout, int raiddisks,
3486 int *chunk, unsigned long long size,
3487 unsigned long long data_offset,
3488 char *dev, unsigned long long *freesize,
3489 int verbose)
3490 {
3491 struct stat stb;
3492 struct ddf_super *ddf = st->sb;
3493 struct dl *dl;
3494 unsigned long long pos = 0;
3495 unsigned long long maxsize;
3496 struct extent *e;
3497 int i;
3498 /* ddf/bvd supports lots of things, but not containers */
3499 if (level == LEVEL_CONTAINER) {
3500 if (verbose)
3501 pr_err("DDF cannot create a container within an container\n");
3502 return 0;
3503 }
3504 /* We must have the container info already read in. */
3505 if (!ddf)
3506 return 0;
3507
3508 if (!dev) {
3509 /* General test: make sure there is space for
3510 * 'raiddisks' device extents of size 'size'.
3511 */
3512 unsigned long long minsize = size;
3513 int dcnt = 0;
3514 if (minsize == 0)
3515 minsize = 8;
3516 for (dl = ddf->dlist; dl ; dl = dl->next) {
3517 int found = 0;
3518 pos = 0;
3519
3520 i = 0;
3521 e = get_extents(ddf, dl);
3522 if (!e) continue;
3523 do {
3524 unsigned long long esize;
3525 esize = e[i].start - pos;
3526 if (esize >= minsize)
3527 found = 1;
3528 pos = e[i].start + e[i].size;
3529 i++;
3530 } while (e[i-1].size);
3531 if (found)
3532 dcnt++;
3533 free(e);
3534 }
3535 if (dcnt < raiddisks) {
3536 if (verbose)
3537 pr_err("ddf: Not enough devices with "
3538 "space for this array (%d < %d)\n",
3539 dcnt, raiddisks);
3540 return 0;
3541 }
3542 return 1;
3543 }
3544 /* This device must be a member of the set */
3545 if (stat(dev, &stb) < 0)
3546 return 0;
3547 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3548 return 0;
3549 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3550 if (dl->major == (int)major(stb.st_rdev) &&
3551 dl->minor == (int)minor(stb.st_rdev))
3552 break;
3553 }
3554 if (!dl) {
3555 if (verbose)
3556 pr_err("ddf: %s is not in the "
3557 "same DDF set\n",
3558 dev);
3559 return 0;
3560 }
3561 e = get_extents(ddf, dl);
3562 maxsize = 0;
3563 i = 0;
3564 if (e)
3565 do {
3566 unsigned long long esize;
3567 esize = e[i].start - pos;
3568 if (esize >= maxsize)
3569 maxsize = esize;
3570 pos = e[i].start + e[i].size;
3571 i++;
3572 } while (e[i-1].size);
3573 *freesize = maxsize;
3574 // FIXME here I am
3575
3576 return 1;
3577 }
3578
3579 static int load_super_ddf_all(struct supertype *st, int fd,
3580 void **sbp, char *devname)
3581 {
3582 struct mdinfo *sra;
3583 struct ddf_super *super;
3584 struct mdinfo *sd, *best = NULL;
3585 int bestseq = 0;
3586 int seq;
3587 char nm[20];
3588 int dfd;
3589
3590 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3591 if (!sra)
3592 return 1;
3593 if (sra->array.major_version != -1 ||
3594 sra->array.minor_version != -2 ||
3595 strcmp(sra->text_version, "ddf") != 0)
3596 return 1;
3597
3598 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3599 return 1;
3600 memset(super, 0, sizeof(*super));
3601
3602 /* first, try each device, and choose the best ddf */
3603 for (sd = sra->devs ; sd ; sd = sd->next) {
3604 int rv;
3605 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3606 dfd = dev_open(nm, O_RDONLY);
3607 if (dfd < 0)
3608 return 2;
3609 rv = load_ddf_headers(dfd, super, NULL);
3610 close(dfd);
3611 if (rv == 0) {
3612 seq = be32_to_cpu(super->active->seq);
3613 if (super->active->openflag)
3614 seq--;
3615 if (!best || seq > bestseq) {
3616 bestseq = seq;
3617 best = sd;
3618 }
3619 }
3620 }
3621 if (!best)
3622 return 1;
3623 /* OK, load this ddf */
3624 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3625 dfd = dev_open(nm, O_RDONLY);
3626 if (dfd < 0)
3627 return 1;
3628 load_ddf_headers(dfd, super, NULL);
3629 load_ddf_global(dfd, super, NULL);
3630 close(dfd);
3631 /* Now we need the device-local bits */
3632 for (sd = sra->devs ; sd ; sd = sd->next) {
3633 int rv;
3634
3635 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3636 dfd = dev_open(nm, O_RDWR);
3637 if (dfd < 0)
3638 return 2;
3639 rv = load_ddf_headers(dfd, super, NULL);
3640 if (rv == 0)
3641 rv = load_ddf_local(dfd, super, NULL, 1);
3642 if (rv)
3643 return 1;
3644 }
3645
3646 *sbp = super;
3647 if (st->ss == NULL) {
3648 st->ss = &super_ddf;
3649 st->minor_version = 0;
3650 st->max_devs = 512;
3651 }
3652 strcpy(st->container_devnm, fd2devnm(fd));
3653 return 0;
3654 }
3655
static int load_container_ddf(struct supertype *st, int fd,
			      char *devname)
{
	/* Thin wrapper: load a DDF container directly into st->sb.
	 * Returns 0 on success, non-zero on failure. */
	return load_super_ddf_all(st, fd, &st->sb, devname);
}
3661
3662 #endif /* MDASSEMBLE */
3663
3664 static int check_secondary(const struct vcl *vc)
3665 {
3666 const struct vd_config *conf = &vc->conf;
3667 int i;
3668
3669 /* The only DDF secondary RAID level md can support is
3670 * RAID 10, if the stripe sizes and Basic volume sizes
3671 * are all equal.
3672 * Other configurations could in theory be supported by exposing
3673 * the BVDs to user space and using device mapper for the secondary
3674 * mapping. So far we don't support that.
3675 */
3676
3677 __u64 sec_elements[4] = {0, 0, 0, 0};
3678 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= (1<<((n)&63)))
3679 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & (1<<((n)&63))) != 0)
3680
3681 if (vc->other_bvds == NULL) {
3682 pr_err("No BVDs for secondary RAID found\n");
3683 return -1;
3684 }
3685 if (conf->prl != DDF_RAID1) {
3686 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3687 return -1;
3688 }
3689 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3690 pr_err("Secondary RAID level %d is unsupported\n",
3691 conf->srl);
3692 return -1;
3693 }
3694 __set_sec_seen(conf->sec_elmnt_seq);
3695 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3696 const struct vd_config *bvd = vc->other_bvds[i];
3697 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3698 continue;
3699 if (bvd->srl != conf->srl) {
3700 pr_err("Inconsistent secondary RAID level across BVDs\n");
3701 return -1;
3702 }
3703 if (bvd->prl != conf->prl) {
3704 pr_err("Different RAID levels for BVDs are unsupported\n");
3705 return -1;
3706 }
3707 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3708 pr_err("All BVDs must have the same number of primary elements\n");
3709 return -1;
3710 }
3711 if (bvd->chunk_shift != conf->chunk_shift) {
3712 pr_err("Different strip sizes for BVDs are unsupported\n");
3713 return -1;
3714 }
3715 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3716 pr_err("Different BVD sizes are unsupported\n");
3717 return -1;
3718 }
3719 __set_sec_seen(bvd->sec_elmnt_seq);
3720 }
3721 for (i = 0; i < conf->sec_elmnt_count; i++) {
3722 if (!__was_sec_seen(i)) {
3723 pr_err("BVD %d is missing\n", i);
3724 return -1;
3725 }
3726 }
3727 return 0;
3728 }
3729
static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
					     be32 refnum, unsigned int nmax,
					     const struct vd_config **bvd,
					     unsigned int *idx)
{
	/* Locate the physical disk with reference 'refnum' in this
	 * virtual configuration (primary conf first, then the other
	 * BVDs).  On success, *bvd is the vd_config containing it,
	 * *idx its raw slot in phys_refnum, and the return value is
	 * the disk's overall raid-disk position (sec * cnt + valid
	 * entries before it).  Returns DDF_NOTFOUND with *bvd = NULL
	 * when the refnum is not present.
	 */
	unsigned int i, j, n, sec, cnt;

	cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
	sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);

	for (i = 0, j = 0 ; i < nmax ; i++) {
		/* j counts valid entries for this BVD */
		if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
			*bvd = &vc->conf;
			*idx = i;
			return sec * cnt + j;
		}
		/* 0xffffffff marks an unused slot - don't count it. */
		if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
			j++;
	}
	if (vc->other_bvds == NULL)
		goto bad;

	/* Repeat the search in each secondary BVD. */
	for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
		struct vd_config *vd = vc->other_bvds[n-1];
		sec = vd->sec_elmnt_seq;
		if (sec == DDF_UNUSED_BVD)
			continue;
		for (i = 0, j = 0 ; i < nmax ; i++) {
			if (be32_eq(vd->phys_refnum[i], refnum)) {
				*bvd = vd;
				*idx = i;
				return sec * cnt + j;
			}
			if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
				j++;
		}
	}
bad:
	*bvd = NULL;
	return DDF_NOTFOUND;
}
3772
static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
{
	/* Given a container loaded by load_super_ddf_all,
	 * extract information about all the arrays into
	 * an mdinfo tree.
	 * If 'subarray' is non-NULL, only the virtual disk with that
	 * decimal vcnum is reported.
	 *
	 * For each vcl in conflist: create an mdinfo, fill it in,
	 * then look for matching devices (phys_refnum) in dlist
	 * and create appropriate device mdinfo.
	 * Returns the head of the resulting (possibly empty) list.
	 */
	struct ddf_super *ddf = st->sb;
	struct mdinfo *rest = NULL;
	struct vcl *vc;

	for (vc = ddf->conflist ; vc ; vc=vc->next) {
		unsigned int i;
		struct mdinfo *this;
		char *ep;
		__u32 *cptr;
		unsigned int pd;

		/* Skip entries that don't match the requested subarray. */
		if (subarray &&
		    (strtoul(subarray, &ep, 10) != vc->vcnum ||
		     *ep != '\0'))
			continue;

		/* Multi-BVD (secondary RAID) configs are only reported
		 * when md can actually support them. */
		if (vc->conf.sec_elmnt_count > 1) {
			if (check_secondary(vc) != 0)
				continue;
		}

		this = xcalloc(1, sizeof(*this));
		this->next = rest;
		rest = this;

		if (layout_ddf2md(&vc->conf, &this->array))
			continue;
		this->array.md_minor      = -1;
		this->array.major_version = -1;
		this->array.minor_version = -2;
		this->safe_mode_delay	  = DDF_SAFE_MODE_DELAY;
		/* Creation time is stored in the GUID at offset 16;
		 * DDF timestamps are 1980-based, hence + DECADE. */
		cptr = (__u32 *)(vc->conf.guid + 16);
		this->array.ctime         = DECADE + __be32_to_cpu(*cptr);
		this->array.utime	  = DECADE +
			be32_to_cpu(vc->conf.timestamp);
		this->array.chunk_size	  = 512 << vc->conf.chunk_shift;

		i = vc->vcnum;
		if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
		    (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
		    DDF_init_full) {
			this->array.state = 0;
			this->resync_start = 0;
		} else {
			this->array.state = 1;
			this->resync_start = MaxSector;
		}
		_ddf_array_name(this->name, ddf, i);
		memset(this->uuid, 0, sizeof(this->uuid));
		this->component_size = be64_to_cpu(vc->conf.blocks);
		/* array.size is in KiB; component_size in sectors. */
		this->array.size = this->component_size / 2;
		this->container_member = i;

		/* uuid_from_super_ddf reads currentconf, so set it
		 * temporarily (and leave it set when a specific
		 * subarray was requested). */
		ddf->currentconf = vc;
		uuid_from_super_ddf(st, this->uuid);
		if (!subarray)
			ddf->currentconf = NULL;

		sprintf(this->text_version, "/%s/%d",
			st->container_devnm, this->container_member);

		/* Attach an mdinfo for every online member disk. */
		for (pd = 0; pd < be16_to_cpu(ddf->phys->max_pdes); pd++) {
			struct mdinfo *dev;
			struct dl *d;
			const struct vd_config *bvd;
			unsigned int iphys;
			int stt;

			/* 0xFFFFFFFF marks an unused phys-disk slot. */
			if (be32_to_cpu(ddf->phys->entries[pd].refnum)
			    == 0xFFFFFFFF)
				continue;

			stt = be16_to_cpu(ddf->phys->entries[pd].state);
			if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
			    != DDF_Online)
				continue;

			i = get_pd_index_from_refnum(
				vc, ddf->phys->entries[pd].refnum,
				ddf->mppe, &bvd, &iphys);
			if (i == DDF_NOTFOUND)
				continue;

			this->array.working_disks++;

			for (d = ddf->dlist; d ; d=d->next)
				if (be32_eq(d->disk.refnum,
					    ddf->phys->entries[pd].refnum))
					break;
			if (d == NULL)
				/* Haven't found that one yet, maybe there are others */
				continue;

			dev = xcalloc(1, sizeof(*dev));
			dev->next = this->devs;
			this->devs = dev;

			dev->disk.number = be32_to_cpu(d->disk.refnum);
			dev->disk.major = d->major;
			dev->disk.minor = d->minor;
			dev->disk.raid_disk = i;
			dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
			dev->recovery_start = MaxSector;

			dev->events = be32_to_cpu(ddf->active->seq);
			dev->data_offset =
				be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
			dev->component_size = be64_to_cpu(bvd->blocks);
			/* NOTE(review): unbounded strcpy - assumes
			 * d->devname fits in dev->name; confirm the
			 * buffer sizes in mdinfo/dl. */
			if (d->devname)
				strcpy(dev->name, d->devname);
		}
	}
	return rest;
}
3897
3898 static int store_super_ddf(struct supertype *st, int fd)
3899 {
3900 struct ddf_super *ddf = st->sb;
3901 unsigned long long dsize;
3902 void *buf;
3903 int rc;
3904
3905 if (!ddf)
3906 return 1;
3907
3908 if (!get_dev_size(fd, NULL, &dsize))
3909 return 1;
3910
3911 if (ddf->dlist || ddf->conflist) {
3912 struct stat sta;
3913 struct dl *dl;
3914 int ofd, ret;
3915
3916 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3917 pr_err("%s: file descriptor for invalid device\n",
3918 __func__);
3919 return 1;
3920 }
3921 for (dl = ddf->dlist; dl; dl = dl->next)
3922 if (dl->major == (int)major(sta.st_rdev) &&
3923 dl->minor == (int)minor(sta.st_rdev))
3924 break;
3925 if (!dl) {
3926 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3927 (int)major(sta.st_rdev),
3928 (int)minor(sta.st_rdev));
3929 return 1;
3930 }
3931 ofd = dl->fd;
3932 dl->fd = fd;
3933 ret = (_write_super_to_disk(ddf, dl) != 1);
3934 dl->fd = ofd;
3935 return ret;
3936 }
3937
3938 if (posix_memalign(&buf, 512, 512) != 0)
3939 return 1;
3940 memset(buf, 0, 512);
3941
3942 lseek64(fd, dsize-512, 0);
3943 rc = write(fd, buf, 512);
3944 free(buf);
3945 if (rc < 0)
3946 return 1;
3947 return 0;
3948 }
3949
3950 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3951 {
3952 /*
3953 * return:
3954 * 0 same, or first was empty, and second was copied
3955 * 1 second had wrong magic number - but that isn't possible
3956 * 2 wrong uuid
3957 * 3 wrong other info
3958 */
3959 struct ddf_super *first = st->sb;
3960 struct ddf_super *second = tst->sb;
3961 struct dl *dl1, *dl2;
3962 struct vcl *vl1, *vl2;
3963 unsigned int max_vds, max_pds, pd, vd;
3964
3965 if (!first) {
3966 st->sb = tst->sb;
3967 tst->sb = NULL;
3968 return 0;
3969 }
3970
3971 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3972 return 2;
3973
3974 /* It is only OK to compare info in the anchor. Anything else
3975 * could be changing due to a reconfig so must be ignored.
3976 * guid really should be enough anyway.
3977 */
3978
3979 if (!be32_eq(first->active->seq, second->active->seq)) {
3980 dprintf("%s: sequence number mismatch %u<->%u\n", __func__,
3981 be32_to_cpu(first->active->seq),
3982 be32_to_cpu(second->active->seq));
3983 return 0;
3984 }
3985
3986 /*
3987 * At this point we are fairly sure that the meta data matches.
3988 * But the new disk may contain additional local data.
3989 * Add it to the super block.
3990 */
3991 max_vds = be16_to_cpu(first->active->max_vd_entries);
3992 max_pds = be16_to_cpu(first->phys->max_pdes);
3993 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
3994 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
3995 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
3996 DDF_GUID_LEN))
3997 break;
3998 if (vl1) {
3999 if (vl1->other_bvds != NULL &&
4000 vl1->conf.sec_elmnt_seq !=
4001 vl2->conf.sec_elmnt_seq) {
4002 dprintf("%s: adding BVD %u\n", __func__,
4003 vl2->conf.sec_elmnt_seq);
4004 add_other_bvd(vl1, &vl2->conf,
4005 first->conf_rec_len*512);
4006 }
4007 continue;
4008 }
4009
4010 if (posix_memalign((void **)&vl1, 512,
4011 (first->conf_rec_len*512 +
4012 offsetof(struct vcl, conf))) != 0) {
4013 pr_err("%s could not allocate vcl buf\n",
4014 __func__);
4015 return 3;
4016 }
4017
4018 vl1->next = first->conflist;
4019 vl1->block_sizes = NULL;
4020 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
4021 if (alloc_other_bvds(first, vl1) != 0) {
4022 pr_err("%s could not allocate other bvds\n",
4023 __func__);
4024 free(vl1);
4025 return 3;
4026 }
4027 for (vd = 0; vd < max_vds; vd++)
4028 if (!memcmp(first->virt->entries[vd].guid,
4029 vl1->conf.guid, DDF_GUID_LEN))
4030 break;
4031 vl1->vcnum = vd;
4032 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
4033 first->conflist = vl1;
4034 }
4035
4036 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
4037 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
4038 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
4039 break;
4040 if (dl1)
4041 continue;
4042
4043 if (posix_memalign((void **)&dl1, 512,
4044 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
4045 != 0) {
4046 pr_err("%s could not allocate disk info buffer\n",
4047 __func__);
4048 return 3;
4049 }
4050 memcpy(dl1, dl2, sizeof(*dl1));
4051 dl1->mdupdate = NULL;
4052 dl1->next = first->dlist;
4053 dl1->fd = -1;
4054 for (pd = 0; pd < max_pds; pd++)
4055 if (be32_eq(first->phys->entries[pd].refnum,
4056 dl1->disk.refnum))
4057 break;
4058 dl1->pdnum = pd < max_pds ? (int)pd : -1;
4059 if (dl2->spare) {
4060 if (posix_memalign((void **)&dl1->spare, 512,
4061 first->conf_rec_len*512) != 0) {
4062 pr_err("%s could not allocate spare info buf\n",
4063 __func__);
4064 return 3;
4065 }
4066 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
4067 }
4068 for (vd = 0 ; vd < first->max_part ; vd++) {
4069 if (!dl2->vlist[vd]) {
4070 dl1->vlist[vd] = NULL;
4071 continue;
4072 }
4073 for (vl1 = first->conflist; vl1; vl1 = vl1->next) {
4074 if (!memcmp(vl1->conf.guid,
4075 dl2->vlist[vd]->conf.guid,
4076 DDF_GUID_LEN))
4077 break;
4078 dl1->vlist[vd] = vl1;
4079 }
4080 }
4081 first->dlist = dl1;
4082 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
4083 be32_to_cpu(dl1->disk.refnum));
4084 }
4085
4086 return 0;
4087 }
4088
4089 #ifndef MDASSEMBLE
4090 /*
4091 * A new array 'a' has been started which claims to be instance 'inst'
4092 * within container 'c'.
4093 * We need to confirm that the array matches the metadata in 'c' so
4094 * that we don't corrupt any metadata.
4095 */
static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
{
	/* Validate a newly started member array 'a' (instance 'inst')
	 * against the container metadata, so we don't corrupt it.
	 * Any device of the array that the metadata considers broken
	 * is immediately marked faulty via its state_fd.
	 * Returns 0 on success, -ENODEV/-1 on mismatch.
	 */
	struct ddf_super *ddf = c->sb;
	int n = atoi(inst);
	struct mdinfo *dev;
	struct dl *dl;
	static const char faulty[] = "faulty";

	/* An all-0xff GUID means the VD slot is unused. */
	if (all_ff(ddf->virt->entries[n].guid)) {
		pr_err("%s: subarray %d doesn't exist\n", __func__, n);
		return -ENODEV;
	}
	dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
		guid_str(ddf->virt->entries[n].guid));
	for (dev = a->info.devs; dev; dev = dev->next) {
		/* Every device of the array must be a known member
		 * disk of the container. */
		for (dl = ddf->dlist; dl; dl = dl->next)
			if (dl->major == dev->disk.major &&
			    dl->minor == dev->disk.minor)
				break;
		if (!dl || dl->pdnum < 0) {
			pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
			       __func__, dev->disk.major, dev->disk.minor, n);
			return -1;
		}
		if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
		     (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
			pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
			       __func__, n, dl->major, dl->minor,
			       be16_to_cpu(
				       ddf->phys->entries[dl->pdnum].state));
			/* sizeof(faulty)-1 excludes the NUL. */
			if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
			    sizeof(faulty) - 1)
				pr_err("Write to state_fd failed\n");
			dev->curr_state = DS_FAULTY;
		}
	}
	a->info.container_member = n;
	return 0;
}
4135
static void handle_missing(struct ddf_super *ddf, struct active_array *a, int inst)
{
	/* This member array is being activated.  If any devices
	 * are missing they must now be marked as failed.
	 * Also recompute and store the array's overall state,
	 * and request a degraded-check when it changed.
	 */
	struct vd_config *vc;
	unsigned int n_bvd;
	struct vcl *vcl;
	struct dl *dl;
	int pd;
	int n;
	int state;

	/* Iterate over every raid-disk slot of subarray 'inst'. */
	for (n = 0; ; n++) {
		vc = find_vdcr(ddf, inst, n, &n_bvd, &vcl);
		if (!vc)
			break;
		for (dl = ddf->dlist; dl; dl = dl->next)
			if (be32_eq(dl->disk.refnum, vc->phys_refnum[n_bvd]))
				break;
		if (dl)
			/* Found this disk, so not missing */
			continue;

		/* Mark the device as failed/missing. */
		pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
		if (pd >= 0 && be16_and(ddf->phys->entries[pd].state,
					cpu_to_be16(DDF_Online))) {
			be16_clear(ddf->phys->entries[pd].state,
				   cpu_to_be16(DDF_Online));
			be16_set(ddf->phys->entries[pd].state,
				 cpu_to_be16(DDF_Failed|DDF_Missing));
			/* Detach the slot from the failed disk. */
			vc->phys_refnum[n_bvd] = cpu_to_be32(0);
			ddf_set_updates_pending(ddf);
		}

		/* Mark the array as Degraded */
		state = get_svd_state(ddf, vcl);
		/* Only touch metadata if the state bits change. */
		if (ddf->virt->entries[inst].state !=
		    ((ddf->virt->entries[inst].state & ~DDF_state_mask)
		     | state)) {
			ddf->virt->entries[inst].state =
				(ddf->virt->entries[inst].state & ~DDF_state_mask)
				| state;
			a->check_degraded = 1;
			ddf_set_updates_pending(ddf);
		}
	}
}
4185
4186 /*
4187 * The array 'a' is to be marked clean in the metadata.
4188 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4189 * clean up to the point (in sectors). If that cannot be recorded in the
4190 * metadata, then leave it as dirty.
4191 *
4192 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4193 * !global! virtual_disk.virtual_entry structure.
4194 */
static int ddf_set_array_state(struct active_array *a, int consistent)
{
	/* Record clean/dirty state for member array 'a' in the global
	 * virtual_disk entry (DDF_state_inconsistent bit) and update
	 * the init_state according to resync progress.
	 * consistent == 2 means "about to become active": first fail
	 * any missing devices, then decide consistency from resync.
	 * Returns the consistency value actually recorded.
	 */
	struct ddf_super *ddf = a->container->sb;
	int inst = a->info.container_member;
	int old = ddf->virt->entries[inst].state;
	if (consistent == 2) {
		handle_missing(ddf, a, inst);
		/* Should check if a recovery should be started FIXME */
		consistent = 1;
		if (!is_resync_complete(&a->info))
			consistent = 0;
	}
	if (consistent)
		ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
	else
		ddf->virt->entries[inst].state |= DDF_state_inconsistent;
	/* Only schedule a metadata write when bits actually changed. */
	if (old != ddf->virt->entries[inst].state)
		ddf_set_updates_pending(ddf);

	old = ddf->virt->entries[inst].init_state;
	ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
	if (is_resync_complete(&a->info))
		ddf->virt->entries[inst].init_state |= DDF_init_full;
	else if (a->info.resync_start == 0)
		ddf->virt->entries[inst].init_state |= DDF_init_not;
	else
		/* Resync started but incomplete. */
		ddf->virt->entries[inst].init_state |= DDF_init_quick;
	if (old != ddf->virt->entries[inst].init_state)
		ddf_set_updates_pending(ddf);

	dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
		guid_str(ddf->virt->entries[inst].guid), a->curr_state,
		consistent?"clean":"dirty",
		a->info.resync_start);
	return consistent;
}
4231
static int get_bvd_state(const struct ddf_super *ddf,
			 const struct vd_config *vc)
{
	/* Compute the DDF_state_* value for a single BVD from the
	 * states of its member physical disks: count how many primary
	 * elements are fully online, then map the shortfall onto the
	 * redundancy of the RAID level.
	 */
	unsigned int i, n_bvd, working = 0;
	unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
	int pd, st, state;
	for (i = 0; i < n_prim; i++) {
		if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
			continue;
		pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
		if (pd < 0)
			continue;
		st = be16_to_cpu(ddf->phys->entries[pd].state);
		/* A disk counts as working only when Online and
		 * neither Failed nor Rebuilding. */
		if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
		    == DDF_Online)
			working++;
	}

	/* Default for any shortfall the level can tolerate. */
	state = DDF_state_degraded;
	if (working == n_prim)
		state = DDF_state_optimal;
	else
		switch (vc->prl) {
		case DDF_RAID0:
		case DDF_CONCAT:
		case DDF_JBOD:
			/* No redundancy: any missing disk is fatal. */
			state = DDF_state_failed;
			break;
		case DDF_RAID1:
			if (working == 0)
				state = DDF_state_failed;
			else if (working >= 2)
				state = DDF_state_part_optimal;
			break;
		case DDF_RAID4:
		case DDF_RAID5:
			/* Survives the loss of one disk. */
			if (working < n_prim - 1)
				state = DDF_state_failed;
			break;
		case DDF_RAID6:
			/* Survives the loss of two disks. */
			if (working < n_prim - 2)
				state = DDF_state_failed;
			else if (working == n_prim - 1)
				state = DDF_state_part_optimal;
			break;
		}
	return state;
}
4280
4281 static int secondary_state(int state, int other, int seclevel)
4282 {
4283 if (state == DDF_state_optimal && other == DDF_state_optimal)
4284 return DDF_state_optimal;
4285 if (seclevel == DDF_2MIRRORED) {
4286 if (state == DDF_state_optimal || other == DDF_state_optimal)
4287 return DDF_state_part_optimal;
4288 if (state == DDF_state_failed && other == DDF_state_failed)
4289 return DDF_state_failed;
4290 return DDF_state_degraded;
4291 } else {
4292 if (state == DDF_state_failed || other == DDF_state_failed)
4293 return DDF_state_failed;
4294 if (state == DDF_state_degraded || other == DDF_state_degraded)
4295 return DDF_state_degraded;
4296 return DDF_state_part_optimal;
4297 }
4298 }
4299
4300 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4301 {
4302 int state = get_bvd_state(ddf, &vcl->conf);
4303 unsigned int i;
4304 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4305 state = secondary_state(
4306 state,
4307 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4308 vcl->conf.srl);
4309 }
4310 return state;
4311 }
4312
4313 /*
4314 * The state of each disk is stored in the global phys_disk structure
4315 * in phys_disk.entries[n].state.
4316 * This makes various combinations awkward.
4317 * - When a device fails in any array, it must be failed in all arrays
4318 * that include a part of this device.
4319 * - When a component is rebuilding, we cannot include it officially in the
4320 * array unless this is the only array that uses the device.
4321 *
4322 * So: when transitioning:
4323 * Online -> failed, just set failed flag. monitor will propagate
4324 * spare -> online, the device might need to be added to the array.
4325 * spare -> failed, just set failed. Don't worry if in array or not.
4326 */
4327 static void ddf_set_disk(struct active_array *a, int n, int state)
4328 {
4329 struct ddf_super *ddf = a->container->sb;
4330 unsigned int inst = a->info.container_member, n_bvd;
4331 struct vcl *vcl;
4332 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4333 &n_bvd, &vcl);
4334 int pd;
4335 struct mdinfo *mdi;
4336 struct dl *dl;
4337
4338 dprintf("%s: %d to %x\n", __func__, n, state);
4339 if (vc == NULL) {
4340 dprintf("ddf: cannot find instance %d!!\n", inst);
4341 return;
4342 }
4343 /* Find the matching slot in 'info'. */
4344 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4345 if (mdi->disk.raid_disk == n)
4346 break;
4347 if (!mdi) {
4348 pr_err("%s: cannot find raid disk %d\n",
4349 __func__, n);
4350 return;
4351 }
4352
4353 /* and find the 'dl' entry corresponding to that. */
4354 for (dl = ddf->dlist; dl; dl = dl->next)
4355 if (mdi->state_fd >= 0 &&
4356 mdi->disk.major == dl->major &&
4357 mdi->disk.minor == dl->minor)
4358 break;
4359 if (!dl) {
4360 pr_err("%s: cannot find raid disk %d (%d/%d)\n",
4361 __func__, n,
4362 mdi->disk.major, mdi->disk.minor);
4363 return;
4364 }
4365
4366 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4367 if (pd < 0 || pd != dl->pdnum) {
4368 /* disk doesn't currently exist or has changed.
4369 * If it is now in_sync, insert it. */
4370 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4371 __func__, dl->pdnum, dl->major, dl->minor,
4372 be32_to_cpu(dl->disk.refnum));
4373 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4374 __func__, inst, n_bvd,
4375 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4376 if ((state & DS_INSYNC) && ! (state & DS_FAULTY)) {
4377 pd = dl->pdnum; /* FIXME: is this really correct ? */
4378 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4379 LBA_OFFSET(ddf, vc)[n_bvd] =
4380 cpu_to_be64(mdi->data_offset);
4381 be16_clear(ddf->phys->entries[pd].type,
4382 cpu_to_be16(DDF_Global_Spare));
4383 be16_set(ddf->phys->entries[pd].type,
4384 cpu_to_be16(DDF_Active_in_VD));
4385 ddf_set_updates_pending(ddf);
4386 }
4387 } else {
4388 be16 old = ddf->phys->entries[pd].state;
4389 if (state & DS_FAULTY)
4390 be16_set(ddf->phys->entries[pd].state,
4391 cpu_to_be16(DDF_Failed));
4392 if (state & DS_INSYNC) {
4393 be16_set(ddf->phys->entries[pd].state,
4394 cpu_to_be16(DDF_Online));
4395 be16_clear(ddf->phys->entries[pd].state,
4396 cpu_to_be16(DDF_Rebuilding));
4397 }
4398 if (!be16_eq(old, ddf->phys->entries[pd].state))
4399 ddf_set_updates_pending(ddf);
4400 }
4401
4402 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4403 be32_to_cpu(dl->disk.refnum), state,
4404 be16_to_cpu(ddf->phys->entries[pd].state));
4405
4406 /* Now we need to check the state of the array and update
4407 * virtual_disk.entries[n].state.
4408 * It needs to be one of "optimal", "degraded", "failed".
4409 * I don't understand 'deleted' or 'missing'.
4410 */
4411 state = get_svd_state(ddf, vcl);
4412
4413 if (ddf->virt->entries[inst].state !=
4414 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4415 | state)) {
4416 ddf->virt->entries[inst].state =
4417 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4418 | state;
4419 ddf_set_updates_pending(ddf);
4420 }
4421
4422 }
4423
4424 static void ddf_sync_metadata(struct supertype *st)
4425 {
4426 /*
4427 * Write all data to all devices.
4428 * Later, we might be able to track whether only local changes
4429 * have been made, or whether any global data has been changed,
4430 * but ddf is sufficiently weird that it probably always
4431 * changes global data ....
4432 */
4433 struct ddf_super *ddf = st->sb;
4434 if (!ddf->updates_pending)
4435 return;
4436 ddf->updates_pending = 0;
4437 __write_init_super_ddf(st);
4438 dprintf("ddf: sync_metadata\n");
4439 }
4440
4441 static int del_from_conflist(struct vcl **list, const char *guid)
4442 {
4443 struct vcl **p;
4444 int found = 0;
4445 for (p = list; p && *p; p = &((*p)->next))
4446 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4447 found = 1;
4448 *p = (*p)->next;
4449 }
4450 return found;
4451 }
4452
4453 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4454 {
4455 struct dl *dl;
4456 unsigned int vdnum, i;
4457 vdnum = find_vde_by_guid(ddf, guid);
4458 if (vdnum == DDF_NOTFOUND) {
4459 pr_err("%s: could not find VD %s\n", __func__,
4460 guid_str(guid));
4461 return -1;
4462 }
4463 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4464 pr_err("%s: could not find conf %s\n", __func__,
4465 guid_str(guid));
4466 return -1;
4467 }
4468 for (dl = ddf->dlist; dl; dl = dl->next)
4469 for (i = 0; i < ddf->max_part; i++)
4470 if (dl->vlist[i] != NULL &&
4471 !memcmp(dl->vlist[i]->conf.guid, guid,
4472 DDF_GUID_LEN))
4473 dl->vlist[i] = NULL;
4474 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4475 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4476 return 0;
4477 }
4478
4479 static int kill_subarray_ddf(struct supertype *st)
4480 {
4481 struct ddf_super *ddf = st->sb;
4482 /*
4483 * currentconf is set in container_content_ddf,
4484 * called with subarray arg
4485 */
4486 struct vcl *victim = ddf->currentconf;
4487 struct vd_config *conf;
4488 unsigned int vdnum;
4489
4490 ddf->currentconf = NULL;
4491 if (!victim) {
4492 pr_err("%s: nothing to kill\n", __func__);
4493 return -1;
4494 }
4495 conf = &victim->conf;
4496 vdnum = find_vde_by_guid(ddf, conf->guid);
4497 if (vdnum == DDF_NOTFOUND) {
4498 pr_err("%s: could not find VD %s\n", __func__,
4499 guid_str(conf->guid));
4500 return -1;
4501 }
4502 if (st->update_tail) {
4503 struct virtual_disk *vd;
4504 int len = sizeof(struct virtual_disk)
4505 + sizeof(struct virtual_entry);
4506 vd = xmalloc(len);
4507 if (vd == NULL) {
4508 pr_err("%s: failed to allocate %d bytes\n", __func__,
4509 len);
4510 return -1;
4511 }
4512 memset(vd, 0 , len);
4513 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4514 vd->populated_vdes = cpu_to_be16(0);
4515 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4516 /* we use DDF_state_deleted as marker */
4517 vd->entries[0].state = DDF_state_deleted;
4518 append_metadata_update(st, vd, len);
4519 } else {
4520 _kill_subarray_ddf(ddf, conf->guid);
4521 ddf_set_updates_pending(ddf);
4522 ddf_sync_metadata(st);
4523 }
4524 return 0;
4525 }
4526
4527 static void copy_matching_bvd(struct ddf_super *ddf,
4528 struct vd_config *conf,
4529 const struct metadata_update *update)
4530 {
4531 unsigned int mppe =
4532 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4533 unsigned int len = ddf->conf_rec_len * 512;
4534 char *p;
4535 struct vd_config *vc;
4536 for (p = update->buf; p < update->buf + update->len; p += len) {
4537 vc = (struct vd_config *) p;
4538 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
4539 memcpy(conf->phys_refnum, vc->phys_refnum,
4540 mppe * (sizeof(__u32) + sizeof(__u64)));
4541 return;
4542 }
4543 }
4544 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4545 conf->sec_elmnt_seq, guid_str(conf->guid));
4546 }
4547
static void ddf_process_update(struct supertype *st,
			       struct metadata_update *update)
{
	/* Apply this update to the metadata.
	 * The first 4 bytes are a DDF_*_MAGIC which guides
	 * our actions.
	 * Possible update are:
	 *  DDF_PHYS_RECORDS_MAGIC
	 *    Add a new physical device or remove an old one.
	 *    Changes to this record only happen implicitly.
	 *    used_pdes is the device number.
	 *  DDF_VIRT_RECORDS_MAGIC
	 *    Add a new VD.  Possibly also change the 'access' bits.
	 *    populated_vdes is the entry number.
	 *  DDF_VD_CONF_MAGIC
	 *    New or updated VD.  the VIRT_RECORD must already
	 *    exist.  For an update, phys_refnum and lba_offset
	 *    (at least) are updated, and the VD_CONF must
	 *    be written to precisely those devices listed with
	 *    a phys_refnum.
	 *  DDF_SPARE_ASSIGN_MAGIC
	 *    replacement Spare Assignment Record... but for which device?
	 *
	 * So, e.g.:
	 *  - to create a new array, we send a VIRT_RECORD and
	 *    a VD_CONF.  Then assemble and start the array.
	 *  - to activate a spare we send a VD_CONF to add the phys_refnum
	 *    and offset.  This will also mark the spare as active with
	 *    a spare-assignment record.
	 */
	struct ddf_super *ddf = st->sb;
	be32 *magic = (be32 *)update->buf;
	struct phys_disk *pd;
	struct virtual_disk *vd;
	struct vd_config *vc;
	struct vcl *vcl;
	struct dl *dl;
	unsigned int ent;
	unsigned int pdnum, pd2, len;

	dprintf("Process update %x\n", be32_to_cpu(*magic));

	if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
		/* A PD update is exactly one phys_disk header plus
		 * one phys_disk_entry; anything else is malformed
		 * and silently ignored. */
		if (update->len != (sizeof(struct phys_disk) +
				    sizeof(struct phys_disk_entry)))
			return;
		pd = (struct phys_disk*)update->buf;

		/* used_pdes in the update carries the target slot
		 * number, not a count. */
		ent = be16_to_cpu(pd->used_pdes);
		if (ent >= be16_to_cpu(ddf->phys->max_pdes))
			return;
		if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
			struct dl **dlp;
			/* removing this disk. */
			be16_set(ddf->phys->entries[ent].state,
				 cpu_to_be16(DDF_Missing));
			/* Unlink the matching dl from dlist and park it
			 * in update->space so it is released together
			 * with the update (not freed here: the monitor
			 * thread must not free). */
			for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
				struct dl *dl = *dlp;
				if (dl->pdnum == (signed)ent) {
					close(dl->fd);
					dl->fd = -1;
					/* FIXME this doesn't free
					 * dl->devname */
					update->space = dl;
					*dlp = dl->next;
					break;
				}
			}
			ddf_set_updates_pending(ddf);
			return;
		}
		/* Adding a disk: the slot must be unused (all-0xff
		 * GUID marks a free entry). */
		if (!all_ff(ddf->phys->entries[ent].guid))
			return;
		ddf->phys->entries[ent] = pd->entries[0];
		ddf->phys->used_pdes = cpu_to_be16
			(1 + be16_to_cpu(ddf->phys->used_pdes));
		ddf_set_updates_pending(ddf);
		if (ddf->add_list) {
			struct active_array *a;
			struct dl *al = ddf->add_list;
			ddf->add_list = al->next;

			al->next = ddf->dlist;
			ddf->dlist = al;

			/* As a device has been added, we should check
			 * for any degraded devices that might make
			 * use of this spare */
			for (a = st->arrays ; a; a=a->next)
				a->check_degraded = 1;
		}
	} else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
		/* One header plus one entry, same shape check as
		 * for PD records. */
		if (update->len != (sizeof(struct virtual_disk) +
				    sizeof(struct virtual_entry)))
			return;
		vd = (struct virtual_disk*)update->buf;

		/* DDF_state_deleted is used by kill_subarray_ddf() as
		 * an in-band "delete this VD" marker. */
		if (vd->entries[0].state == DDF_state_deleted) {
			if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
				return;
		} else {
			ent = find_vde_by_guid(ddf, vd->entries[0].guid);
			if (ent != DDF_NOTFOUND) {
				dprintf("%s: VD %s exists already in slot %d\n",
					__func__, guid_str(vd->entries[0].guid),
					ent);
				return;
			}
			ent = find_unused_vde(ddf);
			if (ent == DDF_NOTFOUND)
				return;
			ddf->virt->entries[ent] = vd->entries[0];
			ddf->virt->populated_vdes =
				cpu_to_be16(
					1 + be16_to_cpu(
						ddf->virt->populated_vdes));
			dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
				__func__, guid_str(vd->entries[0].guid), ent,
				ddf->virt->entries[ent].state,
				ddf->virt->entries[ent].init_state);
		}
		ddf_set_updates_pending(ddf);
	}

	else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
		vc = (struct vd_config*)update->buf;
		len = ddf->conf_rec_len * 512;
		/* The update must carry one conf record per BVD. */
		if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
			pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
			       __func__, guid_str(vc->guid), update->len,
			       vc->sec_elmnt_count);
			return;
		}
		for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
			if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
				break;
		dprintf("%s: conf update for %s (%s)\n", __func__,
			guid_str(vc->guid), (vcl ? "old" : "new"));
		if (vcl) {
			/* An update, just copy the phys_refnum and lba_offset
			 * fields
			 */
			unsigned int i;
			unsigned int k;
			copy_matching_bvd(ddf, &vcl->conf, update);
			for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
				dprintf("BVD %u has %08x at %llu\n", 0,
					be32_to_cpu(vcl->conf.phys_refnum[k]),
					be64_to_cpu(LBA_OFFSET(ddf,
							       &vcl->conf)[k]));
			for (i = 1; i < vc->sec_elmnt_count; i++) {
				copy_matching_bvd(ddf, vcl->other_bvds[i-1],
						  update);
				for (k = 0; k < be16_to_cpu(
					     vc->prim_elmnt_count); k++)
					dprintf("BVD %u has %08x at %llu\n", i,
						be32_to_cpu
						(vcl->other_bvds[i-1]->
						 phys_refnum[k]),
						be64_to_cpu
						(LBA_OFFSET
						 (ddf,
						  vcl->other_bvds[i-1])[k]));
			}
		} else {
			/* A new VD_CONF */
			/* update->space was pre-allocated as a vcl by
			 * ddf_prepare_update(); take ownership of it. */
			unsigned int i;
			if (!update->space)
				return;
			vcl = update->space;
			update->space = NULL;
			vcl->next = ddf->conflist;
			memcpy(&vcl->conf, vc, len);
			ent = find_vde_by_guid(ddf, vc->guid);
			if (ent == DDF_NOTFOUND)
				return;
			vcl->vcnum = ent;
			ddf->conflist = vcl;
			for (i = 1; i < vc->sec_elmnt_count; i++)
				memcpy(vcl->other_bvds[i-1],
				       update->buf + len * i, len);
		}
		/* Set DDF_Transition on all Failed devices - to help
		 * us detect those that are no longer in use
		 */
		for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
		     pdnum++)
			if (be16_and(ddf->phys->entries[pdnum].state,
				     cpu_to_be16(DDF_Failed)))
				be16_set(ddf->phys->entries[pdnum].state,
					 cpu_to_be16(DDF_Transition));
		/* Now make sure vlist is correct for each dl. */
		for (dl = ddf->dlist; dl; dl = dl->next) {
			unsigned int vn = 0;
			int in_degraded = 0;

			if (dl->pdnum < 0)
				continue;
			/* Rebuild dl->vlist[] from the conflist and,
			 * for disks still referenced by a VD, clear
			 * the Transition flag set above. */
			for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
				unsigned int dn, ibvd;
				const struct vd_config *conf;
				int vstate;
				dn = get_pd_index_from_refnum(vcl,
							      dl->disk.refnum,
							      ddf->mppe,
							      &conf, &ibvd);
				if (dn == DDF_NOTFOUND)
					continue;
				dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
					dl->pdnum,
					be32_to_cpu(dl->disk.refnum),
					guid_str(conf->guid),
					conf->sec_elmnt_seq, vn);
				/* Clear the Transition flag */
				if (be16_and
				    (ddf->phys->entries[dl->pdnum].state,
				     cpu_to_be16(DDF_Failed)))
					be16_clear(ddf->phys
						   ->entries[dl->pdnum].state,
						   cpu_to_be16(DDF_Transition));
				dl->vlist[vn++] = vcl;
				vstate = ddf->virt->entries[vcl->vcnum].state
					& DDF_state_mask;
				if (vstate == DDF_state_degraded ||
				    vstate == DDF_state_part_optimal)
					in_degraded = 1;
			}
			while (vn < ddf->max_part)
				dl->vlist[vn++] = NULL;
			/* Fix up the type flags to match the new
			 * membership: active member, dedicated spare,
			 * or global spare. */
			if (dl->vlist[0]) {
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Global_Spare));
				if (!be16_and(ddf->phys
					      ->entries[dl->pdnum].type,
					      cpu_to_be16(DDF_Active_in_VD))) {
					be16_set(ddf->phys
						 ->entries[dl->pdnum].type,
						 cpu_to_be16(DDF_Active_in_VD));
					if (in_degraded)
						be16_set(ddf->phys
							 ->entries[dl->pdnum]
							 .state,
							 cpu_to_be16
							 (DDF_Rebuilding));
				}
			}
			if (dl->spare) {
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Global_Spare));
				be16_set(ddf->phys->entries[dl->pdnum].type,
					 cpu_to_be16(DDF_Spare));
			}
			if (!dl->vlist[0] && !dl->spare) {
				be16_set(ddf->phys->entries[dl->pdnum].type,
					 cpu_to_be16(DDF_Global_Spare));
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Spare));
				be16_clear(ddf->phys->entries[dl->pdnum].type,
					   cpu_to_be16(DDF_Active_in_VD));
			}
		}

		/* Now remove any 'Failed' devices that are not part
		 * of any VD.  They will have the Transition flag set.
		 * Once done, we need to update all dl->pdnum numbers.
		 */
		pd2 = 0;
		for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
		     pdnum++) {
			if (be32_to_cpu(ddf->phys->entries[pdnum].refnum) ==
			    0xFFFFFFFF)
				continue;
			if (be16_and(ddf->phys->entries[pdnum].state,
				     cpu_to_be16(DDF_Failed))
			    && be16_and(ddf->phys->entries[pdnum].state,
					cpu_to_be16(DDF_Transition))) {
				/* skip this one unless in dlist*/
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						break;
				if (!dl)
					continue;
			}
			/* Compact surviving entries towards the front,
			 * keeping dl->pdnum in step. */
			if (pdnum == pd2)
				pd2++;
			else {
				ddf->phys->entries[pd2] =
					ddf->phys->entries[pdnum];
				for (dl = ddf->dlist; dl; dl = dl->next)
					if (dl->pdnum == (int)pdnum)
						dl->pdnum = pd2;
				pd2++;
			}
		}
		ddf->phys->used_pdes = cpu_to_be16(pd2);
		/* Mark the now-vacant tail entries as unused. */
		while (pd2 < pdnum) {
			memset(ddf->phys->entries[pd2].guid, 0xff,
			       DDF_GUID_LEN);
			pd2++;
		}

		ddf_set_updates_pending(ddf);
	}
	/* case DDF_SPARE_ASSIGN_MAGIC */
}
4853
4854 static void ddf_prepare_update(struct supertype *st,
4855 struct metadata_update *update)
4856 {
4857 /* This update arrived at managemon.
4858 * We are about to pass it to monitor.
4859 * If a malloc is needed, do it here.
4860 */
4861 struct ddf_super *ddf = st->sb;
4862 be32 *magic = (be32 *)update->buf;
4863 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4864 struct vcl *vcl;
4865 struct vd_config *conf = (struct vd_config *) update->buf;
4866 if (posix_memalign(&update->space, 512,
4867 offsetof(struct vcl, conf)
4868 + ddf->conf_rec_len * 512) != 0) {
4869 update->space = NULL;
4870 return;
4871 }
4872 vcl = update->space;
4873 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4874 if (alloc_other_bvds(ddf, vcl) != 0) {
4875 free(update->space);
4876 update->space = NULL;
4877 }
4878 }
4879 }
4880
4881 /*
4882 * Check degraded state of a RAID10.
4883 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4884 */
4885 static int raid10_degraded(struct mdinfo *info)
4886 {
4887 int n_prim, n_bvds;
4888 int i;
4889 struct mdinfo *d;
4890 char *found;
4891 int ret = -1;
4892
4893 n_prim = info->array.layout & ~0x100;
4894 n_bvds = info->array.raid_disks / n_prim;
4895 found = xmalloc(n_bvds);
4896 if (found == NULL)
4897 return ret;
4898 memset(found, 0, n_bvds);
4899 for (d = info->devs; d; d = d->next) {
4900 i = d->disk.raid_disk / n_prim;
4901 if (i >= n_bvds) {
4902 pr_err("%s: BUG: invalid raid disk\n", __func__);
4903 goto out;
4904 }
4905 if (d->state_fd > 0)
4906 found[i]++;
4907 }
4908 ret = 2;
4909 for (i = 0; i < n_bvds; i++)
4910 if (!found[i]) {
4911 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4912 ret = 0;
4913 goto out;
4914 } else if (found[i] < n_prim) {
4915 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4916 n_bvds);
4917 ret = 1;
4918 }
4919 out:
4920 free(found);
4921 return ret;
4922 }
4923
4924 /*
4925 * Check if the array 'a' is degraded but not failed.
4926 * If it is, find as many spares as are available and needed and
4927 * arrange for their inclusion.
4928 * We only choose devices which are not already in the array,
4929 * and prefer those with a spare-assignment to this array.
4930 * Otherwise we choose global spares - assuming always that
4931 * there is enough room.
4932 * For each spare that we assign, we return an 'mdinfo' which
4933 * describes the position for the device in the array.
4934 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4935 * the new phys_refnum and lba_offset values.
4936 *
4937 * Only worry about BVDs at the moment.
4938 */
4939 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4940 struct metadata_update **updates)
4941 {
4942 int working = 0;
4943 struct mdinfo *d;
4944 struct ddf_super *ddf = a->container->sb;
4945 int global_ok = 0;
4946 struct mdinfo *rv = NULL;
4947 struct mdinfo *di;
4948 struct metadata_update *mu;
4949 struct dl *dl;
4950 int i;
4951 unsigned int j;
4952 struct vcl *vcl;
4953 struct vd_config *vc;
4954 unsigned int n_bvd;
4955
4956 for (d = a->info.devs ; d ; d = d->next) {
4957 if ((d->curr_state & DS_FAULTY) &&
4958 d->state_fd >= 0)
4959 /* wait for Removal to happen */
4960 return NULL;
4961 if (d->state_fd >= 0)
4962 working ++;
4963 }
4964
4965 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
4966 a->info.array.raid_disks,
4967 a->info.array.level);
4968 if (working == a->info.array.raid_disks)
4969 return NULL; /* array not degraded */
4970 switch (a->info.array.level) {
4971 case 1:
4972 if (working == 0)
4973 return NULL; /* failed */
4974 break;
4975 case 4:
4976 case 5:
4977 if (working < a->info.array.raid_disks - 1)
4978 return NULL; /* failed */
4979 break;
4980 case 6:
4981 if (working < a->info.array.raid_disks - 2)
4982 return NULL; /* failed */
4983 break;
4984 case 10:
4985 if (raid10_degraded(&a->info) < 1)
4986 return NULL;
4987 break;
4988 default: /* concat or stripe */
4989 return NULL; /* failed */
4990 }
4991
4992 /* For each slot, if it is not working, find a spare */
4993 dl = ddf->dlist;
4994 for (i = 0; i < a->info.array.raid_disks; i++) {
4995 for (d = a->info.devs ; d ; d = d->next)
4996 if (d->disk.raid_disk == i)
4997 break;
4998 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4999 if (d && (d->state_fd >= 0))
5000 continue;
5001
5002 /* OK, this device needs recovery. Find a spare */
5003 again:
5004 for ( ; dl ; dl = dl->next) {
5005 unsigned long long esize;
5006 unsigned long long pos;
5007 struct mdinfo *d2;
5008 int is_global = 0;
5009 int is_dedicated = 0;
5010 struct extent *ex;
5011 unsigned int j;
5012 be16 state;
5013
5014 if (dl->pdnum < 0)
5015 continue;
5016 state = ddf->phys->entries[dl->pdnum].state;
5017 if (be16_and(state,
5018 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
5019 !be16_and(state,
5020 cpu_to_be16(DDF_Online)))
5021 continue;
5022
5023 /* If in this array, skip */
5024 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
5025 if (d2->state_fd >= 0 &&
5026 d2->disk.major == dl->major &&
5027 d2->disk.minor == dl->minor) {
5028 dprintf("%x:%x (%08x) already in array\n",
5029 dl->major, dl->minor,
5030 be32_to_cpu(dl->disk.refnum));
5031 break;
5032 }
5033 if (d2)
5034 continue;
5035 if (be16_and(ddf->phys->entries[dl->pdnum].type,
5036 cpu_to_be16(DDF_Spare))) {
5037 /* Check spare assign record */
5038 if (dl->spare) {
5039 if (dl->spare->type & DDF_spare_dedicated) {
5040 /* check spare_ents for guid */
5041 for (j = 0 ;
5042 j < be16_to_cpu
5043 (dl->spare
5044 ->populated);
5045 j++) {
5046 if (memcmp(dl->spare->spare_ents[j].guid,
5047 ddf->virt->entries[a->info.container_member].guid,
5048 DDF_GUID_LEN) == 0)
5049 is_dedicated = 1;
5050 }
5051 } else
5052 is_global = 1;
5053 }
5054 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
5055 cpu_to_be16(DDF_Global_Spare))) {
5056 is_global = 1;
5057 } else if (!be16_and(ddf->phys
5058 ->entries[dl->pdnum].state,
5059 cpu_to_be16(DDF_Failed))) {
5060 /* we can possibly use some of this */
5061 is_global = 1;
5062 }
5063 if ( ! (is_dedicated ||
5064 (is_global && global_ok))) {
5065 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
5066 is_dedicated, is_global);
5067 continue;
5068 }
5069
5070 /* We are allowed to use this device - is there space?
5071 * We need a->info.component_size sectors */
5072 ex = get_extents(ddf, dl);
5073 if (!ex) {
5074 dprintf("cannot get extents\n");
5075 continue;
5076 }
5077 j = 0; pos = 0;
5078 esize = 0;
5079
5080 do {
5081 esize = ex[j].start - pos;
5082 if (esize >= a->info.component_size)
5083 break;
5084 pos = ex[j].start + ex[j].size;
5085 j++;
5086 } while (ex[j-1].size);
5087
5088 free(ex);
5089 if (esize < a->info.component_size) {
5090 dprintf("%x:%x has no room: %llu %llu\n",
5091 dl->major, dl->minor,
5092 esize, a->info.component_size);
5093 /* No room */
5094 continue;
5095 }
5096
5097 /* Cool, we have a device with some space at pos */
5098 di = xcalloc(1, sizeof(*di));
5099 di->disk.number = i;
5100 di->disk.raid_disk = i;
5101 di->disk.major = dl->major;
5102 di->disk.minor = dl->minor;
5103 di->disk.state = 0;
5104 di->recovery_start = 0;
5105 di->data_offset = pos;
5106 di->component_size = a->info.component_size;
5107 di->next = rv;
5108 rv = di;
5109 dprintf("%x:%x (%08x) to be %d at %llu\n",
5110 dl->major, dl->minor,
5111 be32_to_cpu(dl->disk.refnum), i, pos);
5112
5113 break;
5114 }
5115 if (!dl && ! global_ok) {
5116 /* not enough dedicated spares, try global */
5117 global_ok = 1;
5118 dl = ddf->dlist;
5119 goto again;
5120 }
5121 }
5122
5123 if (!rv)
5124 /* No spares found */
5125 return rv;
5126 /* Now 'rv' has a list of devices to return.
5127 * Create a metadata_update record to update the
5128 * phys_refnum and lba_offset values
5129 */
5130 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5131 &n_bvd, &vcl);
5132 if (vc == NULL)
5133 return NULL;
5134
5135 mu = xmalloc(sizeof(*mu));
5136 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5137 free(mu);
5138 mu = NULL;
5139 }
5140
5141 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5142 mu->buf = xmalloc(mu->len);
5143 mu->space = NULL;
5144 mu->space_list = NULL;
5145 mu->next = *updates;
5146 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5147 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5148 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5149 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5150
5151 vc = (struct vd_config*)mu->buf;
5152 for (di = rv ; di ; di = di->next) {
5153 unsigned int i_sec, i_prim;
5154 i_sec = di->disk.raid_disk
5155 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5156 i_prim = di->disk.raid_disk
5157 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5158 vc = (struct vd_config *)(mu->buf
5159 + i_sec * ddf->conf_rec_len * 512);
5160 for (dl = ddf->dlist; dl; dl = dl->next)
5161 if (dl->major == di->disk.major
5162 && dl->minor == di->disk.minor)
5163 break;
5164 if (!dl || dl->pdnum < 0) {
5165 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
5166 __func__, di->disk.raid_disk,
5167 di->disk.major, di->disk.minor);
5168 return NULL;
5169 }
5170 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5171 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5172 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5173 be32_to_cpu(vc->phys_refnum[i_prim]),
5174 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5175 }
5176 *updates = mu;
5177 return rv;
5178 }
5179 #endif /* MDASSEMBLE */
5180
5181 static int ddf_level_to_layout(int level)
5182 {
5183 switch(level) {
5184 case 0:
5185 case 1:
5186 return 0;
5187 case 5:
5188 return ALGORITHM_LEFT_SYMMETRIC;
5189 case 6:
5190 return ALGORITHM_ROTATING_N_CONTINUE;
5191 case 10:
5192 return 0x102;
5193 default:
5194 return UnSet;
5195 }
5196 }
5197
5198 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5199 {
5200 if (level && *level == UnSet)
5201 *level = LEVEL_CONTAINER;
5202
5203 if (level && layout && *layout == UnSet)
5204 *layout = ddf_level_to_layout(*level);
5205 }
5206
/* Operations table hooking the DDF implementation into generic
 * mdadm/mdmon code.  Handlers not needed by the minimal mdassemble
 * build are guarded by MDASSEMBLE. */
struct superswitch super_ddf = {
#ifndef MDASSEMBLE
	/* examination / reporting */
	.examine_super	= examine_super_ddf,
	.brief_examine_super = brief_examine_super_ddf,
	.brief_examine_subarrays = brief_examine_subarrays_ddf,
	.export_examine_super = export_examine_super_ddf,
	.detail_super	= detail_super_ddf,
	.brief_detail_super = brief_detail_super_ddf,
	/* creation / modification */
	.validate_geometry = validate_geometry_ddf,
	.write_init_super = write_init_super_ddf,
	.add_to_super	= add_to_super_ddf,
	.remove_from_super = remove_from_super_ddf,
	.load_container	= load_container_ddf,
	.copy_metadata = copy_metadata_ddf,
	.kill_subarray  = kill_subarray_ddf,
#endif
	.match_home	= match_home_ddf,
	.uuid_from_super= uuid_from_super_ddf,
	.getinfo_super  = getinfo_super_ddf,
	.update_super	= update_super_ddf,

	.avail_size	= avail_size_ddf,

	.compare_super	= compare_super_ddf,

	.load_super	= load_super_ddf,
	.init_super	= init_super_ddf,
	.store_super	= store_super_ddf,
	.free_super	= free_super_ddf,
	.match_metadata_desc = match_metadata_desc_ddf,
	.container_content = container_content_ddf,
	.default_geometry = default_geometry_ddf,

	/* DDF is externally-managed metadata (mdmon, not the kernel,
	 * owns it). */
	.external	= 1,

#ifndef MDASSEMBLE
/* for mdmon */
	.open_new	= ddf_open_new,
	.set_array_state= ddf_set_array_state,
	.set_disk	= ddf_set_disk,
	.sync_metadata	= ddf_sync_metadata,
	.process_update	= ddf_process_update,
	.prepare_update	= ddf_prepare_update,
	.activate_spare = ddf_activate_spare,
#endif
	.name = "ddf",
};