1 /*
2 * mdadm - manage Linux "md" devices aka RAID arrays.
3 *
4 * Copyright (C) 2006-2014 Neil Brown <neilb@suse.de>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * Author: Neil Brown
22 * Email: <neil@brown.name>
23 *
24 * Specifications for DDF taken from Common RAID DDF Specification Revision 1.2
25 * (July 28 2006). Reused by permission of SNIA.
26 */
27
28 #define HAVE_STDINT_H 1
29 #include "mdadm.h"
30 #include "mdmon.h"
31 #include "sha1.h"
32 #include <values.h>
33 #include <stddef.h>
34
35 /* a non-official T10 name for creation GUIDs */
36 static char T10[] = "Linux-MD";
37
38 /* DDF timestamps are 1980 based, so we need to add the
39 * seconds in the decade of the seventies to convert to linux timestamps.
40 * 10 years with 2 leap years.
41 */
42 #define DECADE (3600*24*(365*10+2))
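/* For example, a DDF timestamp field (such as ddf_header.timestamp below)
 * converts to and from Unix time like this:
 *
 *	stamp = cpu_to_be32(time(0) - DECADE);	 (Unix -> DDF)
 *	when  = be32_to_cpu(stamp) + DECADE;	 (DDF -> Unix)
 */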
43 unsigned long crc32(
44 unsigned long crc,
45 const unsigned char *buf,
46 unsigned len);
47
48 #define DDF_NOTFOUND (~0U)
49 #define DDF_CONTAINER (DDF_NOTFOUND-1)
50
51 /* Default for safe_mode_delay. Same value as for IMSM.
52 */
53 static const int DDF_SAFE_MODE_DELAY = 4000;
54
55 /* The DDF metadata handling.
56 * DDF metadata lives at the end of the device.
57 * The last 512 byte block provides an 'anchor' which is used to locate
58 * the rest of the metadata, which usually lives immediately behind the anchor.
59 *
60 * Note:
61 * - all multibyte numeric fields are bigendian.
62 * - all strings are space padded.
63 *
64 */
65
66 typedef struct __be16 {
67 __u16 _v16;
68 } be16;
69 #define be16_eq(x, y) ((x)._v16 == (y)._v16)
70 #define be16_and(x, y) ((x)._v16 & (y)._v16)
71 #define be16_or(x, y) ((x)._v16 | (y)._v16)
72 #define be16_clear(x, y) ((x)._v16 &= ~(y)._v16)
73 #define be16_set(x, y) ((x)._v16 |= (y)._v16)
74
75 typedef struct __be32 {
76 __u32 _v32;
77 } be32;
78 #define be32_eq(x, y) ((x)._v32 == (y)._v32)
79
80 typedef struct __be64 {
81 __u64 _v64;
82 } be64;
83 #define be64_eq(x, y) ((x)._v64 == (y)._v64)
84
85 #define be16_to_cpu(be) __be16_to_cpu((be)._v16)
86 static inline be16 cpu_to_be16(__u16 x)
87 {
88 be16 be = { ._v16 = __cpu_to_be16(x) };
89 return be;
90 }
91
92 #define be32_to_cpu(be) __be32_to_cpu((be)._v32)
93 static inline be32 cpu_to_be32(__u32 x)
94 {
95 be32 be = { ._v32 = __cpu_to_be32(x) };
96 return be;
97 }
98
99 #define be64_to_cpu(be) __be64_to_cpu((be)._v64)
100 static inline be64 cpu_to_be64(__u64 x)
101 {
102 be64 be = { ._v64 = __cpu_to_be64(x) };
103 return be;
104 }
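/* Wrapping the raw values in one-member structs makes byte order part of
 * the type, so on-disk values cannot silently mix with native integers;
 * all access goes through the helpers above.  For illustration, with
 * 'hdr' being a struct ddf_header pointer:
 *
 *	be32 one = cpu_to_be32(1);
 *	if (be32_eq(hdr->seq, one))		 compare two big-endian values
 *		...;
 *	n = be32_to_cpu(hdr->seq) + 1;		 convert before arithmetic
 */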
105
106 /* Primary Raid Level (PRL) */
107 #define DDF_RAID0 0x00
108 #define DDF_RAID1 0x01
109 #define DDF_RAID3 0x03
110 #define DDF_RAID4 0x04
111 #define DDF_RAID5 0x05
112 #define DDF_RAID1E 0x11
113 #define DDF_JBOD 0x0f
114 #define DDF_CONCAT 0x1f
115 #define DDF_RAID5E 0x15
116 #define DDF_RAID5EE 0x25
117 #define DDF_RAID6 0x06
118
119 /* Raid Level Qualifier (RLQ) */
120 #define DDF_RAID0_SIMPLE 0x00
121 #define DDF_RAID1_SIMPLE 0x00 /* just 2 devices in this plex */
122 #define DDF_RAID1_MULTI 0x01 /* exactly 3 devices in this plex */
123 #define DDF_RAID3_0 0x00 /* parity in first extent */
124 #define DDF_RAID3_N 0x01 /* parity in last extent */
125 #define DDF_RAID4_0 0x00 /* parity in first extent */
126 #define DDF_RAID4_N 0x01 /* parity in last extent */
127 /* these apply to raid5e and raid5ee as well */
128 #define DDF_RAID5_0_RESTART 0x00 /* same as 'right asymmetric' - layout 1 */
129 #define DDF_RAID6_0_RESTART 0x01 /* raid6 different from raid5 here!!! */
130 #define DDF_RAID5_N_RESTART 0x02 /* same as 'left asymmetric' - layout 0 */
131 #define DDF_RAID5_N_CONTINUE 0x03 /* same as 'left symmetric' - layout 2 */
132
133 #define DDF_RAID1E_ADJACENT 0x00 /* raid10 nearcopies==2 */
134 #define DDF_RAID1E_OFFSET 0x01 /* raid10 offsetcopies==2 */
135
136 /* Secondary RAID Level (SRL) */
137 #define DDF_2STRIPED 0x00 /* This is weirder than RAID0 !! */
138 #define DDF_2MIRRORED 0x01
139 #define DDF_2CONCAT 0x02
140 #define DDF_2SPANNED 0x03 /* This is also weird - be careful */
141
142 /* Magic numbers */
143 #define DDF_HEADER_MAGIC cpu_to_be32(0xDE11DE11)
144 #define DDF_CONTROLLER_MAGIC cpu_to_be32(0xAD111111)
145 #define DDF_PHYS_RECORDS_MAGIC cpu_to_be32(0x22222222)
146 #define DDF_PHYS_DATA_MAGIC cpu_to_be32(0x33333333)
147 #define DDF_VIRT_RECORDS_MAGIC cpu_to_be32(0xDDDDDDDD)
148 #define DDF_VD_CONF_MAGIC cpu_to_be32(0xEEEEEEEE)
149 #define DDF_SPARE_ASSIGN_MAGIC cpu_to_be32(0x55555555)
150 #define DDF_VU_CONF_MAGIC cpu_to_be32(0x88888888)
151 #define DDF_VENDOR_LOG_MAGIC cpu_to_be32(0x01dBEEF0)
152 #define DDF_BBM_LOG_MAGIC cpu_to_be32(0xABADB10C)
153
154 #define DDF_GUID_LEN 24
155 #define DDF_REVISION_0 "01.00.00"
156 #define DDF_REVISION_2 "01.02.00"
157
158 struct ddf_header {
159 be32 magic; /* DDF_HEADER_MAGIC */
160 be32 crc;
161 char guid[DDF_GUID_LEN];
162 char revision[8]; /* 01.02.00 */
163 be32 seq; /* starts at '1' */
164 be32 timestamp;
165 __u8 openflag;
166 __u8 foreignflag;
167 __u8 enforcegroups;
168 __u8 pad0; /* 0xff */
169 __u8 pad1[12]; /* 12 * 0xff */
170 /* 64 bytes so far */
171 __u8 header_ext[32]; /* reserved: fill with 0xff */
172 be64 primary_lba;
173 be64 secondary_lba;
174 __u8 type;
175 __u8 pad2[3]; /* 0xff */
176 be32 workspace_len; /* sectors for vendor space -
177 * at least 32768(sectors) */
178 be64 workspace_lba;
179 be16 max_pd_entries; /* one of 15, 63, 255, 1023, 4095 */
180 be16 max_vd_entries; /* 2^(4,6,8,10,12)-1 : i.e. as above */
181 be16 max_partitions; /* i.e. max num of configuration
182 record entries per disk */
183 be16 config_record_len; /* 1 +ROUNDUP(max_primary_element_entries
184 *12/512) */
185 be16 max_primary_element_entries; /* 16, 64, 256, 1024, or 4096 */
186 __u8 pad3[54]; /* 0xff */
187 /* 192 bytes so far */
188 be32 controller_section_offset;
189 be32 controller_section_length;
190 be32 phys_section_offset;
191 be32 phys_section_length;
192 be32 virt_section_offset;
193 be32 virt_section_length;
194 be32 config_section_offset;
195 be32 config_section_length;
196 be32 data_section_offset;
197 be32 data_section_length;
198 be32 bbm_section_offset;
199 be32 bbm_section_length;
200 be32 diag_space_offset;
201 be32 diag_space_length;
202 be32 vendor_offset;
203 be32 vendor_length;
204 /* 256 bytes so far */
205 __u8 pad4[256]; /* 0xff */
206 };
207
208 /* type field */
209 #define DDF_HEADER_ANCHOR 0x00
210 #define DDF_HEADER_PRIMARY 0x01
211 #define DDF_HEADER_SECONDARY 0x02
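/* The anchor copy of this header occupies the last 512-byte block of the
 * device, so it can be found from the device size alone - a rough sketch
 * of what load_ddf_headers() does below:
 *
 *	unsigned long long dsize;
 *	struct ddf_header anchor;
 *	get_dev_size(fd, NULL, &dsize);
 *	lseek64(fd, dsize - 512, 0);
 *	read(fd, &anchor, 512);
 *	then verify magic, crc and revision before trusting the contents
 */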
212
213 /* The content of the 'controller section' - global scope */
214 struct ddf_controller_data {
215 be32 magic; /* DDF_CONTROLLER_MAGIC */
216 be32 crc;
217 char guid[DDF_GUID_LEN];
218 struct controller_type {
219 be16 vendor_id;
220 be16 device_id;
221 be16 sub_vendor_id;
222 be16 sub_device_id;
223 } type;
224 char product_id[16];
225 __u8 pad[8]; /* 0xff */
226 __u8 vendor_data[448];
227 };
228
229 /* The content of phys_section - global scope */
230 struct phys_disk {
231 be32 magic; /* DDF_PHYS_RECORDS_MAGIC */
232 be32 crc;
233 be16 used_pdes; /* This is a counter, not a max - the list
234 * of used entries may not be dense */
235 be16 max_pdes;
236 __u8 pad[52];
237 struct phys_disk_entry {
238 char guid[DDF_GUID_LEN];
239 be32 refnum;
240 be16 type;
241 be16 state;
242 be64 config_size; /* DDF structures must be after here */
243 char path[18]; /* Another horrible structure really
244 * but is "used for information
245 * purposes only" */
246 __u8 pad[6];
247 } entries[0];
248 };
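/* used_pdes counts entries in use, but entries[] is not dense: a slot whose
 * refnum is 0xffffffff is unused.  A scan therefore looks like this (see
 * examine_pds() and find_phys() below):
 *
 *	for (i = 0; i < be16_to_cpu(phys->max_pdes); i++) {
 *		if (be32_to_cpu(phys->entries[i].refnum) == 0xffffffff)
 *			continue;	 unused slot
 *		... use phys->entries[i] ...
 *	}
 */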
249
250 /* phys_disk_entry.type is a bitmap - bigendian remember */
251 #define DDF_Forced_PD_GUID 1
252 #define DDF_Active_in_VD 2
253 #define DDF_Global_Spare 4 /* VD_CONF records are ignored */
254 #define DDF_Spare 8 /* overrides Global_spare */
255 #define DDF_Foreign 16
256 #define DDF_Legacy 32 /* no DDF on this device */
257
258 #define DDF_Interface_mask 0xf00
259 #define DDF_Interface_SCSI 0x100
260 #define DDF_Interface_SAS 0x200
261 #define DDF_Interface_SATA 0x300
262 #define DDF_Interface_FC 0x400
263
264 /* phys_disk_entry.state is a bigendian bitmap */
265 #define DDF_Online 1
266 #define DDF_Failed 2 /* overrides 1,4,8 */
267 #define DDF_Rebuilding 4
268 #define DDF_Transition 8
269 #define DDF_SMART 16
270 #define DDF_ReadErrors 32
271 #define DDF_Missing 64
272
273 /* The content of the virt_section global scope */
274 struct virtual_disk {
275 be32 magic; /* DDF_VIRT_RECORDS_MAGIC */
276 be32 crc;
277 be16 populated_vdes;
278 be16 max_vdes;
279 __u8 pad[52];
280 struct virtual_entry {
281 char guid[DDF_GUID_LEN];
282 be16 unit;
283 __u16 pad0; /* 0xffff */
284 be16 guid_crc;
285 be16 type;
286 __u8 state;
287 __u8 init_state;
288 __u8 pad1[14];
289 char name[16];
290 } entries[0];
291 };
292
293 /* virtual_entry.type is a bitmap - bigendian */
294 #define DDF_Shared 1
295 #define DDF_Enforce_Groups 2
296 #define DDF_Unicode 4
297 #define DDF_Owner_Valid 8
298
299 /* virtual_entry.state is a bigendian bitmap */
300 #define DDF_state_mask 0x7
301 #define DDF_state_optimal 0x0
302 #define DDF_state_degraded 0x1
303 #define DDF_state_deleted 0x2
304 #define DDF_state_missing 0x3
305 #define DDF_state_failed 0x4
306 #define DDF_state_part_optimal 0x5
307
308 #define DDF_state_morphing 0x8
309 #define DDF_state_inconsistent 0x10
310
311 /* virtual_entry.init_state is a bigendian bitmap */
312 #define DDF_initstate_mask 0x03
313 #define DDF_init_not 0x00
314 #define DDF_init_quick 0x01 /* initialisation in progress.
315 * i.e. 'state_inconsistent' */
316 #define DDF_init_full 0x02
317
318 #define DDF_access_mask 0xc0
319 #define DDF_access_rw 0x00
320 #define DDF_access_ro 0x80
321 #define DDF_access_blocked 0xc0
322
323 /* The content of the config_section - local scope
324 * It has multiple records, each config_record_len sectors long.
325 * They can be vd_config or spare_assign
326 */
327
328 struct vd_config {
329 be32 magic; /* DDF_VD_CONF_MAGIC */
330 be32 crc;
331 char guid[DDF_GUID_LEN];
332 be32 timestamp;
333 be32 seqnum;
334 __u8 pad0[24];
335 be16 prim_elmnt_count;
336 __u8 chunk_shift; /* 0 == 512, 1==1024 etc */
337 __u8 prl;
338 __u8 rlq;
339 __u8 sec_elmnt_count;
340 __u8 sec_elmnt_seq;
341 __u8 srl;
342 be64 blocks; /* blocks per component could be different
343 * on different component devices...(only
344 * for concat I hope) */
345 be64 array_blocks; /* blocks in array */
346 __u8 pad1[8];
347 be32 spare_refs[8]; /* This is used to detect missing spares.
348 * As we don't have an interface for that
349 * the values are ignored.
350 */
351 __u8 cache_pol[8];
352 __u8 bg_rate;
353 __u8 pad2[3];
354 __u8 pad3[52];
355 __u8 pad4[192];
356 __u8 v0[32]; /* reserved- 0xff */
357 __u8 v1[32]; /* reserved- 0xff */
358 __u8 v2[16]; /* reserved- 0xff */
359 __u8 v3[16]; /* reserved- 0xff */
360 __u8 vendor[32];
361 be32 phys_refnum[0]; /* refnum of each disk in sequence */
362 /*__u64 lba_offset[0]; LBA offset in each phys. Note extents in a
363 bvd are always the same size */
364 };
365 #define LBA_OFFSET(ddf, vd) ((be64 *) &(vd)->phys_refnum[(ddf)->mppe])
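/* A vd_config record is thus the fixed part above, followed by mppe refnums,
 * followed by mppe 64-bit starting LBAs.  The starting sector of member i of
 * a BVD is read as, for example:
 *
 *	unsigned long long start = be64_to_cpu(LBA_OFFSET(ddf, vc)[i]);
 *
 * where ddf is the struct ddf_super and vc the struct vd_config of interest.
 */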
366
367 /* vd_config.cache_pol[7] is a bitmap */
368 #define DDF_cache_writeback 1 /* else writethrough */
369 #define DDF_cache_wadaptive 2 /* only applies if writeback */
370 #define DDF_cache_readahead 4
371 #define DDF_cache_radaptive 8 /* only if doing read-ahead */
372 #define DDF_cache_ifnobatt 16 /* even to write cache if battery is poor */
373 #define DDF_cache_wallowed 32 /* enable write caching */
374 #define DDF_cache_rallowed 64 /* enable read caching */
375
376 struct spare_assign {
377 be32 magic; /* DDF_SPARE_ASSIGN_MAGIC */
378 be32 crc;
379 be32 timestamp;
380 __u8 reserved[7];
381 __u8 type;
382 be16 populated; /* SAEs used */
383 be16 max; /* max SAEs */
384 __u8 pad[8];
385 struct spare_assign_entry {
386 char guid[DDF_GUID_LEN];
387 be16 secondary_element;
388 __u8 pad[6];
389 } spare_ents[0];
390 };
391 /* spare_assign.type is a bitmap */
392 #define DDF_spare_dedicated 0x1 /* else global */
393 #define DDF_spare_revertible 0x2 /* else committable */
394 #define DDF_spare_active 0x4 /* else not active */
395 #define DDF_spare_affinity 0x8 /* enclosure affinity */
396
397 /* The data_section contents - local scope */
398 struct disk_data {
399 be32 magic; /* DDF_PHYS_DATA_MAGIC */
400 be32 crc;
401 char guid[DDF_GUID_LEN];
402 be32 refnum; /* crc of some magic drive data ... */
403 __u8 forced_ref; /* set when above was not result of magic */
404 __u8 forced_guid; /* set if guid was forced rather than magic */
405 __u8 vendor[32];
406 __u8 pad[442];
407 };
408
409 /* bbm_section content */
410 struct bad_block_log {
411 be32 magic;
412 be32 crc;
413 be16 entry_count;
414 be32 spare_count;
415 __u8 pad[10];
416 be64 first_spare;
417 struct mapped_block {
418 be64 defective_start;
419 be32 replacement_start;
420 be16 remap_count;
421 __u8 pad[2];
422 } entries[0];
423 };
424
425 /* Struct for internally holding ddf structures */
426 /* The DDF structure stored on each device is potentially
427 * quite different, as some data is global and some is local.
428 * The global data is:
429 * - ddf header
430 * - controller_data
431 * - Physical disk records
432 * - Virtual disk records
433 * The local data is:
434 * - Configuration records
435 * - Physical Disk data section
436 * ( and Bad block and vendor which I don't care about yet).
437 *
438 * The local data is parsed into separate lists as it is read
439 * and reconstructed for writing. This means that we only need
440 * to make config changes once and they are automatically
441 * propagated to all devices.
442 * The global (config and disk data) records are each in a list
443 * of separate data structures. When writing we find the entry
444 * or entries applicable to the particular device.
445 */
446 struct ddf_super {
447 struct ddf_header anchor, primary, secondary;
448 struct ddf_controller_data controller;
449 struct ddf_header *active;
450 struct phys_disk *phys;
451 struct virtual_disk *virt;
452 char *conf;
453 int pdsize, vdsize;
454 unsigned int max_part, mppe, conf_rec_len;
455 int currentdev;
456 int updates_pending;
457 struct vcl {
458 union {
459 char space[512];
460 struct {
461 struct vcl *next;
462 unsigned int vcnum; /* index into ->virt */
463 /* For an array with a secondary level there are
464 * multiple vd_config structures, all with the same
465 * guid but with different sec_elmnt_seq.
466 * One of these structures is in 'conf' below.
467 * The others are in other_bvds, not in any
468 * particular order.
469 */
470 struct vd_config **other_bvds;
471 __u64 *block_sizes; /* NULL if all the same */
472 };
473 };
474 struct vd_config conf;
475 } *conflist, *currentconf;
476 struct dl {
477 union {
478 char space[512];
479 struct {
480 struct dl *next;
481 int major, minor;
482 char *devname;
483 int fd;
484 unsigned long long size; /* sectors */
485 be64 primary_lba; /* sectors */
486 be64 secondary_lba; /* sectors */
487 be64 workspace_lba; /* sectors */
488 int pdnum; /* index in ->phys */
489 struct spare_assign *spare;
490 void *mdupdate; /* hold metadata update */
491
492 /* These fields used by auto-layout */
493 int raiddisk; /* slot to fill in autolayout */
494 __u64 esize;
495 int displayed;
496 };
497 };
498 struct disk_data disk;
499 struct vcl *vlist[0]; /* max_part in size */
500 } *dlist, *add_list;
501 };
502
503 #ifndef MDASSEMBLE
504 static int load_super_ddf_all(struct supertype *st, int fd,
505 void **sbp, char *devname);
506 static int get_svd_state(const struct ddf_super *, const struct vcl *);
507 static int
508 validate_geometry_ddf_container(struct supertype *st,
509 int level, int layout, int raiddisks,
510 int chunk, unsigned long long size,
511 unsigned long long data_offset,
512 char *dev, unsigned long long *freesize,
513 int verbose);
514
515 static int validate_geometry_ddf_bvd(struct supertype *st,
516 int level, int layout, int raiddisks,
517 int *chunk, unsigned long long size,
518 unsigned long long data_offset,
519 char *dev, unsigned long long *freesize,
520 int verbose);
521 #endif
522
523 static void free_super_ddf(struct supertype *st);
524 static int all_ff(const char *guid);
525 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
526 be32 refnum, unsigned int nmax,
527 const struct vd_config **bvd,
528 unsigned int *idx);
529 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map);
530 static void uuid_from_ddf_guid(const char *guid, int uuid[4]);
531 static void uuid_from_super_ddf(struct supertype *st, int uuid[4]);
532 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i);
533 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map);
534 static int init_super_ddf_bvd(struct supertype *st,
535 mdu_array_info_t *info,
536 unsigned long long size,
537 char *name, char *homehost,
538 int *uuid, unsigned long long data_offset);
539
540 #if DEBUG
541 static void pr_state(struct ddf_super *ddf, const char *msg)
542 {
543 unsigned int i;
544 dprintf("%s/%s: ", __func__, msg);
545 for (i = 0; i < be16_to_cpu(ddf->active->max_vd_entries); i++) {
546 if (all_ff(ddf->virt->entries[i].guid))
547 continue;
548 dprintf("%u(s=%02x i=%02x) ", i,
549 ddf->virt->entries[i].state,
550 ddf->virt->entries[i].init_state);
551 }
552 dprintf("\n");
553 }
554 #else
555 static void pr_state(const struct ddf_super *ddf, const char *msg) {}
556 #endif
557
558 static void _ddf_set_updates_pending(struct ddf_super *ddf, struct vd_config *vc,
559 const char *func)
560 {
561 if (vc) {
562 vc->timestamp = cpu_to_be32(time(0)-DECADE);
563 vc->seqnum = cpu_to_be32(be32_to_cpu(vc->seqnum) + 1);
564 }
565 if (ddf->updates_pending)
566 return;
567 ddf->updates_pending = 1;
568 ddf->active->seq = cpu_to_be32((be32_to_cpu(ddf->active->seq)+1));
569 pr_state(ddf, func);
570 }
571
572 #define ddf_set_updates_pending(x,v) _ddf_set_updates_pending((x), (v), __func__)
573
574 static be32 calc_crc(void *buf, int len)
575 {
576 /* crcs are always at the same place as in the ddf_header */
577 struct ddf_header *ddf = buf;
578 be32 oldcrc = ddf->crc;
579 __u32 newcrc;
580 ddf->crc = cpu_to_be32(0xffffffff);
581
582 newcrc = crc32(0, buf, len);
583 ddf->crc = oldcrc;
584 /* The crc is stored (like everything) bigendian, so convert
585 * here for simplicity
586 */
587 return cpu_to_be32(newcrc);
588 }
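/* Because the crc field is treated as 0xffffffff while summing, the same
 * helper serves both checking and regenerating a section's CRC, e.g.:
 *
 *	if (!be32_eq(calc_crc(hdr, 512), hdr->crc))	 reject on read
 *		return 0;
 *	hdr->crc = calc_crc(hdr, 512);			 refresh before writing
 *
 * (512 here assumes a one-sector structure such as the header).
 */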
589
590 #define DDF_INVALID_LEVEL 0xff
591 #define DDF_NO_SECONDARY 0xff
592 static int err_bad_md_layout(const mdu_array_info_t *array)
593 {
594 pr_err("RAID%d layout %x with %d disks is unsupported for DDF\n",
595 array->level, array->layout, array->raid_disks);
596 return -1;
597 }
598
599 static int layout_md2ddf(const mdu_array_info_t *array,
600 struct vd_config *conf)
601 {
602 be16 prim_elmnt_count = cpu_to_be16(array->raid_disks);
603 __u8 prl = DDF_INVALID_LEVEL, rlq = 0;
604 __u8 sec_elmnt_count = 1;
605 __u8 srl = DDF_NO_SECONDARY;
606
607 switch (array->level) {
608 case LEVEL_LINEAR:
609 prl = DDF_CONCAT;
610 break;
611 case 0:
612 rlq = DDF_RAID0_SIMPLE;
613 prl = DDF_RAID0;
614 break;
615 case 1:
616 switch (array->raid_disks) {
617 case 2:
618 rlq = DDF_RAID1_SIMPLE;
619 break;
620 case 3:
621 rlq = DDF_RAID1_MULTI;
622 break;
623 default:
624 return err_bad_md_layout(array);
625 }
626 prl = DDF_RAID1;
627 break;
628 case 4:
629 if (array->layout != 0)
630 return err_bad_md_layout(array);
631 rlq = DDF_RAID4_N;
632 prl = DDF_RAID4;
633 break;
634 case 5:
635 switch (array->layout) {
636 case ALGORITHM_LEFT_ASYMMETRIC:
637 rlq = DDF_RAID5_N_RESTART;
638 break;
639 case ALGORITHM_RIGHT_ASYMMETRIC:
640 rlq = DDF_RAID5_0_RESTART;
641 break;
642 case ALGORITHM_LEFT_SYMMETRIC:
643 rlq = DDF_RAID5_N_CONTINUE;
644 break;
645 case ALGORITHM_RIGHT_SYMMETRIC:
646 /* not mentioned in standard */
647 default:
648 return err_bad_md_layout(array);
649 }
650 prl = DDF_RAID5;
651 break;
652 case 6:
653 switch (array->layout) {
654 case ALGORITHM_ROTATING_N_RESTART:
655 rlq = DDF_RAID5_N_RESTART;
656 break;
657 case ALGORITHM_ROTATING_ZERO_RESTART:
658 rlq = DDF_RAID6_0_RESTART;
659 break;
660 case ALGORITHM_ROTATING_N_CONTINUE:
661 rlq = DDF_RAID5_N_CONTINUE;
662 break;
663 default:
664 return err_bad_md_layout(array);
665 }
666 prl = DDF_RAID6;
667 break;
668 case 10:
669 if (array->raid_disks % 2 == 0 && array->layout == 0x102) {
670 rlq = DDF_RAID1_SIMPLE;
671 prim_elmnt_count = cpu_to_be16(2);
672 sec_elmnt_count = array->raid_disks / 2;
673 srl = DDF_2SPANNED;
674 prl = DDF_RAID1;
675 } else if (array->raid_disks % 3 == 0
676 && array->layout == 0x103) {
677 rlq = DDF_RAID1_MULTI;
678 prim_elmnt_count = cpu_to_be16(3);
679 sec_elmnt_count = array->raid_disks / 3;
680 srl = DDF_2SPANNED;
681 prl = DDF_RAID1;
682 } else if (array->layout == 0x201) {
683 prl = DDF_RAID1E;
684 rlq = DDF_RAID1E_OFFSET;
685 } else if (array->layout == 0x102) {
686 prl = DDF_RAID1E;
687 rlq = DDF_RAID1E_ADJACENT;
688 } else
689 return err_bad_md_layout(array);
690 break;
691 default:
692 return err_bad_md_layout(array);
693 }
694 conf->prl = prl;
695 conf->prim_elmnt_count = prim_elmnt_count;
696 conf->rlq = rlq;
697 conf->srl = srl;
698 conf->sec_elmnt_count = sec_elmnt_count;
699 return 0;
700 }
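/* Example: an md RAID10 array with layout 0x102 ("near=2") and 6 disks maps
 * to three two-disk DDF RAID1 BVDs spanned at the secondary level:
 *
 *	conf->prl = DDF_RAID1;	conf->rlq = DDF_RAID1_SIMPLE;
 *	conf->prim_elmnt_count = cpu_to_be16(2);
 *	conf->sec_elmnt_count = 3;	conf->srl = DDF_2SPANNED;
 *
 * layout_ddf2md() below performs the reverse mapping.
 */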
701
702 static int err_bad_ddf_layout(const struct vd_config *conf)
703 {
704 pr_err("DDF RAID %u qualifier %u with %u disks is unsupported\n",
705 conf->prl, conf->rlq, be16_to_cpu(conf->prim_elmnt_count));
706 return -1;
707 }
708
709 static int layout_ddf2md(const struct vd_config *conf,
710 mdu_array_info_t *array)
711 {
712 int level = LEVEL_UNSUPPORTED;
713 int layout = 0;
714 int raiddisks = be16_to_cpu(conf->prim_elmnt_count);
715
716 if (conf->sec_elmnt_count > 1) {
717 /* see also check_secondary() */
718 if (conf->prl != DDF_RAID1 ||
719 (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED)) {
720 pr_err("Unsupported secondary RAID level %u/%u\n",
721 conf->prl, conf->srl);
722 return -1;
723 }
724 if (raiddisks == 2 && conf->rlq == DDF_RAID1_SIMPLE)
725 layout = 0x102;
726 else if (raiddisks == 3 && conf->rlq == DDF_RAID1_MULTI)
727 layout = 0x103;
728 else
729 return err_bad_ddf_layout(conf);
730 raiddisks *= conf->sec_elmnt_count;
731 level = 10;
732 goto good;
733 }
734
735 switch (conf->prl) {
736 case DDF_CONCAT:
737 level = LEVEL_LINEAR;
738 break;
739 case DDF_RAID0:
740 if (conf->rlq != DDF_RAID0_SIMPLE)
741 return err_bad_ddf_layout(conf);
742 level = 0;
743 break;
744 case DDF_RAID1:
745 if (!((conf->rlq == DDF_RAID1_SIMPLE && raiddisks == 2) ||
746 (conf->rlq == DDF_RAID1_MULTI && raiddisks == 3)))
747 return err_bad_ddf_layout(conf);
748 level = 1;
749 break;
750 case DDF_RAID1E:
751 if (conf->rlq == DDF_RAID1E_ADJACENT)
752 layout = 0x102;
753 else if (conf->rlq == DDF_RAID1E_OFFSET)
754 layout = 0x201;
755 else
756 return err_bad_ddf_layout(conf);
757 level = 10;
758 break;
759 case DDF_RAID4:
760 if (conf->rlq != DDF_RAID4_N)
761 return err_bad_ddf_layout(conf);
762 level = 4;
763 break;
764 case DDF_RAID5:
765 switch (conf->rlq) {
766 case DDF_RAID5_N_RESTART:
767 layout = ALGORITHM_LEFT_ASYMMETRIC;
768 break;
769 case DDF_RAID5_0_RESTART:
770 layout = ALGORITHM_RIGHT_ASYMMETRIC;
771 break;
772 case DDF_RAID5_N_CONTINUE:
773 layout = ALGORITHM_LEFT_SYMMETRIC;
774 break;
775 default:
776 return err_bad_ddf_layout(conf);
777 }
778 level = 5;
779 break;
780 case DDF_RAID6:
781 switch (conf->rlq) {
782 case DDF_RAID5_N_RESTART:
783 layout = ALGORITHM_ROTATING_N_RESTART;
784 break;
785 case DDF_RAID6_0_RESTART:
786 layout = ALGORITHM_ROTATING_ZERO_RESTART;
787 break;
788 case DDF_RAID5_N_CONTINUE:
789 layout = ALGORITHM_ROTATING_N_CONTINUE;
790 break;
791 default:
792 return err_bad_ddf_layout(conf);
793 }
794 level = 6;
795 break;
796 default:
797 return err_bad_ddf_layout(conf);
798 };
799
800 good:
801 array->level = level;
802 array->layout = layout;
803 array->raid_disks = raiddisks;
804 return 0;
805 }
806
807 static int load_ddf_header(int fd, unsigned long long lba,
808 unsigned long long size,
809 int type,
810 struct ddf_header *hdr, struct ddf_header *anchor)
811 {
812 /* read a ddf header (primary or secondary) from fd/lba
813 * and check that it is consistent with anchor
814 * Need to check:
815 * magic, crc, guid, rev, the LBAs, and the header type, and
816 * everything after header_type must be the same
817 */
818 if (lba >= size-1)
819 return 0;
820
821 if (lseek64(fd, lba<<9, 0) < 0)
822 return 0;
823
824 if (read(fd, hdr, 512) != 512)
825 return 0;
826
827 if (!be32_eq(hdr->magic, DDF_HEADER_MAGIC)) {
828 pr_err("%s: bad header magic\n", __func__);
829 return 0;
830 }
831 if (!be32_eq(calc_crc(hdr, 512), hdr->crc)) {
832 pr_err("%s: bad CRC\n", __func__);
833 return 0;
834 }
835 if (memcmp(anchor->guid, hdr->guid, DDF_GUID_LEN) != 0 ||
836 memcmp(anchor->revision, hdr->revision, 8) != 0 ||
837 !be64_eq(anchor->primary_lba, hdr->primary_lba) ||
838 !be64_eq(anchor->secondary_lba, hdr->secondary_lba) ||
839 hdr->type != type ||
840 memcmp(anchor->pad2, hdr->pad2, 512 -
841 offsetof(struct ddf_header, pad2)) != 0) {
842 pr_err("%s: header mismatch\n", __func__);
843 return 0;
844 }
845
846 /* Looks good enough to me... */
847 return 1;
848 }
849
850 static void *load_section(int fd, struct ddf_super *super, void *buf,
851 be32 offset_be, be32 len_be, int check)
852 {
853 unsigned long long offset = be32_to_cpu(offset_be);
854 unsigned long long len = be32_to_cpu(len_be);
855 int dofree = (buf == NULL);
856
857 if (check)
858 if (len != 2 && len != 8 && len != 32
859 && len != 128 && len != 512)
860 return NULL;
861
862 if (len > 1024)
863 return NULL;
864 if (!buf && posix_memalign(&buf, 512, len<<9) != 0)
865 buf = NULL;
866
867 if (!buf)
868 return NULL;
869
870 if (super->active->type == 1)
871 offset += be64_to_cpu(super->active->primary_lba);
872 else
873 offset += be64_to_cpu(super->active->secondary_lba);
874
875 if ((unsigned long long)lseek64(fd, offset<<9, 0) != (offset<<9)) {
876 if (dofree)
877 free(buf);
878 return NULL;
879 }
880 if ((unsigned long long)read(fd, buf, len<<9) != (len<<9)) {
881 if (dofree)
882 free(buf);
883 return NULL;
884 }
885 return buf;
886 }
887
888 static int load_ddf_headers(int fd, struct ddf_super *super, char *devname)
889 {
890 unsigned long long dsize;
891
892 get_dev_size(fd, NULL, &dsize);
893
894 if (lseek64(fd, dsize-512, 0) < 0) {
895 if (devname)
896 pr_err("Cannot seek to anchor block on %s: %s\n",
897 devname, strerror(errno));
898 return 1;
899 }
900 if (read(fd, &super->anchor, 512) != 512) {
901 if (devname)
902 pr_err("Cannot read anchor block on %s: %s\n",
903 devname, strerror(errno));
904 return 1;
905 }
906 if (!be32_eq(super->anchor.magic, DDF_HEADER_MAGIC)) {
907 if (devname)
908 pr_err("no DDF anchor found on %s\n",
909 devname);
910 return 2;
911 }
912 if (!be32_eq(calc_crc(&super->anchor, 512), super->anchor.crc)) {
913 if (devname)
914 pr_err("bad CRC on anchor on %s\n",
915 devname);
916 return 2;
917 }
918 if (memcmp(super->anchor.revision, DDF_REVISION_0, 8) != 0 &&
919 memcmp(super->anchor.revision, DDF_REVISION_2, 8) != 0) {
920 if (devname)
921 pr_err("can only support super revision"
922 " %.8s and earlier, not %.8s on %s\n",
923 DDF_REVISION_2, super->anchor.revision,devname);
924 return 2;
925 }
926 super->active = NULL;
927 if (load_ddf_header(fd, be64_to_cpu(super->anchor.primary_lba),
928 dsize >> 9, 1,
929 &super->primary, &super->anchor) == 0) {
930 if (devname)
931 pr_err("Failed to load primary DDF header "
932 "on %s\n", devname);
933 } else
934 super->active = &super->primary;
935
936 if (load_ddf_header(fd, be64_to_cpu(super->anchor.secondary_lba),
937 dsize >> 9, 2,
938 &super->secondary, &super->anchor)) {
939 if (super->active == NULL
940 || (be32_to_cpu(super->primary.seq)
941 < be32_to_cpu(super->secondary.seq) &&
942 !super->secondary.openflag)
943 || (be32_to_cpu(super->primary.seq)
944 == be32_to_cpu(super->secondary.seq) &&
945 super->primary.openflag && !super->secondary.openflag)
946 )
947 super->active = &super->secondary;
948 } else if (devname &&
949 be64_to_cpu(super->anchor.secondary_lba) != ~(__u64)0)
950 pr_err("Failed to load secondary DDF header on %s\n",
951 devname);
952 if (super->active == NULL)
953 return 2;
954 return 0;
955 }
956
957 static int load_ddf_global(int fd, struct ddf_super *super, char *devname)
958 {
959 void *ok;
960 ok = load_section(fd, super, &super->controller,
961 super->active->controller_section_offset,
962 super->active->controller_section_length,
963 0);
964 super->phys = load_section(fd, super, NULL,
965 super->active->phys_section_offset,
966 super->active->phys_section_length,
967 1);
968 super->pdsize = be32_to_cpu(super->active->phys_section_length) * 512;
969
970 super->virt = load_section(fd, super, NULL,
971 super->active->virt_section_offset,
972 super->active->virt_section_length,
973 1);
974 super->vdsize = be32_to_cpu(super->active->virt_section_length) * 512;
975 if (!ok ||
976 !super->phys ||
977 !super->virt) {
978 free(super->phys);
979 free(super->virt);
980 super->phys = NULL;
981 super->virt = NULL;
982 return 2;
983 }
984 super->conflist = NULL;
985 super->dlist = NULL;
986
987 super->max_part = be16_to_cpu(super->active->max_partitions);
988 super->mppe = be16_to_cpu(super->active->max_primary_element_entries);
989 super->conf_rec_len = be16_to_cpu(super->active->config_record_len);
990 return 0;
991 }
992
993 #define DDF_UNUSED_BVD 0xff
994 static int alloc_other_bvds(const struct ddf_super *ddf, struct vcl *vcl)
995 {
996 unsigned int n_vds = vcl->conf.sec_elmnt_count - 1;
997 unsigned int i, vdsize;
998 void *p;
999 if (n_vds == 0) {
1000 vcl->other_bvds = NULL;
1001 return 0;
1002 }
1003 vdsize = ddf->conf_rec_len * 512;
1004 if (posix_memalign(&p, 512, n_vds *
1005 (vdsize + sizeof(struct vd_config *))) != 0)
1006 return -1;
1007 vcl->other_bvds = (struct vd_config **) (p + n_vds * vdsize);
1008 for (i = 0; i < n_vds; i++) {
1009 vcl->other_bvds[i] = p + i * vdsize;
1010 memset(vcl->other_bvds[i], 0, vdsize);
1011 vcl->other_bvds[i]->sec_elmnt_seq = DDF_UNUSED_BVD;
1012 }
1013 return 0;
1014 }
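/* Memory layout of the single allocation above: the n_vds configuration
 * records first, then the pointer array that indexes them,
 *
 *	p -> [bvd 0][bvd 1]...[bvd n-1][other_bvds[0..n_vds-1]]
 *
 * so freeing other_bvds[0] releases the whole block (see free_super_ddf()).
 */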
1015
1016 static void add_other_bvd(struct vcl *vcl, struct vd_config *vd,
1017 unsigned int len)
1018 {
1019 int i;
1020 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
1021 if (vcl->other_bvds[i]->sec_elmnt_seq == vd->sec_elmnt_seq)
1022 break;
1023
1024 if (i < vcl->conf.sec_elmnt_count-1) {
1025 if (be32_to_cpu(vd->seqnum) <=
1026 be32_to_cpu(vcl->other_bvds[i]->seqnum))
1027 return;
1028 } else {
1029 for (i = 0; i < vcl->conf.sec_elmnt_count-1; i++)
1030 if (vcl->other_bvds[i]->sec_elmnt_seq == DDF_UNUSED_BVD)
1031 break;
1032 if (i == vcl->conf.sec_elmnt_count-1) {
1033 pr_err("no space for sec level config %u, count is %u\n",
1034 vd->sec_elmnt_seq, vcl->conf.sec_elmnt_count);
1035 return;
1036 }
1037 }
1038 memcpy(vcl->other_bvds[i], vd, len);
1039 }
1040
1041 static int load_ddf_local(int fd, struct ddf_super *super,
1042 char *devname, int keep)
1043 {
1044 struct dl *dl;
1045 struct stat stb;
1046 char *conf;
1047 unsigned int i;
1048 unsigned int confsec;
1049 int vnum;
1050 unsigned int max_virt_disks =
1051 be16_to_cpu(super->active->max_vd_entries);
1052 unsigned long long dsize;
1053
1054 /* First the local disk info */
1055 if (posix_memalign((void**)&dl, 512,
1056 sizeof(*dl) +
1057 (super->max_part) * sizeof(dl->vlist[0])) != 0) {
1058 pr_err("%s could not allocate disk info buffer\n",
1059 __func__);
1060 return 1;
1061 }
1062
1063 load_section(fd, super, &dl->disk,
1064 super->active->data_section_offset,
1065 super->active->data_section_length,
1066 0);
1067 dl->devname = devname ? xstrdup(devname) : NULL;
1068
1069 fstat(fd, &stb);
1070 dl->major = major(stb.st_rdev);
1071 dl->minor = minor(stb.st_rdev);
1072 dl->next = super->dlist;
1073 dl->fd = keep ? fd : -1;
1074
1075 dl->size = 0;
1076 if (get_dev_size(fd, devname, &dsize))
1077 dl->size = dsize >> 9;
1078 /* If the disks have different sizes, the LBAs will differ
1079 * between phys disks.
1080 * At this point, the values in super->active must be valid
1081 * for this phys disk. */
1082 dl->primary_lba = super->active->primary_lba;
1083 dl->secondary_lba = super->active->secondary_lba;
1084 dl->workspace_lba = super->active->workspace_lba;
1085 dl->spare = NULL;
1086 for (i = 0 ; i < super->max_part ; i++)
1087 dl->vlist[i] = NULL;
1088 super->dlist = dl;
1089 dl->pdnum = -1;
1090 for (i = 0; i < be16_to_cpu(super->active->max_pd_entries); i++)
1091 if (memcmp(super->phys->entries[i].guid,
1092 dl->disk.guid, DDF_GUID_LEN) == 0)
1093 dl->pdnum = i;
1094
1095 /* Now the config list. */
1096 /* 'conf' is an array of config entries, some of which are
1097 * probably invalid. Those which are good need to be copied into
1098 * the conflist
1099 */
1100
1101 conf = load_section(fd, super, super->conf,
1102 super->active->config_section_offset,
1103 super->active->config_section_length,
1104 0);
1105 super->conf = conf;
1106 vnum = 0;
1107 for (confsec = 0;
1108 confsec < be32_to_cpu(super->active->config_section_length);
1109 confsec += super->conf_rec_len) {
1110 struct vd_config *vd =
1111 (struct vd_config *)((char*)conf + confsec*512);
1112 struct vcl *vcl;
1113
1114 if (be32_eq(vd->magic, DDF_SPARE_ASSIGN_MAGIC)) {
1115 if (dl->spare)
1116 continue;
1117 if (posix_memalign((void**)&dl->spare, 512,
1118 super->conf_rec_len*512) != 0) {
1119 pr_err("%s could not allocate spare info buf\n",
1120 __func__);
1121 return 1;
1122 }
1123
1124 memcpy(dl->spare, vd, super->conf_rec_len*512);
1125 continue;
1126 }
1127 if (!be32_eq(vd->magic, DDF_VD_CONF_MAGIC))
1128 /* Must be vendor-unique - I cannot handle those */
1129 continue;
1130
1131 for (vcl = super->conflist; vcl; vcl = vcl->next) {
1132 if (memcmp(vcl->conf.guid,
1133 vd->guid, DDF_GUID_LEN) == 0)
1134 break;
1135 }
1136
1137 if (vcl) {
1138 dl->vlist[vnum++] = vcl;
1139 if (vcl->other_bvds != NULL &&
1140 vcl->conf.sec_elmnt_seq != vd->sec_elmnt_seq) {
1141 add_other_bvd(vcl, vd, super->conf_rec_len*512);
1142 continue;
1143 }
1144 if (be32_to_cpu(vd->seqnum) <=
1145 be32_to_cpu(vcl->conf.seqnum))
1146 continue;
1147 } else {
1148 if (posix_memalign((void**)&vcl, 512,
1149 (super->conf_rec_len*512 +
1150 offsetof(struct vcl, conf))) != 0) {
1151 pr_err("%s could not allocate vcl buf\n",
1152 __func__);
1153 return 1;
1154 }
1155 vcl->next = super->conflist;
1156 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
1157 vcl->conf.sec_elmnt_count = vd->sec_elmnt_count;
1158 if (alloc_other_bvds(super, vcl) != 0) {
1159 pr_err("%s could not allocate other bvds\n",
1160 __func__);
1161 free(vcl);
1162 return 1;
1163 };
1164 super->conflist = vcl;
1165 dl->vlist[vnum++] = vcl;
1166 }
1167 memcpy(&vcl->conf, vd, super->conf_rec_len*512);
1168 for (i=0; i < max_virt_disks ; i++)
1169 if (memcmp(super->virt->entries[i].guid,
1170 vcl->conf.guid, DDF_GUID_LEN)==0)
1171 break;
1172 if (i < max_virt_disks)
1173 vcl->vcnum = i;
1174 }
1175
1176 return 0;
1177 }
1178
1179 static int load_super_ddf(struct supertype *st, int fd,
1180 char *devname)
1181 {
1182 unsigned long long dsize;
1183 struct ddf_super *super;
1184 int rv;
1185
1186 if (get_dev_size(fd, devname, &dsize) == 0)
1187 return 1;
1188
1189 if (test_partition(fd))
1190 /* DDF is not allowed on partitions */
1191 return 1;
1192
1193 /* 32M is a lower bound */
1194 if (dsize <= 32*1024*1024) {
1195 if (devname)
1196 pr_err("%s is too small for ddf: "
1197 "size is %llu sectors.\n",
1198 devname, dsize>>9);
1199 return 1;
1200 }
1201 if (dsize & 511) {
1202 if (devname)
1203 pr_err("%s is an odd size for ddf: "
1204 "size is %llu bytes.\n",
1205 devname, dsize);
1206 return 1;
1207 }
1208
1209 free_super_ddf(st);
1210
1211 if (posix_memalign((void**)&super, 512, sizeof(*super))!= 0) {
1212 pr_err("malloc of %zu failed.\n",
1213 sizeof(*super));
1214 return 1;
1215 }
1216 memset(super, 0, sizeof(*super));
1217
1218 rv = load_ddf_headers(fd, super, devname);
1219 if (rv) {
1220 free(super);
1221 return rv;
1222 }
1223
1224 /* Have valid headers and have chosen the best. Let's read in the rest*/
1225
1226 rv = load_ddf_global(fd, super, devname);
1227
1228 if (rv) {
1229 if (devname)
1230 pr_err("Failed to load all information "
1231 "sections on %s\n", devname);
1232 free(super);
1233 return rv;
1234 }
1235
1236 rv = load_ddf_local(fd, super, devname, 0);
1237
1238 if (rv) {
1239 if (devname)
1240 pr_err("Failed to load all information "
1241 "sections on %s\n", devname);
1242 free(super);
1243 return rv;
1244 }
1245
1246 /* Should possibly check the sections .... */
1247
1248 st->sb = super;
1249 if (st->ss == NULL) {
1250 st->ss = &super_ddf;
1251 st->minor_version = 0;
1252 st->max_devs = 512;
1253 }
1254 return 0;
1255
1256 }
1257
1258 static void free_super_ddf(struct supertype *st)
1259 {
1260 struct ddf_super *ddf = st->sb;
1261 if (ddf == NULL)
1262 return;
1263 free(ddf->phys);
1264 free(ddf->virt);
1265 free(ddf->conf);
1266 while (ddf->conflist) {
1267 struct vcl *v = ddf->conflist;
1268 ddf->conflist = v->next;
1269 if (v->block_sizes)
1270 free(v->block_sizes);
1271 if (v->other_bvds)
1272 /*
1273 v->other_bvds[0] points to beginning of buffer,
1274 see alloc_other_bvds()
1275 */
1276 free(v->other_bvds[0]);
1277 free(v);
1278 }
1279 while (ddf->dlist) {
1280 struct dl *d = ddf->dlist;
1281 ddf->dlist = d->next;
1282 if (d->fd >= 0)
1283 close(d->fd);
1284 if (d->spare)
1285 free(d->spare);
1286 free(d);
1287 }
1288 while (ddf->add_list) {
1289 struct dl *d = ddf->add_list;
1290 ddf->add_list = d->next;
1291 if (d->fd >= 0)
1292 close(d->fd);
1293 if (d->spare)
1294 free(d->spare);
1295 free(d);
1296 }
1297 free(ddf);
1298 st->sb = NULL;
1299 }
1300
1301 static struct supertype *match_metadata_desc_ddf(char *arg)
1302 {
1303 /* 'ddf' only supports containers */
1304 struct supertype *st;
1305 if (strcmp(arg, "ddf") != 0 &&
1306 strcmp(arg, "default") != 0
1307 )
1308 return NULL;
1309
1310 st = xcalloc(1, sizeof(*st));
1311 st->ss = &super_ddf;
1312 st->max_devs = 512;
1313 st->minor_version = 0;
1314 st->sb = NULL;
1315 return st;
1316 }
1317
1318 #ifndef MDASSEMBLE
1319
1320 static mapping_t ddf_state[] = {
1321 { "Optimal", 0},
1322 { "Degraded", 1},
1323 { "Deleted", 2},
1324 { "Missing", 3},
1325 { "Failed", 4},
1326 { "Partially Optimal", 5},
1327 { "-reserved-", 6},
1328 { "-reserved-", 7},
1329 { NULL, 0}
1330 };
1331
1332 static mapping_t ddf_init_state[] = {
1333 { "Not Initialised", 0},
1334 { "QuickInit in Progress", 1},
1335 { "Fully Initialised", 2},
1336 { "*UNKNOWN*", 3},
1337 { NULL, 0}
1338 };
1339 static mapping_t ddf_access[] = {
1340 { "Read/Write", 0},
1341 { "Reserved", 1},
1342 { "Read Only", 2},
1343 { "Blocked (no access)", 3},
1344 { NULL ,0}
1345 };
1346
1347 static mapping_t ddf_level[] = {
1348 { "RAID0", DDF_RAID0},
1349 { "RAID1", DDF_RAID1},
1350 { "RAID3", DDF_RAID3},
1351 { "RAID4", DDF_RAID4},
1352 { "RAID5", DDF_RAID5},
1353 { "RAID1E",DDF_RAID1E},
1354 { "JBOD", DDF_JBOD},
1355 { "CONCAT",DDF_CONCAT},
1356 { "RAID5E",DDF_RAID5E},
1357 { "RAID5EE",DDF_RAID5EE},
1358 { "RAID6", DDF_RAID6},
1359 { NULL, 0}
1360 };
1361 static mapping_t ddf_sec_level[] = {
1362 { "Striped", DDF_2STRIPED},
1363 { "Mirrored", DDF_2MIRRORED},
1364 { "Concat", DDF_2CONCAT},
1365 { "Spanned", DDF_2SPANNED},
1366 { NULL, 0}
1367 };
1368 #endif
1369
1370 static int all_ff(const char *guid)
1371 {
1372 int i;
1373 for (i = 0; i < DDF_GUID_LEN; i++)
1374 if (guid[i] != (char)0xff)
1375 return 0;
1376 return 1;
1377 }
1378
1379 static const char *guid_str(const char *guid)
1380 {
1381 static char buf[DDF_GUID_LEN*2+1];
1382 int i;
1383 char *p = buf;
1384 for (i = 0; i < DDF_GUID_LEN; i++) {
1385 unsigned char c = guid[i];
1386 if (c >= 32 && c < 127)
1387 p += sprintf(p, "%c", c);
1388 else
1389 p += sprintf(p, "%02x", c);
1390 }
1391 *p = '\0';
1392 return (const char *) buf;
1393 }
1394
1395 #ifndef MDASSEMBLE
1396 static void print_guid(char *guid, int tstamp)
1397 {
1398 /* GUIDs are part (or all) ASCII and part binary.
1399 * They tend to be space padded.
1400 * We print the GUID in HEX, then in parentheses add
1401 * any initial ASCII sequence, and a possible
1402 * time stamp from bytes 16-19
1403 */
1404 int l = DDF_GUID_LEN;
1405 int i;
1406
1407 for (i=0 ; i<DDF_GUID_LEN ; i++) {
1408 if ((i&3)==0 && i != 0) printf(":");
1409 printf("%02X", guid[i]&255);
1410 }
1411
1412 printf("\n (");
1413 while (l && guid[l-1] == ' ')
1414 l--;
1415 for (i=0 ; i<l ; i++) {
1416 if (guid[i] >= 0x20 && guid[i] < 0x7f)
1417 fputc(guid[i], stdout);
1418 else
1419 break;
1420 }
1421 if (tstamp) {
1422 time_t then = __be32_to_cpu(*(__u32*)(guid+16)) + DECADE;
1423 char tbuf[100];
1424 struct tm *tm;
1425 tm = localtime(&then);
1426 strftime(tbuf, 100, " %D %T",tm);
1427 fputs(tbuf, stdout);
1428 }
1429 printf(")");
1430 }
1431
1432 static void examine_vd(int n, struct ddf_super *sb, char *guid)
1433 {
1434 int crl = sb->conf_rec_len;
1435 struct vcl *vcl;
1436
1437 for (vcl = sb->conflist ; vcl ; vcl = vcl->next) {
1438 unsigned int i;
1439 struct vd_config *vc = &vcl->conf;
1440
1441 if (!be32_eq(calc_crc(vc, crl*512), vc->crc))
1442 continue;
1443 if (memcmp(vc->guid, guid, DDF_GUID_LEN) != 0)
1444 continue;
1445
1446 /* Ok, we know about this VD, let's give more details */
1447 printf(" Raid Devices[%d] : %d (", n,
1448 be16_to_cpu(vc->prim_elmnt_count));
1449 for (i = 0; i < be16_to_cpu(vc->prim_elmnt_count); i++) {
1450 int j;
1451 int cnt = be16_to_cpu(sb->phys->max_pdes);
1452 for (j=0; j<cnt; j++)
1453 if (be32_eq(vc->phys_refnum[i],
1454 sb->phys->entries[j].refnum))
1455 break;
1456 if (i) printf(" ");
1457 if (j < cnt)
1458 printf("%d", j);
1459 else
1460 printf("--");
1461 printf("@%lluK", (unsigned long long) be64_to_cpu(LBA_OFFSET(sb, vc)[i])/2);
1462 }
1463 printf(")\n");
1464 if (vc->chunk_shift != 255)
1465 printf(" Chunk Size[%d] : %d sectors\n", n,
1466 1 << vc->chunk_shift);
1467 printf(" Raid Level[%d] : %s\n", n,
1468 map_num(ddf_level, vc->prl)?:"-unknown-");
1469 if (vc->sec_elmnt_count != 1) {
1470 printf(" Secondary Position[%d] : %d of %d\n", n,
1471 vc->sec_elmnt_seq, vc->sec_elmnt_count);
1472 printf(" Secondary Level[%d] : %s\n", n,
1473 map_num(ddf_sec_level, vc->srl) ?: "-unknown-");
1474 }
1475 printf(" Device Size[%d] : %llu\n", n,
1476 be64_to_cpu(vc->blocks)/2);
1477 printf(" Array Size[%d] : %llu\n", n,
1478 be64_to_cpu(vc->array_blocks)/2);
1479 }
1480 }
1481
1482 static void examine_vds(struct ddf_super *sb)
1483 {
1484 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1485 unsigned int i;
1486 printf(" Virtual Disks : %d\n", cnt);
1487
1488 for (i = 0; i < be16_to_cpu(sb->virt->max_vdes); i++) {
1489 struct virtual_entry *ve = &sb->virt->entries[i];
1490 if (all_ff(ve->guid))
1491 continue;
1492 printf("\n");
1493 printf(" VD GUID[%d] : ", i); print_guid(ve->guid, 1);
1494 printf("\n");
1495 printf(" unit[%d] : %d\n", i, be16_to_cpu(ve->unit));
1496 printf(" state[%d] : %s, %s%s\n", i,
1497 map_num(ddf_state, ve->state & 7),
1498 (ve->state & DDF_state_morphing) ? "Morphing, ": "",
1499 (ve->state & DDF_state_inconsistent)? "Not Consistent" : "Consistent");
1500 printf(" init state[%d] : %s\n", i,
1501 map_num(ddf_init_state, ve->init_state&DDF_initstate_mask));
1502 printf(" access[%d] : %s\n", i,
1503 map_num(ddf_access, (ve->init_state & DDF_access_mask) >> 6));
1504 printf(" Name[%d] : %.16s\n", i, ve->name);
1505 examine_vd(i, sb, ve->guid);
1506 }
1507 if (cnt) printf("\n");
1508 }
1509
1510 static void examine_pds(struct ddf_super *sb)
1511 {
1512 int cnt = be16_to_cpu(sb->phys->max_pdes);
1513 int i;
1514 struct dl *dl;
1515 int unlisted = 0;
1516 printf(" Physical Disks : %d\n", cnt);
1517 printf(" Number RefNo Size Device Type/State\n");
1518
1519 for (dl = sb->dlist; dl; dl = dl->next)
1520 dl->displayed = 0;
1521
1522 for (i=0 ; i<cnt ; i++) {
1523 struct phys_disk_entry *pd = &sb->phys->entries[i];
1524 int type = be16_to_cpu(pd->type);
1525 int state = be16_to_cpu(pd->state);
1526
1527 if (be32_to_cpu(pd->refnum) == 0xffffffff)
1528 /* Not in use */
1529 continue;
1530 //printf(" PD GUID[%d] : ", i); print_guid(pd->guid, 0);
1531 //printf("\n");
1532 printf(" %3d %08x ", i,
1533 be32_to_cpu(pd->refnum));
1534 printf("%8lluK ",
1535 be64_to_cpu(pd->config_size)>>1);
1536 for (dl = sb->dlist; dl ; dl = dl->next) {
1537 if (be32_eq(dl->disk.refnum, pd->refnum)) {
1538 char *dv = map_dev(dl->major, dl->minor, 0);
1539 if (dv) {
1540 printf("%-15s", dv);
1541 break;
1542 }
1543 }
1544 }
1545 if (!dl)
1546 printf("%15s","");
1547 else
1548 dl->displayed = 1;
1549 printf(" %s%s%s%s%s",
1550 (type&2) ? "active":"",
1551 (type&4) ? "Global-Spare":"",
1552 (type&8) ? "spare" : "",
1553 (type&16)? ", foreign" : "",
1554 (type&32)? "pass-through" : "");
1555 if (state & DDF_Failed)
1556 /* This over-rides these three */
1557 state &= ~(DDF_Online|DDF_Rebuilding|DDF_Transition);
1558 printf("/%s%s%s%s%s%s%s",
1559 (state&1)? "Online": "Offline",
1560 (state&2)? ", Failed": "",
1561 (state&4)? ", Rebuilding": "",
1562 (state&8)? ", in-transition": "",
1563 (state&16)? ", SMART-errors": "",
1564 (state&32)? ", Unrecovered-Read-Errors": "",
1565 (state&64)? ", Missing" : "");
1566 printf("\n");
1567 }
1568 for (dl = sb->dlist; dl; dl = dl->next) {
1569 char *dv;
1570 if (dl->displayed)
1571 continue;
1572 if (!unlisted)
1573 printf(" Physical disks not in metadata!:\n");
1574 unlisted = 1;
1575 dv = map_dev(dl->major, dl->minor, 0);
1576 printf(" %08x %s\n", be32_to_cpu(dl->disk.refnum),
1577 dv ? dv : "-unknown-");
1578 }
1579 if (unlisted)
1580 printf("\n");
1581 }
1582
1583 static void examine_super_ddf(struct supertype *st, char *homehost)
1584 {
1585 struct ddf_super *sb = st->sb;
1586
1587 printf(" Magic : %08x\n", be32_to_cpu(sb->anchor.magic));
1588 printf(" Version : %.8s\n", sb->anchor.revision);
1589 printf("Controller GUID : "); print_guid(sb->controller.guid, 0);
1590 printf("\n");
1591 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1592 printf("\n");
1593 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1594 printf(" Redundant hdr : %s\n", (be32_eq(sb->secondary.magic,
1595 DDF_HEADER_MAGIC)
1596 ?"yes" : "no"));
1597 examine_vds(sb);
1598 examine_pds(sb);
1599 }
1600
1601 static unsigned int get_vd_num_of_subarray(struct supertype *st)
1602 {
1603 /*
1604 * Figure out the VD number for this supertype.
1605 * Returns DDF_CONTAINER for the container itself,
1606 * and DDF_NOTFOUND on error.
1607 */
1608 struct ddf_super *ddf = st->sb;
1609 struct mdinfo *sra;
1610 char *sub, *end;
1611 unsigned int vcnum;
1612
1613 if (*st->container_devnm == '\0')
1614 return DDF_CONTAINER;
1615
1616 sra = sysfs_read(-1, st->devnm, GET_VERSION);
1617 if (!sra || sra->array.major_version != -1 ||
1618 sra->array.minor_version != -2 ||
1619 !is_subarray(sra->text_version))
1620 return DDF_NOTFOUND;
1621
1622 sub = strchr(sra->text_version + 1, '/');
1623 if (sub != NULL)
1624 vcnum = strtoul(sub + 1, &end, 10);
1625 if (sub == NULL || *sub == '\0' || *end != '\0' ||
1626 vcnum >= be16_to_cpu(ddf->active->max_vd_entries))
1627 return DDF_NOTFOUND;
1628
1629 return vcnum;
1630 }
1631
1632 static void brief_examine_super_ddf(struct supertype *st, int verbose)
1633 {
1634 /* We just write a generic DDF ARRAY entry
1635 */
1636 struct mdinfo info;
1637 char nbuf[64];
1638 getinfo_super_ddf(st, &info, NULL);
1639 fname_from_uuid(st, &info, nbuf, ':');
1640
1641 printf("ARRAY metadata=ddf UUID=%s\n", nbuf + 5);
1642 }
1643
1644 static void brief_examine_subarrays_ddf(struct supertype *st, int verbose)
1645 {
1646 /* We write a DDF ARRAY member entry for each vd, identifying container
1647 * by uuid and member by unit number and uuid.
1648 */
1649 struct ddf_super *ddf = st->sb;
1650 struct mdinfo info;
1651 unsigned int i;
1652 char nbuf[64];
1653 getinfo_super_ddf(st, &info, NULL);
1654 fname_from_uuid(st, &info, nbuf, ':');
1655
1656 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
1657 struct virtual_entry *ve = &ddf->virt->entries[i];
1658 struct vcl vcl;
1659 char nbuf1[64];
1660 char namebuf[17];
1661 if (all_ff(ve->guid))
1662 continue;
1663 memcpy(vcl.conf.guid, ve->guid, DDF_GUID_LEN);
1664 ddf->currentconf =&vcl;
1665 vcl.vcnum = i;
1666 uuid_from_super_ddf(st, info.uuid);
1667 fname_from_uuid(st, &info, nbuf1, ':');
1668 _ddf_array_name(namebuf, ddf, i);
1669 printf("ARRAY%s%s container=%s member=%d UUID=%s\n",
1670 namebuf[0] == '\0' ? "" : " /dev/md/", namebuf,
1671 nbuf+5, i, nbuf1+5);
1672 }
1673 }
1674
1675 static void export_examine_super_ddf(struct supertype *st)
1676 {
1677 struct mdinfo info;
1678 char nbuf[64];
1679 getinfo_super_ddf(st, &info, NULL);
1680 fname_from_uuid(st, &info, nbuf, ':');
1681 printf("MD_METADATA=ddf\n");
1682 printf("MD_LEVEL=container\n");
1683 printf("MD_UUID=%s\n", nbuf+5);
1684 printf("MD_DEVICES=%u\n",
1685 be16_to_cpu(((struct ddf_super *)st->sb)->phys->used_pdes));
1686 }
1687
1688 static int copy_metadata_ddf(struct supertype *st, int from, int to)
1689 {
1690 void *buf;
1691 unsigned long long dsize, offset;
1692 int bytes;
1693 struct ddf_header *ddf;
1694 int written = 0;
1695
1696 /* The meta consists of an anchor, a primary, and a secondary.
1697 * This all lives at the end of the device.
1698 * So it is easiest to find the earliest of primary and
1699 * secondary, and copy everything from there.
1700 *
1701 * Anchor is 512 bytes from the end. It contains primary_lba and secondary_lba;
1702 * we copy from whichever of those is earlier.
1703 */
1704
1705 if (posix_memalign(&buf, 4096, 4096) != 0)
1706 return 1;
1707
1708 if (!get_dev_size(from, NULL, &dsize))
1709 goto err;
1710
1711 if (lseek64(from, dsize-512, 0) < 0)
1712 goto err;
1713 if (read(from, buf, 512) != 512)
1714 goto err;
1715 ddf = buf;
1716 if (!be32_eq(ddf->magic, DDF_HEADER_MAGIC) ||
1717 !be32_eq(calc_crc(ddf, 512), ddf->crc) ||
1718 (memcmp(ddf->revision, DDF_REVISION_0, 8) != 0 &&
1719 memcmp(ddf->revision, DDF_REVISION_2, 8) != 0))
1720 goto err;
1721
1722 offset = dsize - 512;
1723 if ((be64_to_cpu(ddf->primary_lba) << 9) < offset)
1724 offset = be64_to_cpu(ddf->primary_lba) << 9;
1725 if ((be64_to_cpu(ddf->secondary_lba) << 9) < offset)
1726 offset = be64_to_cpu(ddf->secondary_lba) << 9;
1727
1728 bytes = dsize - offset;
1729
1730 if (lseek64(from, offset, 0) < 0 ||
1731 lseek64(to, offset, 0) < 0)
1732 goto err;
1733 while (written < bytes) {
1734 int n = bytes - written;
1735 if (n > 4096)
1736 n = 4096;
1737 if (read(from, buf, n) != n)
1738 goto err;
1739 if (write(to, buf, n) != n)
1740 goto err;
1741 written += n;
1742 }
1743 free(buf);
1744 return 0;
1745 err:
1746 free(buf);
1747 return 1;
1748 }
1749
1750 static void detail_super_ddf(struct supertype *st, char *homehost)
1751 {
1752 struct ddf_super *sb = st->sb;
1753 int cnt = be16_to_cpu(sb->virt->populated_vdes);
1754
1755 printf(" Container GUID : "); print_guid(sb->anchor.guid, 1);
1756 printf("\n");
1757 printf(" Seq : %08x\n", be32_to_cpu(sb->active->seq));
1758 printf(" Virtual Disks : %d\n", cnt);
1759 printf("\n");
1760 }
1761
1762 static const char *vendors_with_variable_volume_UUID[] = {
1763 "LSI ",
1764 };
1765
1766 static int volume_id_is_reliable(const struct ddf_super *ddf)
1767 {
1768 int n = ARRAY_SIZE(vendors_with_variable_volume_UUID);
1769 int i;
1770 for (i = 0; i < n; i++)
1771 if (!memcmp(ddf->controller.guid,
1772 vendors_with_variable_volume_UUID[i], 8))
1773 return 0;
1774 return 1;
1775 }
1776
1777 static void uuid_of_ddf_subarray(const struct ddf_super *ddf,
1778 unsigned int vcnum, int uuid[4])
1779 {
1780 char buf[DDF_GUID_LEN+18], sha[20], *p;
1781 struct sha1_ctx ctx;
1782 if (volume_id_is_reliable(ddf)) {
1783 uuid_from_ddf_guid(ddf->virt->entries[vcnum].guid, uuid);
1784 return;
1785 }
1786 /*
1787 * Some fake RAID BIOSes (in particular, LSI ones) change the
1788 * VD GUID at every boot. These GUIDs are not suitable for
1789 * identifying an array. Luckily the header GUID appears to
1790 * remain constant.
1791 * We construct a pseudo-UUID from the header GUID and those
1792 * properties of the subarray that we expect to remain constant.
1793 */
1794 memset(buf, 0, sizeof(buf));
1795 p = buf;
1796 memcpy(p, ddf->anchor.guid, DDF_GUID_LEN);
1797 p += DDF_GUID_LEN;
1798 memcpy(p, ddf->virt->entries[vcnum].name, 16);
1799 p += 16;
1800 *((__u16 *) p) = vcnum;
1801 sha1_init_ctx(&ctx);
1802 sha1_process_bytes(buf, sizeof(buf), &ctx);
1803 sha1_finish_ctx(&ctx, sha);
1804 memcpy(uuid, sha, 4*4);
1805 }
1806
1807 static void brief_detail_super_ddf(struct supertype *st)
1808 {
1809 struct mdinfo info;
1810 char nbuf[64];
1811 struct ddf_super *ddf = st->sb;
1812 unsigned int vcnum = get_vd_num_of_subarray(st);
1813 if (vcnum == DDF_CONTAINER)
1814 uuid_from_super_ddf(st, info.uuid);
1815 else if (vcnum == DDF_NOTFOUND)
1816 return;
1817 else
1818 uuid_of_ddf_subarray(ddf, vcnum, info.uuid);
1819 fname_from_uuid(st, &info, nbuf,':');
1820 printf(" UUID=%s", nbuf + 5);
1821 }
1822 #endif
1823
1824 static int match_home_ddf(struct supertype *st, char *homehost)
1825 {
1826 /* It matches 'this' host if the controller is a
1827 * Linux-MD controller with vendor_data matching
1828 * the hostname. It would be nice if we could
1829 * test against controller found in /sys or somewhere...
1830 */
1831 struct ddf_super *ddf = st->sb;
1832 unsigned int len;
1833
1834 if (!homehost)
1835 return 0;
1836 len = strlen(homehost);
1837
1838 return (memcmp(ddf->controller.guid, T10, 8) == 0 &&
1839 len < sizeof(ddf->controller.vendor_data) &&
1840 memcmp(ddf->controller.vendor_data, homehost,len) == 0 &&
1841 ddf->controller.vendor_data[len] == 0);
1842 }
1843
1844 #ifndef MDASSEMBLE
1845 static int find_index_in_bvd(const struct ddf_super *ddf,
1846 const struct vd_config *conf, unsigned int n,
1847 unsigned int *n_bvd)
1848 {
1849 /*
1850 * Find the index of the n-th valid physical disk in this BVD.
1851 * Unused entries can be sprinkled in with the used entries,
1852 * but don't count.
1853 */
1854 unsigned int i, j;
1855 for (i = 0, j = 0;
1856 i < ddf->mppe && j < be16_to_cpu(conf->prim_elmnt_count);
1857 i++) {
1858 if (be32_to_cpu(conf->phys_refnum[i]) != 0xffffffff) {
1859 if (n == j) {
1860 *n_bvd = i;
1861 return 1;
1862 }
1863 j++;
1864 }
1865 }
1866 dprintf("%s: couldn't find BVD member %u (total %u)\n",
1867 __func__, n, be16_to_cpu(conf->prim_elmnt_count));
1868 return 0;
1869 }
1870
1871 /* Given a member array instance number, and a raid disk within that instance,
1872 * find the vd_config structure. The offset of the given disk in the phys_refnum
1873 * table is returned in n_bvd.
1874 * For two-level members with a secondary raid level the vd_config for
1875 * the appropriate BVD is returned.
1876 * The return value is always &vlc->conf, where vlc is returned in last pointer.
1877 */
1878 static struct vd_config *find_vdcr(struct ddf_super *ddf, unsigned int inst,
1879 unsigned int n,
1880 unsigned int *n_bvd, struct vcl **vcl)
1881 {
1882 struct vcl *v;
1883
1884 for (v = ddf->conflist; v; v = v->next) {
1885 unsigned int nsec, ibvd = 0;
1886 struct vd_config *conf;
1887 if (inst != v->vcnum)
1888 continue;
1889 conf = &v->conf;
1890 if (conf->sec_elmnt_count == 1) {
1891 if (find_index_in_bvd(ddf, conf, n, n_bvd)) {
1892 *vcl = v;
1893 return conf;
1894 } else
1895 goto bad;
1896 }
1897 if (v->other_bvds == NULL) {
1898 pr_err("%s: BUG: other_bvds is NULL, nsec=%u\n",
1899 __func__, conf->sec_elmnt_count);
1900 goto bad;
1901 }
1902 nsec = n / be16_to_cpu(conf->prim_elmnt_count);
1903 if (conf->sec_elmnt_seq != nsec) {
1904 for (ibvd = 1; ibvd < conf->sec_elmnt_count; ibvd++) {
1905 if (v->other_bvds[ibvd-1]->sec_elmnt_seq
1906 == nsec)
1907 break;
1908 }
1909 if (ibvd == conf->sec_elmnt_count)
1910 goto bad;
1911 conf = v->other_bvds[ibvd-1];
1912 }
1913 if (!find_index_in_bvd(ddf, conf,
1914 n - nsec*conf->sec_elmnt_count, n_bvd))
1915 goto bad;
1916 dprintf("%s: found disk %u as member %u in bvd %d of array %u\n"
1917 , __func__, n, *n_bvd, ibvd, inst);
1918 *vcl = v;
1919 return conf;
1920 }
1921 bad:
1922 pr_err("%s: Could't find disk %d in array %u\n", __func__, n, inst);
1923 return NULL;
1924 }
1925 #endif
1926
1927 static int find_phys(const struct ddf_super *ddf, be32 phys_refnum)
1928 {
1929 /* Find the entry in phys_disk which has the given refnum
1930 * and return its index
1931 */
1932 unsigned int i;
1933 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++)
1934 if (be32_eq(ddf->phys->entries[i].refnum, phys_refnum))
1935 return i;
1936 return -1;
1937 }
1938
1939 static void uuid_from_ddf_guid(const char *guid, int uuid[4])
1940 {
1941 char buf[20];
1942 struct sha1_ctx ctx;
1943 sha1_init_ctx(&ctx);
1944 sha1_process_bytes(guid, DDF_GUID_LEN, &ctx);
1945 sha1_finish_ctx(&ctx, buf);
1946 memcpy(uuid, buf, 4*4);
1947 }
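/*
 * Note: the uuid produced here is simply the first 16 bytes of SHA1 over
 * the 24-byte DDF GUID, taken as four native-endian 32-bit words, so it
 * can be re-derived identically on any host from the on-disk GUID alone.
 */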
1948
1949 static void uuid_from_super_ddf(struct supertype *st, int uuid[4])
1950 {
1951 /* The uuid returned here is used for:
1952 * uuid to put into bitmap file (Create, Grow)
1953 * uuid for backup header when saving critical section (Grow)
1954 * comparing uuids when re-adding a device into an array
1955 * In these cases the uuid required is that of the data-array,
1956 * not the device-set.
1957 * uuid to recognise same set when adding a missing device back
1958 * to an array. This is a uuid for the device-set.
1959 *
1960 * For each of these we can make do with a truncated
1961 * or hashed uuid rather than the original, as long as
1962 * everyone agrees.
1963 * In the case of SVD we assume the BVD is of interest,
1964 * though that might not be the case if a bitmap were made for
1965 * a mirrored SVD - worry about that later.
1966 * So we need to find the VD configuration record for the
1967 * relevant BVD and extract the GUID and Secondary_Element_Seq.
1968 * The first 16 bytes of the sha1 of these are used.
1969 */
1970 struct ddf_super *ddf = st->sb;
1971 struct vcl *vcl = ddf->currentconf;
1972
1973 if (vcl)
1974 uuid_of_ddf_subarray(ddf, vcl->vcnum, uuid);
1975 else
1976 uuid_from_ddf_guid(ddf->anchor.guid, uuid);
1977 }
1978
1979 static void getinfo_super_ddf(struct supertype *st, struct mdinfo *info, char *map)
1980 {
1981 struct ddf_super *ddf = st->sb;
1982 int map_disks = info->array.raid_disks;
1983 __u32 *cptr;
1984
1985 if (ddf->currentconf) {
1986 getinfo_super_ddf_bvd(st, info, map);
1987 return;
1988 }
1989 memset(info, 0, sizeof(*info));
1990
1991 info->array.raid_disks = be16_to_cpu(ddf->phys->used_pdes);
1992 info->array.level = LEVEL_CONTAINER;
1993 info->array.layout = 0;
1994 info->array.md_minor = -1;
1995 cptr = (__u32 *)(ddf->anchor.guid + 16);
1996 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
1997
1998 info->array.chunk_size = 0;
1999 info->container_enough = 1;
2000
2001 info->disk.major = 0;
2002 info->disk.minor = 0;
2003 if (ddf->dlist) {
2004 struct phys_disk_entry *pde = NULL;
2005 info->disk.number = be32_to_cpu(ddf->dlist->disk.refnum);
2006 info->disk.raid_disk = find_phys(ddf, ddf->dlist->disk.refnum);
2007
2008 info->data_offset = be64_to_cpu(ddf->phys->
2009 entries[info->disk.raid_disk].
2010 config_size);
2011 info->component_size = ddf->dlist->size - info->data_offset;
2012 if (info->disk.raid_disk >= 0)
2013 pde = ddf->phys->entries + info->disk.raid_disk;
2014 if (pde &&
2015 !(be16_to_cpu(pde->state) & DDF_Failed) &&
2016 !(be16_to_cpu(pde->state) & DDF_Missing))
2017 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
2018 else
2019 info->disk.state = 1 << MD_DISK_FAULTY;
2020
2021 } else {
2022 /* There should always be a dlist, but just in case...*/
2023 info->disk.number = -1;
2024 info->disk.raid_disk = -1;
2025 info->disk.state = (1 << MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE);
2026 }
2027 info->events = be32_to_cpu(ddf->active->seq);
2028 info->array.utime = DECADE + be32_to_cpu(ddf->active->timestamp);
2029
2030 info->recovery_start = MaxSector;
2031 info->reshape_active = 0;
2032 info->recovery_blocked = 0;
2033 info->name[0] = 0;
2034
2035 info->array.major_version = -1;
2036 info->array.minor_version = -2;
2037 strcpy(info->text_version, "ddf");
2038 info->safe_mode_delay = 0;
2039
2040 uuid_from_super_ddf(st, info->uuid);
2041
2042 if (map) {
2043 int i, e = 0;
2044 int max = be16_to_cpu(ddf->phys->max_pdes);
2045 for (i = e = 0 ; i < map_disks ; i++, e++) {
2046 while (e < max &&
2047 be32_to_cpu(ddf->phys->entries[e].refnum) == 0xffffffff)
2048 e++;
2049 if (i < info->array.raid_disks && e < max &&
2050 !(be16_to_cpu(ddf->phys->entries[e].state)
2051 & DDF_Failed))
2052 map[i] = 1;
2053 else
2054 map[i] = 0;
2055 }
2056 }
2057 }
2058
2059 /* size of name must be at least 17 bytes! */
2060 static void _ddf_array_name(char *name, const struct ddf_super *ddf, int i)
2061 {
2062 int j;
2063 memcpy(name, ddf->virt->entries[i].name, 16);
2064 name[16] = 0;
2065 for(j = 0; j < 16; j++)
2066 if (name[j] == ' ')
2067 name[j] = 0;
2068 }
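/*
 * Example: a stored name of "data1           " (space padded to 16 bytes)
 * yields "data1".  Note that every ' ' is replaced with NUL, so an
 * embedded space would also terminate the resulting C string.
 */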
2069
2070 static void getinfo_super_ddf_bvd(struct supertype *st, struct mdinfo *info, char *map)
2071 {
2072 struct ddf_super *ddf = st->sb;
2073 struct vcl *vc = ddf->currentconf;
2074 int cd = ddf->currentdev;
2075 int n_prim;
2076 int j;
2077 struct dl *dl = NULL;	/* stays NULL unless a matching device is found below */
2078 int map_disks = info->array.raid_disks;
2079 __u32 *cptr;
2080 struct vd_config *conf;
2081
2082 memset(info, 0, sizeof(*info));
2083 if (layout_ddf2md(&vc->conf, &info->array) == -1)
2084 return;
2085 info->array.md_minor = -1;
2086 cptr = (__u32 *)(vc->conf.guid + 16);
2087 info->array.ctime = DECADE + __be32_to_cpu(*cptr);
2088 info->array.utime = DECADE + be32_to_cpu(vc->conf.timestamp);
2089 info->array.chunk_size = 512 << vc->conf.chunk_shift;
2090 info->custom_array_size = be64_to_cpu(vc->conf.array_blocks);
2091
2092 conf = &vc->conf;
2093 n_prim = be16_to_cpu(conf->prim_elmnt_count);
2094 if (conf->sec_elmnt_count > 1 && cd >= n_prim) {
2095 int ibvd = cd / n_prim - 1;
2096 cd %= n_prim;
2097 conf = vc->other_bvds[ibvd];
2098 }
2099
2100 if (cd >= 0 && (unsigned)cd < ddf->mppe) {
2101 info->data_offset =
2102 be64_to_cpu(LBA_OFFSET(ddf, conf)[cd]);
2103 if (vc->block_sizes)
2104 info->component_size = vc->block_sizes[cd];
2105 else
2106 info->component_size = be64_to_cpu(conf->blocks);
2107
2108 for (dl = ddf->dlist; dl ; dl = dl->next)
2109 if (be32_eq(dl->disk.refnum, conf->phys_refnum[cd]))
2110 break;
2111 }
2112
2113 info->disk.major = 0;
2114 info->disk.minor = 0;
2115 info->disk.state = 0;
2116 if (dl && dl->pdnum >= 0) {
2117 info->disk.major = dl->major;
2118 info->disk.minor = dl->minor;
2119 info->disk.raid_disk = cd + conf->sec_elmnt_seq
2120 * be16_to_cpu(conf->prim_elmnt_count);
2121 info->disk.number = dl->pdnum;
2122 info->disk.state = 0;
2123 if (info->disk.number >= 0 &&
2124 (be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Online) &&
2125 !(be16_to_cpu(ddf->phys->entries[info->disk.number].state) & DDF_Failed))
2126 info->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
2127 info->events = be32_to_cpu(ddf->active->seq);
2128 }
2129
2130 info->container_member = ddf->currentconf->vcnum;
2131
2132 info->recovery_start = MaxSector;
2133 info->resync_start = 0;
2134 info->reshape_active = 0;
2135 info->recovery_blocked = 0;
2136 if (!(ddf->virt->entries[info->container_member].state
2137 & DDF_state_inconsistent) &&
2138 (ddf->virt->entries[info->container_member].init_state
2139 & DDF_initstate_mask)
2140 == DDF_init_full)
2141 info->resync_start = MaxSector;
2142
2143 uuid_from_super_ddf(st, info->uuid);
2144
2145 info->array.major_version = -1;
2146 info->array.minor_version = -2;
2147 sprintf(info->text_version, "/%s/%d",
2148 st->container_devnm,
2149 info->container_member);
2150 info->safe_mode_delay = DDF_SAFE_MODE_DELAY;
2151
2152 _ddf_array_name(info->name, ddf, info->container_member);
2153
2154 if (map)
2155 for (j = 0; j < map_disks; j++) {
2156 map[j] = 0;
2157 if (j < info->array.raid_disks) {
2158 int i = find_phys(ddf, vc->conf.phys_refnum[j]);
2159 if (i >= 0 &&
2160 (be16_to_cpu(ddf->phys->entries[i].state)
2161 & DDF_Online) &&
2162 !(be16_to_cpu(ddf->phys->entries[i].state)
2163 & DDF_Failed))
2164 map[i] = 1;
2165 }
2166 }
2167 }
2168
2169 static int update_super_ddf(struct supertype *st, struct mdinfo *info,
2170 char *update,
2171 char *devname, int verbose,
2172 int uuid_set, char *homehost)
2173 {
2174 /* For 'assemble' and 'force' we need to return non-zero if any
2175 * change was made. For others, the return value is ignored.
2176 * Update options are:
2177 * force-one : This device looks a bit old but needs to be included,
2178 * update age info appropriately.
2179 * assemble: clear any 'faulty' flag to allow this device to
2180 * be assembled.
2181 * force-array: Array is degraded but being forced, mark it clean
2182 * if that will be needed to assemble it.
2183 *
2184 * newdev: not used ????
2185 * grow: Array has gained a new device - this is currently for
2186 * linear only
2187 * resync: mark as dirty so a resync will happen.
2188 * uuid: Change the uuid of the array to match what is given
2189 * homehost: update the recorded homehost
2190 * name: update the name - preserving the homehost
2191 * _reshape_progress: record new reshape_progress position.
2192 *
2193 * Following are not relevant for this version:
2194 * sparc2.2 : update from old dodgy metadata
2195 * super-minor: change the preferred_minor number
2196 * summaries: update redundant counters.
2197 */
2198 int rv = 0;
2199 // struct ddf_super *ddf = st->sb;
2200 // struct vd_config *vd = find_vdcr(ddf, info->container_member);
2201 // struct virtual_entry *ve = find_ve(ddf);
2202
2203 /* we don't need to handle "force-*" or "assemble" as
2204 * there is no need to 'trick' the kernel. When the metadata is
2205 * first updated to activate the array, all the implied modifications
2206 * will just happen.
2207 */
2208
2209 if (strcmp(update, "grow") == 0) {
2210 /* FIXME */
2211 } else if (strcmp(update, "resync") == 0) {
2212 // info->resync_checkpoint = 0;
2213 } else if (strcmp(update, "homehost") == 0) {
2214 /* homehost is stored in controller->vendor_data,
2215 * at least it is when we are the vendor
2216 */
2217 // if (info->vendor_is_local)
2218 // strcpy(ddf->controller.vendor_data, homehost);
2219 rv = -1;
2220 } else if (strcmp(update, "name") == 0) {
2221 /* name is stored in virtual_entry->name */
2222 // memset(ve->name, ' ', 16);
2223 // strncpy(ve->name, info->name, 16);
2224 rv = -1;
2225 } else if (strcmp(update, "_reshape_progress") == 0) {
2226 /* We don't support reshape yet */
2227 } else if (strcmp(update, "assemble") == 0 ) {
2228 /* Do nothing, just succeed */
2229 rv = 0;
2230 } else
2231 rv = -1;
2232
2233 // update_all_csum(ddf);
2234
2235 return rv;
2236 }
2237
2238 static void make_header_guid(char *guid)
2239 {
2240 be32 stamp;
2241 /* Create a DDF Header or Virtual Disk GUID */
2242
2243 /* 24 bytes of fiction required.
2244 * first 8 are a 'vendor-id' - "Linux-MD"
2245 * next 8 are controller type.. how about 0X DEAD BEEF 0000 0000
2246 * Remaining 8 random number plus timestamp
2247 */
2248 memcpy(guid, T10, sizeof(T10));
2249 stamp = cpu_to_be32(0xdeadbeef);
2250 memcpy(guid+8, &stamp, 4);
2251 stamp = cpu_to_be32(0);
2252 memcpy(guid+12, &stamp, 4);
2253 stamp = cpu_to_be32(time(0) - DECADE);
2254 memcpy(guid+16, &stamp, 4);
2255 stamp._v32 = random32();
2256 memcpy(guid+20, &stamp, 4);
2257 }
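/*
 * Resulting GUID layout (illustrative summary of the code above):
 *   bytes  0- 7  "Linux-MD"
 *   bytes  8-11  0xDEADBEEF (big-endian)
 *   bytes 12-15  zero
 *   bytes 16-19  creation time, seconds since 1980 (big-endian)
 *   bytes 20-23  random
 */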
2258
2259 static unsigned int find_unused_vde(const struct ddf_super *ddf)
2260 {
2261 unsigned int i;
2262 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2263 if (all_ff(ddf->virt->entries[i].guid))
2264 return i;
2265 }
2266 return DDF_NOTFOUND;
2267 }
2268
2269 static unsigned int find_vde_by_name(const struct ddf_super *ddf,
2270 const char *name)
2271 {
2272 unsigned int i;
2273 if (name == NULL)
2274 return DDF_NOTFOUND;
2275 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++) {
2276 if (all_ff(ddf->virt->entries[i].guid))
2277 continue;
2278 if (!strncmp(name, ddf->virt->entries[i].name,
2279 sizeof(ddf->virt->entries[i].name)))
2280 return i;
2281 }
2282 return DDF_NOTFOUND;
2283 }
2284
2285 #ifndef MDASSEMBLE
2286 static unsigned int find_vde_by_guid(const struct ddf_super *ddf,
2287 const char *guid)
2288 {
2289 unsigned int i;
2290 if (guid == NULL || all_ff(guid))
2291 return DDF_NOTFOUND;
2292 for (i = 0; i < be16_to_cpu(ddf->virt->max_vdes); i++)
2293 if (!memcmp(ddf->virt->entries[i].guid, guid, DDF_GUID_LEN))
2294 return i;
2295 return DDF_NOTFOUND;
2296 }
2297 #endif
2298
2299 static int init_super_ddf(struct supertype *st,
2300 mdu_array_info_t *info,
2301 unsigned long long size, char *name, char *homehost,
2302 int *uuid, unsigned long long data_offset)
2303 {
2304 /* This is primarily called by Create when creating a new array.
2305 * We will then get add_to_super called for each component, and then
2306 * write_init_super called to write it out to each device.
2307 * For DDF, Create can create on fresh devices or on a pre-existing
2308 * array.
2309 * To create on a pre-existing array a different method will be called.
2310 * This one is just for fresh drives.
2311 *
2312 * We need to create the entire 'ddf' structure which includes:
2313 * DDF headers - these are easy.
2314 * Controller data - a Sector describing this controller .. not that
2315 * this is a controller exactly.
2316 * Physical Disk Record - one entry per device, so
2317 * leave plenty of space.
2318 * Virtual Disk Records - again, just leave plenty of space.
2319 * This just lists VDs, doesn't give details.
2320 * Config records - describe the VDs that use this disk
2321 * DiskData - describes 'this' device.
2322 * BadBlockManagement - empty
2323 * Diag Space - empty
2324 * Vendor Logs - Could we put bitmaps here?
2325 *
2326 */
2327 struct ddf_super *ddf;
2328 char hostname[17];
2329 int hostlen;
2330 int max_phys_disks, max_virt_disks;
2331 unsigned long long sector;
2332 int clen;
2333 int i;
2334 int pdsize, vdsize;
2335 struct phys_disk *pd;
2336 struct virtual_disk *vd;
2337
2338 if (st->sb)
2339 return init_super_ddf_bvd(st, info, size, name, homehost, uuid,
2340 data_offset);
2341
2342 if (posix_memalign((void**)&ddf, 512, sizeof(*ddf)) != 0) {
2343 pr_err("%s could not allocate superblock\n", __func__);
2344 return 0;
2345 }
2346 memset(ddf, 0, sizeof(*ddf));
2347 st->sb = ddf;
2348
2349 if (info == NULL) {
2350 /* zeroing superblock */
2351 return 0;
2352 }
2353
2354 /* At least 32MB *must* be reserved for the ddf. So let's just
2355 * start 32MB from the end, and put the primary header there.
2356 * Don't do secondary for now.
2357 * We don't know exactly where that will be yet as it could be
2358 * different on each device. So just set up the lengths.
2359 */
2360
2361 ddf->anchor.magic = DDF_HEADER_MAGIC;
2362 make_header_guid(ddf->anchor.guid);
2363
2364 memcpy(ddf->anchor.revision, DDF_REVISION_2, 8);
2365 ddf->anchor.seq = cpu_to_be32(1);
2366 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
2367 ddf->anchor.openflag = 0xFF;
2368 ddf->anchor.foreignflag = 0;
2369 ddf->anchor.enforcegroups = 0; /* Is this best?? */
2370 ddf->anchor.pad0 = 0xff;
2371 memset(ddf->anchor.pad1, 0xff, 12);
2372 memset(ddf->anchor.header_ext, 0xff, 32);
2373 ddf->anchor.primary_lba = cpu_to_be64(~(__u64)0);
2374 ddf->anchor.secondary_lba = cpu_to_be64(~(__u64)0);
2375 ddf->anchor.type = DDF_HEADER_ANCHOR;
2376 memset(ddf->anchor.pad2, 0xff, 3);
2377 ddf->anchor.workspace_len = cpu_to_be32(32768); /* Must be reserved */
2378 /* Put this at bottom of 32M reserved.. */
2379 ddf->anchor.workspace_lba = cpu_to_be64(~(__u64)0);
2380 max_phys_disks = 1023; /* Should be enough, 4095 is also allowed */
2381 ddf->anchor.max_pd_entries = cpu_to_be16(max_phys_disks);
2382 max_virt_disks = 255; /* 15, 63, 255, 1024, 4095 are all allowed */
2383 ddf->anchor.max_vd_entries = cpu_to_be16(max_virt_disks);
2384 ddf->max_part = 64;
2385 ddf->anchor.max_partitions = cpu_to_be16(ddf->max_part);
2386 ddf->mppe = 256; /* 16, 64, 256, 1024, 4096 are all allowed */
2387 ddf->conf_rec_len = 1 + ROUND_UP(ddf->mppe * (4+8), 512)/512;
2388 ddf->anchor.config_record_len = cpu_to_be16(ddf->conf_rec_len);
2389 ddf->anchor.max_primary_element_entries = cpu_to_be16(ddf->mppe);
2390 memset(ddf->anchor.pad3, 0xff, 54);
2391 /* Controller section is one sector long immediately
2392 * after the ddf header */
2393 sector = 1;
2394 ddf->anchor.controller_section_offset = cpu_to_be32(sector);
2395 ddf->anchor.controller_section_length = cpu_to_be32(1);
2396 sector += 1;
2397
2398 /* phys is 8 sectors after that */
2399 pdsize = ROUND_UP(sizeof(struct phys_disk) +
2400 sizeof(struct phys_disk_entry)*max_phys_disks,
2401 512);
2402 switch(pdsize/512) {
2403 case 2: case 8: case 32: case 128: case 512: break;
2404 default: abort();
2405 }
2406 ddf->anchor.phys_section_offset = cpu_to_be32(sector);
2407 ddf->anchor.phys_section_length =
2408 cpu_to_be32(pdsize/512); /* max_primary_element_entries/8 */
2409 sector += pdsize/512;
2410
2411 /* virt is another 32 sectors */
2412 vdsize = ROUND_UP(sizeof(struct virtual_disk) +
2413 sizeof(struct virtual_entry) * max_virt_disks,
2414 512);
2415 switch(vdsize/512) {
2416 case 2: case 8: case 32: case 128: case 512: break;
2417 default: abort();
2418 }
2419 ddf->anchor.virt_section_offset = cpu_to_be32(sector);
2420 ddf->anchor.virt_section_length =
2421 cpu_to_be32(vdsize/512); /* max_vd_entries/8 */
2422 sector += vdsize/512;
2423
2424 clen = ddf->conf_rec_len * (ddf->max_part+1);
2425 ddf->anchor.config_section_offset = cpu_to_be32(sector);
2426 ddf->anchor.config_section_length = cpu_to_be32(clen);
2427 sector += clen;
2428
2429 ddf->anchor.data_section_offset = cpu_to_be32(sector);
2430 ddf->anchor.data_section_length = cpu_to_be32(1);
2431 sector += 1;
2432
2433 ddf->anchor.bbm_section_length = cpu_to_be32(0);
2434 ddf->anchor.bbm_section_offset = cpu_to_be32(0xFFFFFFFF);
2435 ddf->anchor.diag_space_length = cpu_to_be32(0);
2436 ddf->anchor.diag_space_offset = cpu_to_be32(0xFFFFFFFF);
2437 ddf->anchor.vendor_length = cpu_to_be32(0);
2438 ddf->anchor.vendor_offset = cpu_to_be32(0xFFFFFFFF);
2439
2440 memset(ddf->anchor.pad4, 0xff, 256);
2441
2442 memcpy(&ddf->primary, &ddf->anchor, 512);
2443 memcpy(&ddf->secondary, &ddf->anchor, 512);
2444
2445 ddf->primary.openflag = 1; /* I guess.. */
2446 ddf->primary.type = DDF_HEADER_PRIMARY;
2447
2448 ddf->secondary.openflag = 1; /* I guess.. */
2449 ddf->secondary.type = DDF_HEADER_SECONDARY;
2450
2451 ddf->active = &ddf->primary;
2452
2453 ddf->controller.magic = DDF_CONTROLLER_MAGIC;
2454
2455 /* 24 more bytes of fiction required.
2456 * first 8 are a 'vendor-id' - "Linux-MD"
2457 * Remaining 16 are serial number.... maybe a hostname would do?
2458 */
2459 memcpy(ddf->controller.guid, T10, sizeof(T10));
2460 gethostname(hostname, sizeof(hostname));
2461 hostname[sizeof(hostname) - 1] = 0;
2462 hostlen = strlen(hostname);
2463 memcpy(ddf->controller.guid + 24 - hostlen, hostname, hostlen);
2464 for (i = strlen(T10) ; i+hostlen < 24; i++)
2465 ddf->controller.guid[i] = ' ';
2466
2467 ddf->controller.type.vendor_id = cpu_to_be16(0xDEAD);
2468 ddf->controller.type.device_id = cpu_to_be16(0xBEEF);
2469 ddf->controller.type.sub_vendor_id = cpu_to_be16(0);
2470 ddf->controller.type.sub_device_id = cpu_to_be16(0);
2471 memcpy(ddf->controller.product_id, "What Is My PID??", 16);
2472 memset(ddf->controller.pad, 0xff, 8);
2473 memset(ddf->controller.vendor_data, 0xff, 448);
2474 if (homehost && strlen(homehost) < 440)
2475 strcpy((char*)ddf->controller.vendor_data, homehost);
2476
2477 if (posix_memalign((void**)&pd, 512, pdsize) != 0) {
2478 pr_err("%s could not allocate pd\n", __func__);
2479 return 0;
2480 }
2481 ddf->phys = pd;
2482 ddf->pdsize = pdsize;
2483
2484 memset(pd, 0xff, pdsize);
2485 memset(pd, 0, sizeof(*pd));
2486 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2487 pd->used_pdes = cpu_to_be16(0);
2488 pd->max_pdes = cpu_to_be16(max_phys_disks);
2489 memset(pd->pad, 0xff, 52);
2490 for (i = 0; i < max_phys_disks; i++)
2491 memset(pd->entries[i].guid, 0xff, DDF_GUID_LEN);
2492
2493 if (posix_memalign((void**)&vd, 512, vdsize) != 0) {
2494 pr_err("%s could not allocate vd\n", __func__);
2495 return 0;
2496 }
2497 ddf->virt = vd;
2498 ddf->vdsize = vdsize;
2499 memset(vd, 0, vdsize);
2500 vd->magic = DDF_VIRT_RECORDS_MAGIC;
2501 vd->populated_vdes = cpu_to_be16(0);
2502 vd->max_vdes = cpu_to_be16(max_virt_disks);
2503 memset(vd->pad, 0xff, 52);
2504
2505 for (i=0; i<max_virt_disks; i++)
2506 memset(&vd->entries[i], 0xff, sizeof(struct virtual_entry));
2507
2508 st->sb = ddf;
2509 ddf_set_updates_pending(ddf, NULL);
2510 return 1;
2511 }
2512
2513 static int chunk_to_shift(int chunksize)
2514 {
2515 return ffs(chunksize/512)-1;
2516 }
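/*
 * Example: chunk_to_shift(65536) == 7, since 65536/512 == 128 == 1<<7;
 * the reverse mapping used elsewhere is chunk_size = 512 << chunk_shift.
 */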
2517
2518 #ifndef MDASSEMBLE
2519 struct extent {
2520 unsigned long long start, size;
2521 };
2522 static int cmp_extent(const void *av, const void *bv)
2523 {
2524 const struct extent *a = av;
2525 const struct extent *b = bv;
2526 if (a->start < b->start)
2527 return -1;
2528 if (a->start > b->start)
2529 return 1;
2530 return 0;
2531 }
2532
2533 static struct extent *get_extents(struct ddf_super *ddf, struct dl *dl)
2534 {
2535 /* Find a list of used extents on the given physical device
2536 * (dnum) of the given ddf.
2537 * Return a malloced array of 'struct extent'
2538 */
2539 struct extent *rv;
2540 int n = 0;
2541 unsigned int i;
2542 __u16 state;
2543
2544 if (dl->pdnum < 0)
2545 return NULL;
2546 state = be16_to_cpu(ddf->phys->entries[dl->pdnum].state);
2547
2548 if ((state & (DDF_Online|DDF_Failed|DDF_Missing)) != DDF_Online)
2549 return NULL;
2550
2551 rv = xmalloc(sizeof(struct extent) * (ddf->max_part + 2));
2552
2553 for (i = 0; i < ddf->max_part; i++) {
2554 const struct vd_config *bvd;
2555 unsigned int ibvd;
2556 struct vcl *v = dl->vlist[i];
2557 if (v == NULL ||
2558 get_pd_index_from_refnum(v, dl->disk.refnum, ddf->mppe,
2559 &bvd, &ibvd) == DDF_NOTFOUND)
2560 continue;
2561 rv[n].start = be64_to_cpu(LBA_OFFSET(ddf, bvd)[ibvd]);
2562 rv[n].size = be64_to_cpu(bvd->blocks);
2563 n++;
2564 }
2565 qsort(rv, n, sizeof(*rv), cmp_extent);
2566
2567 rv[n].start = be64_to_cpu(ddf->phys->entries[dl->pdnum].config_size);
2568 rv[n].size = 0;
2569 return rv;
2570 }
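/*
 * The returned array is sorted by start and terminated by a sentinel
 * entry whose start is the device's config_size and whose size is 0,
 * e.g. (illustrative values) { {2048, 1024}, {8192, 4096}, {config_size, 0} }.
 */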
2571
2572 static unsigned long long find_space(
2573 struct ddf_super *ddf, struct dl *dl,
2574 unsigned long long data_offset,
2575 unsigned long long *size)
2576 {
2577 /* Find if the requested amount of space is available.
2578 * If it is, return start.
2579 * If not, set *size to largest space.
2580 * If data_offset != INVALID_SECTORS, then the space must start
2581 * at this location.
2582 */
2583 struct extent *e = get_extents(ddf, dl);
2584 int i = 0;
2585 unsigned long long pos = 0;
2586 unsigned long long max_size = 0;
2587
2588 if (!e) {
2589 *size = 0;
2590 return INVALID_SECTORS;
2591 }
2592 do {
2593 unsigned long long esize = e[i].start - pos;
2594 if (data_offset != INVALID_SECTORS &&
2595 pos <= data_offset &&
2596 e[i].start > data_offset) {
2597 pos = data_offset;
2598 esize = e[i].start - pos;
2599 }
2600 if (data_offset != INVALID_SECTORS &&
2601 pos != data_offset) {
2602 i++;
2603 continue;
2604 }
2605 if (esize >= *size) {
2606 /* Found! */
2607 free(e);
2608 return pos;
2609 }
2610 if (esize > max_size)
2611 max_size = esize;
2612 pos = e[i].start + e[i].size;
2613 i++;
2614 } while (e[i-1].size);
2615 *size = max_size;
2616 free(e);
2617 return INVALID_SECTORS;
2618 }
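/*
 * Worked example (illustrative): with extents { {1000, 500}, {cfg, 0} },
 * data_offset == INVALID_SECTORS and *size == 2000 sectors, the gap
 * before sector 1000 is too small (1000 sectors), so the first fit is
 * the gap starting at 1500; provided cfg - 1500 >= 2000, find_space()
 * returns 1500.
 */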
2619 #endif
2620
2621 static int init_super_ddf_bvd(struct supertype *st,
2622 mdu_array_info_t *info,
2623 unsigned long long size,
2624 char *name, char *homehost,
2625 int *uuid, unsigned long long data_offset)
2626 {
2627 /* We are creating a BVD inside a pre-existing container.
2628 * so st->sb is already set.
2629 * We need to create a new vd_config and a new virtual_entry
2630 */
2631 struct ddf_super *ddf = st->sb;
2632 unsigned int venum, i;
2633 struct virtual_entry *ve;
2634 struct vcl *vcl;
2635 struct vd_config *vc;
2636
2637 if (find_vde_by_name(ddf, name) != DDF_NOTFOUND) {
2638 pr_err("This ddf already has an array called %s\n", name);
2639 return 0;
2640 }
2641 venum = find_unused_vde(ddf);
2642 if (venum == DDF_NOTFOUND) {
2643 pr_err("Cannot find spare slot for virtual disk\n");
2644 return 0;
2645 }
2646 ve = &ddf->virt->entries[venum];
2647
2648 /* A Virtual Disk GUID contains the T10 Vendor ID, controller type,
2649 * timestamp, random number
2650 */
2651 make_header_guid(ve->guid);
2652 ve->unit = cpu_to_be16(info->md_minor);
2653 ve->pad0 = 0xFFFF;
2654 ve->guid_crc._v16 = crc32(0, (unsigned char *)ddf->anchor.guid,
2655 DDF_GUID_LEN);
2656 ve->type = cpu_to_be16(0);
2657 ve->state = DDF_state_degraded; /* Will be modified as devices are added */
2658 if (info->state & 1) /* clean */
2659 ve->init_state = DDF_init_full;
2660 else
2661 ve->init_state = DDF_init_not;
2662
2663 memset(ve->pad1, 0xff, 14);
2664 memset(ve->name, ' ', 16);
2665 if (name)
2666 strncpy(ve->name, name, 16);
2667 ddf->virt->populated_vdes =
2668 cpu_to_be16(be16_to_cpu(ddf->virt->populated_vdes)+1);
2669
2670 /* Now create a new vd_config */
2671 if (posix_memalign((void**)&vcl, 512,
2672 (offsetof(struct vcl, conf) + ddf->conf_rec_len * 512)) != 0) {
2673 pr_err("%s could not allocate vd_config\n", __func__);
2674 return 0;
2675 }
2676 vcl->vcnum = venum;
2677 vcl->block_sizes = NULL; /* FIXME not for CONCAT */
2678 vc = &vcl->conf;
2679
2680 vc->magic = DDF_VD_CONF_MAGIC;
2681 memcpy(vc->guid, ve->guid, DDF_GUID_LEN);
2682 vc->timestamp = cpu_to_be32(time(0)-DECADE);
2683 vc->seqnum = cpu_to_be32(1);
2684 memset(vc->pad0, 0xff, 24);
2685 vc->chunk_shift = chunk_to_shift(info->chunk_size);
2686 if (layout_md2ddf(info, vc) == -1 ||
2687 be16_to_cpu(vc->prim_elmnt_count) > ddf->mppe) {
2688 pr_err("%s: unsupported RAID level/layout %d/%d with %d disks\n",
2689 __func__, info->level, info->layout, info->raid_disks);
2690 free(vcl);
2691 return 0;
2692 }
2693 vc->sec_elmnt_seq = 0;
2694 if (alloc_other_bvds(ddf, vcl) != 0) {
2695 pr_err("%s could not allocate other bvds\n",
2696 __func__);
2697 free(vcl);
2698 return 0;
2699 }
2700 vc->blocks = cpu_to_be64(info->size * 2);
2701 vc->array_blocks = cpu_to_be64(
2702 calc_array_size(info->level, info->raid_disks, info->layout,
2703 info->chunk_size, info->size*2));
2704 memset(vc->pad1, 0xff, 8);
2705 vc->spare_refs[0] = cpu_to_be32(0xffffffff);
2706 vc->spare_refs[1] = cpu_to_be32(0xffffffff);
2707 vc->spare_refs[2] = cpu_to_be32(0xffffffff);
2708 vc->spare_refs[3] = cpu_to_be32(0xffffffff);
2709 vc->spare_refs[4] = cpu_to_be32(0xffffffff);
2710 vc->spare_refs[5] = cpu_to_be32(0xffffffff);
2711 vc->spare_refs[6] = cpu_to_be32(0xffffffff);
2712 vc->spare_refs[7] = cpu_to_be32(0xffffffff);
2713 memset(vc->cache_pol, 0, 8);
2714 vc->bg_rate = 0x80;
2715 memset(vc->pad2, 0xff, 3);
2716 memset(vc->pad3, 0xff, 52);
2717 memset(vc->pad4, 0xff, 192);
2718 memset(vc->v0, 0xff, 32);
2719 memset(vc->v1, 0xff, 32);
2720 memset(vc->v2, 0xff, 16);
2721 memset(vc->v3, 0xff, 16);
2722 memset(vc->vendor, 0xff, 32);
2723
2724 memset(vc->phys_refnum, 0xff, 4*ddf->mppe);
2725 memset(vc->phys_refnum+ddf->mppe, 0x00, 8*ddf->mppe);
2726
2727 for (i = 1; i < vc->sec_elmnt_count; i++) {
2728 memcpy(vcl->other_bvds[i-1], vc, ddf->conf_rec_len * 512);
2729 vcl->other_bvds[i-1]->sec_elmnt_seq = i;
2730 }
2731
2732 vcl->next = ddf->conflist;
2733 ddf->conflist = vcl;
2734 ddf->currentconf = vcl;
2735 ddf_set_updates_pending(ddf, NULL);
2736 return 1;
2737 }
2738
2739 #ifndef MDASSEMBLE
2740 static void add_to_super_ddf_bvd(struct supertype *st,
2741 mdu_disk_info_t *dk, int fd, char *devname,
2742 unsigned long long data_offset)
2743 {
2744 /* fd and devname identify a device within the ddf container (st).
2745 * dk identifies a location in the new BVD.
2746 * We need to find suitable free space in that device and update
2747 * the phys_refnum and lba_offset for the newly created vd_config.
2748 * We might also want to update the type in the phys_disk
2749 * section.
2750 *
2751 * Alternately: fd == -1 and we have already chosen which device to
2752 * use, and have recorded it in dl->raiddisk.
2753 */
2754 struct dl *dl;
2755 struct ddf_super *ddf = st->sb;
2756 struct vd_config *vc;
2757 unsigned int i;
2758 unsigned long long blocks, pos;
2759 unsigned int raid_disk = dk->raid_disk;
2760
2761 if (fd == -1) {
2762 for (dl = ddf->dlist; dl ; dl = dl->next)
2763 if (dl->raiddisk == dk->raid_disk)
2764 break;
2765 } else {
2766 for (dl = ddf->dlist; dl ; dl = dl->next)
2767 if (dl->major == dk->major &&
2768 dl->minor == dk->minor)
2769 break;
2770 }
2771 if (!dl || dl->pdnum < 0 || ! (dk->state & (1<<MD_DISK_SYNC)))
2772 return;
2773
2774 vc = &ddf->currentconf->conf;
2775 if (vc->sec_elmnt_count > 1) {
2776 unsigned int n = be16_to_cpu(vc->prim_elmnt_count);
2777 if (raid_disk >= n)
2778 vc = ddf->currentconf->other_bvds[raid_disk / n - 1];
2779 raid_disk %= n;
2780 }
2781
2782 blocks = be64_to_cpu(vc->blocks);
2783 if (ddf->currentconf->block_sizes)
2784 blocks = ddf->currentconf->block_sizes[dk->raid_disk];
2785
2786 pos = find_space(ddf, dl, data_offset, &blocks);
2787 if (pos == INVALID_SECTORS)
2788 return;
2789
2790 ddf->currentdev = dk->raid_disk;
2791 vc->phys_refnum[raid_disk] = dl->disk.refnum;
2792 LBA_OFFSET(ddf, vc)[raid_disk] = cpu_to_be64(pos);
2793
2794 for (i = 0; i < ddf->max_part ; i++)
2795 if (dl->vlist[i] == NULL)
2796 break;
2797 if (i == ddf->max_part)
2798 return;
2799 dl->vlist[i] = ddf->currentconf;
2800
2801 if (fd >= 0)
2802 dl->fd = fd;
2803 if (devname)
2804 dl->devname = devname;
2805
2806 /* Check if we can mark array as optimal yet */
2807 i = ddf->currentconf->vcnum;
2808 ddf->virt->entries[i].state =
2809 (ddf->virt->entries[i].state & ~DDF_state_mask)
2810 | get_svd_state(ddf, ddf->currentconf);
2811 be16_clear(ddf->phys->entries[dl->pdnum].type,
2812 cpu_to_be16(DDF_Global_Spare));
2813 be16_set(ddf->phys->entries[dl->pdnum].type,
2814 cpu_to_be16(DDF_Active_in_VD));
2815 dprintf("%s: added disk %d/%08x to VD %d/%s as disk %d\n",
2816 __func__, dl->pdnum, be32_to_cpu(dl->disk.refnum),
2817 ddf->currentconf->vcnum, guid_str(vc->guid),
2818 dk->raid_disk);
2819 ddf_set_updates_pending(ddf, vc);
2820 }
2821
2822 static unsigned int find_unused_pde(const struct ddf_super *ddf)
2823 {
2824 unsigned int i;
2825 for (i = 0; i < be16_to_cpu(ddf->phys->max_pdes); i++) {
2826 if (all_ff(ddf->phys->entries[i].guid))
2827 return i;
2828 }
2829 return DDF_NOTFOUND;
2830 }
2831
2832 static void _set_config_size(struct phys_disk_entry *pde, const struct dl *dl)
2833 {
2834 __u64 cfs, t;
2835 cfs = min(dl->size - 32*1024*2ULL, be64_to_cpu(dl->primary_lba));
2836 t = be64_to_cpu(dl->secondary_lba);
2837 if (t != ~(__u64)0)
2838 cfs = min(cfs, t);
2839 /*
2840 * Some vendor DDF structures interpret workspace_lba
2841 * very differently than we do: Make a sanity check on the value.
2842 */
2843 t = be64_to_cpu(dl->workspace_lba);
2844 if (t < cfs) {
2845 __u64 wsp = cfs - t;
2846 if (wsp > 1024*1024*2ULL && wsp > dl->size / 16) {
2847 pr_err("%s: %x:%x: workspace size 0x%llx too big, ignoring\n",
2848 __func__, dl->major, dl->minor, wsp);
2849 } else
2850 cfs = t;
2851 }
2852 pde->config_size = cpu_to_be64(cfs);
2853 dprintf("%s: %x:%x config_size %llx, DDF structure is %llx blocks\n",
2854 __func__, dl->major, dl->minor, cfs, dl->size-cfs);
2855 }
2856
2857 /* Add a device to a container, either while creating it or while
2858 * expanding a pre-existing container
2859 */
2860 static int add_to_super_ddf(struct supertype *st,
2861 mdu_disk_info_t *dk, int fd, char *devname,
2862 unsigned long long data_offset)
2863 {
2864 struct ddf_super *ddf = st->sb;
2865 struct dl *dd;
2866 time_t now;
2867 struct tm *tm;
2868 unsigned long long size;
2869 struct phys_disk_entry *pde;
2870 unsigned int n, i;
2871 struct stat stb;
2872 __u32 *tptr;
2873
2874 if (ddf->currentconf) {
2875 add_to_super_ddf_bvd(st, dk, fd, devname, data_offset);
2876 return 0;
2877 }
2878
2879 /* This is device numbered dk->number. We need to create
2880 * a phys_disk entry and a more detailed disk_data entry.
2881 */
2882 fstat(fd, &stb);
2883 n = find_unused_pde(ddf);
2884 if (n == DDF_NOTFOUND) {
2885 pr_err("%s: No free slot in array, cannot add disk\n",
2886 __func__);
2887 return 1;
2888 }
2889 pde = &ddf->phys->entries[n];
2890 get_dev_size(fd, NULL, &size);
2891 if (size <= 32*1024*1024) {
2892 pr_err("%s: device size must be at least 32MB\n",
2893 __func__);
2894 return 1;
2895 }
2896 size >>= 9;
2897
2898 if (posix_memalign((void**)&dd, 512,
2899 sizeof(*dd) + sizeof(dd->vlist[0]) * ddf->max_part) != 0) {
2900 pr_err("%s could allocate buffer for new disk, aborting\n",
2901 __func__);
2902 return 1;
2903 }
2904 dd->major = major(stb.st_rdev);
2905 dd->minor = minor(stb.st_rdev);
2906 dd->devname = devname;
2907 dd->fd = fd;
2908 dd->spare = NULL;
2909
2910 dd->disk.magic = DDF_PHYS_DATA_MAGIC;
2911 now = time(0);
2912 tm = localtime(&now);
2913 sprintf(dd->disk.guid, "%8s%04d%02d%02d",
2914 T10, tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday);
2915 tptr = (__u32 *)(dd->disk.guid + 16);
2916 *tptr++ = random32();
2917 *tptr = random32();
2918
2919 do {
2920 /* Cannot be bothered finding a CRC of some irrelevant details*/
2921 dd->disk.refnum._v32 = random32();
2922 for (i = be16_to_cpu(ddf->active->max_pd_entries);
2923 i > 0; i--)
2924 if (be32_eq(ddf->phys->entries[i-1].refnum,
2925 dd->disk.refnum))
2926 break;
2927 } while (i > 0);
2928
2929 dd->disk.forced_ref = 1;
2930 dd->disk.forced_guid = 1;
2931 memset(dd->disk.vendor, ' ', 32);
2932 memcpy(dd->disk.vendor, "Linux", 5);
2933 memset(dd->disk.pad, 0xff, 442);
2934 for (i = 0; i < ddf->max_part ; i++)
2935 dd->vlist[i] = NULL;
2936
2937 dd->pdnum = n;
2938
2939 if (st->update_tail) {
2940 int len = (sizeof(struct phys_disk) +
2941 sizeof(struct phys_disk_entry));
2942 struct phys_disk *pd;
2943
2944 pd = xmalloc(len);
2945 pd->magic = DDF_PHYS_RECORDS_MAGIC;
2946 pd->used_pdes = cpu_to_be16(n);
2947 pde = &pd->entries[0];
2948 dd->mdupdate = pd;
2949 } else
2950 ddf->phys->used_pdes = cpu_to_be16(
2951 1 + be16_to_cpu(ddf->phys->used_pdes));
2952
2953 memcpy(pde->guid, dd->disk.guid, DDF_GUID_LEN);
2954 pde->refnum = dd->disk.refnum;
2955 pde->type = cpu_to_be16(DDF_Forced_PD_GUID | DDF_Global_Spare);
2956 pde->state = cpu_to_be16(DDF_Online);
2957 dd->size = size;
2958 /*
2959 * If there is already a device in dlist, try to reserve the same
2960 * amount of workspace. Otherwise, use 32MB.
2961 * We checked disk size above already.
2962 */
2963 #define __calc_lba(new, old, lba, mb) do { \
2964 unsigned long long dif; \
2965 if ((old) != NULL) \
2966 dif = (old)->size - be64_to_cpu((old)->lba); \
2967 else \
2968 dif = (new)->size; \
2969 if ((new)->size > dif) \
2970 (new)->lba = cpu_to_be64((new)->size - dif); \
2971 else \
2972 (new)->lba = cpu_to_be64((new)->size - (mb*1024*2)); \
2973 } while (0)
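/*
 * In other words: reserve the same gap between each LBA and the end of
 * the disk as on the first disk in dlist, falling back to 'mb' MiB when
 * this is the first disk or the first disk's gap would not fit.  E.g. on
 * a fresh container the defaults below become workspace_lba = size - 32MiB,
 * primary_lba = size - 16MiB and secondary_lba = size - 32MiB (all
 * expressed in 512-byte sectors).
 */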
2974 __calc_lba(dd, ddf->dlist, workspace_lba, 32);
2975 __calc_lba(dd, ddf->dlist, primary_lba, 16);
2976 if (ddf->dlist == NULL ||
2977 be64_to_cpu(ddf->dlist->secondary_lba) != ~(__u64)0)
2978 __calc_lba(dd, ddf->dlist, secondary_lba, 32);
2979 _set_config_size(pde, dd);
2980
2981 sprintf(pde->path, "%17.17s","Information: nil") ;
2982 memset(pde->pad, 0xff, 6);
2983
2984 if (st->update_tail) {
2985 dd->next = ddf->add_list;
2986 ddf->add_list = dd;
2987 } else {
2988 dd->next = ddf->dlist;
2989 ddf->dlist = dd;
2990 ddf_set_updates_pending(ddf, NULL);
2991 }
2992
2993 return 0;
2994 }
2995
2996 static int remove_from_super_ddf(struct supertype *st, mdu_disk_info_t *dk)
2997 {
2998 struct ddf_super *ddf = st->sb;
2999 struct dl *dl;
3000
3001 /* mdmon has noticed that this disk (dk->major/dk->minor) has
3002 * disappeared from the container.
3003 * We need to arrange that it disappears from the metadata and
3004 * internal data structures too.
3005 * Most of the work is done by ddf_process_update which edits
3006 * the metadata and closes the file handle and attaches the memory
3007 * where free_updates will free it.
3008 */
3009 for (dl = ddf->dlist; dl ; dl = dl->next)
3010 if (dl->major == dk->major &&
3011 dl->minor == dk->minor)
3012 break;
3013 if (!dl || dl->pdnum < 0)
3014 return -1;
3015
3016 if (st->update_tail) {
3017 int len = (sizeof(struct phys_disk) +
3018 sizeof(struct phys_disk_entry));
3019 struct phys_disk *pd;
3020
3021 pd = xmalloc(len);
3022 pd->magic = DDF_PHYS_RECORDS_MAGIC;
3023 pd->used_pdes = cpu_to_be16(dl->pdnum);
3024 pd->entries[0].state = cpu_to_be16(DDF_Missing);
3025 append_metadata_update(st, pd, len);
3026 }
3027 return 0;
3028 }
3029 #endif
3030
3031 /*
3032 * This is the write_init_super method for a ddf container. It is
3033 * called when creating a container or adding another device to a
3034 * container.
3035 */
3036
3037 static int __write_ddf_structure(struct dl *d, struct ddf_super *ddf, __u8 type)
3038 {
3039 unsigned long long sector;
3040 struct ddf_header *header;
3041 int fd, i, n_config, conf_size, buf_size;
3042 int ret = 0;
3043 char *conf;
3044
3045 fd = d->fd;
3046
3047 switch (type) {
3048 case DDF_HEADER_PRIMARY:
3049 header = &ddf->primary;
3050 sector = be64_to_cpu(header->primary_lba);
3051 break;
3052 case DDF_HEADER_SECONDARY:
3053 header = &ddf->secondary;
3054 sector = be64_to_cpu(header->secondary_lba);
3055 break;
3056 default:
3057 return 0;
3058 }
3059 if (sector == ~(__u64)0)
3060 return 0;
3061
3062 header->type = type;
3063 header->openflag = 1;
3064 header->crc = calc_crc(header, 512);
3065
3066 lseek64(fd, sector<<9, 0);
3067 if (write(fd, header, 512) < 0)
3068 goto out;
3069
3070 ddf->controller.crc = calc_crc(&ddf->controller, 512);
3071 if (write(fd, &ddf->controller, 512) < 0)
3072 goto out;
3073
3074 ddf->phys->crc = calc_crc(ddf->phys, ddf->pdsize);
3075 if (write(fd, ddf->phys, ddf->pdsize) < 0)
3076 goto out;
3077 ddf->virt->crc = calc_crc(ddf->virt, ddf->vdsize);
3078 if (write(fd, ddf->virt, ddf->vdsize) < 0)
3079 goto out;
3080
3081 /* Now write lots of config records. */
3082 n_config = ddf->max_part;
3083 conf_size = ddf->conf_rec_len * 512;
3084 conf = ddf->conf;
3085 buf_size = conf_size * (n_config + 1);
3086 if (!conf) {
3087 if (posix_memalign((void**)&conf, 512, buf_size) != 0)
3088 goto out;
3089 ddf->conf = conf;
3090 }
3091 for (i = 0 ; i <= n_config ; i++) {
3092 struct vcl *c;
3093 struct vd_config *vdc = NULL;
3094 if (i == n_config) {
3095 c = (struct vcl *)d->spare;
3096 if (c)
3097 vdc = &c->conf;
3098 } else {
3099 unsigned int dummy;
3100 c = d->vlist[i];
3101 if (c)
3102 get_pd_index_from_refnum(
3103 c, d->disk.refnum,
3104 ddf->mppe,
3105 (const struct vd_config **)&vdc,
3106 &dummy);
3107 }
3108 if (vdc) {
3109 dprintf("writing conf record %i on disk %08x for %s/%u\n",
3110 i, be32_to_cpu(d->disk.refnum),
3111 guid_str(vdc->guid),
3112 vdc->sec_elmnt_seq);
3113 vdc->crc = calc_crc(vdc, conf_size);
3114 memcpy(conf + i*conf_size, vdc, conf_size);
3115 } else
3116 memset(conf + i*conf_size, 0xff, conf_size);
3117 }
3118 if (write(fd, conf, buf_size) != buf_size)
3119 goto out;
3120
3121 d->disk.crc = calc_crc(&d->disk, 512);
3122 if (write(fd, &d->disk, 512) < 0)
3123 goto out;
3124
3125 ret = 1;
3126 out:
3127 header->openflag = 0;
3128 header->crc = calc_crc(header, 512);
3129
3130 lseek64(fd, sector<<9, 0);
3131 if (write(fd, header, 512) < 0)
3132 ret = 0;
3133
3134 return ret;
3135 }
3136
3137 static int _write_super_to_disk(struct ddf_super *ddf, struct dl *d)
3138 {
3139 unsigned long long size;
3140 int fd = d->fd;
3141 if (fd < 0)
3142 return 0;
3143
3144 /* We need to fill in the primary, (secondary) and workspace
3145 * lba's in the headers and set their checksums,
3146 * and also checksum phys, virt, ...
3147 *
3148 * Then write everything out, finally the anchor is written.
3149 */
3150 get_dev_size(fd, NULL, &size);
3151 size /= 512;
3152 memcpy(&ddf->anchor, ddf->active, 512);
3153 if (be64_to_cpu(d->workspace_lba) != 0ULL)
3154 ddf->anchor.workspace_lba = d->workspace_lba;
3155 else
3156 ddf->anchor.workspace_lba =
3157 cpu_to_be64(size - 32*1024*2);
3158 if (be64_to_cpu(d->primary_lba) != 0ULL)
3159 ddf->anchor.primary_lba = d->primary_lba;
3160 else
3161 ddf->anchor.primary_lba =
3162 cpu_to_be64(size - 16*1024*2);
3163 if (be64_to_cpu(d->secondary_lba) != 0ULL)
3164 ddf->anchor.secondary_lba = d->secondary_lba;
3165 else
3166 ddf->anchor.secondary_lba =
3167 cpu_to_be64(size - 32*1024*2);
3168 ddf->anchor.timestamp = cpu_to_be32(time(0) - DECADE);
3169 memcpy(&ddf->primary, &ddf->anchor, 512);
3170 memcpy(&ddf->secondary, &ddf->anchor, 512);
3171
3172 ddf->anchor.openflag = 0xFF; /* 'open' means nothing */
3173 ddf->anchor.seq = cpu_to_be32(0xFFFFFFFF); /* no sequencing in anchor */
3174 ddf->anchor.crc = calc_crc(&ddf->anchor, 512);
3175
3176 if (!__write_ddf_structure(d, ddf, DDF_HEADER_PRIMARY))
3177 return 0;
3178
3179 if (!__write_ddf_structure(d, ddf, DDF_HEADER_SECONDARY))
3180 return 0;
3181
3182 lseek64(fd, (size-1)*512, SEEK_SET);
3183 if (write(fd, &ddf->anchor, 512) < 0)
3184 return 0;
3185
3186 return 1;
3187 }
3188
3189 #ifndef MDASSEMBLE
3190 static int __write_init_super_ddf(struct supertype *st)
3191 {
3192 struct ddf_super *ddf = st->sb;
3193 struct dl *d;
3194 int attempts = 0;
3195 int successes = 0;
3196
3197 pr_state(ddf, __func__);
3198
3199 /* try to write updated metadata,
3200 * if we catch a failure move on to the next disk
3201 */
3202 for (d = ddf->dlist; d; d=d->next) {
3203 attempts++;
3204 successes += _write_super_to_disk(ddf, d);
3205 }
3206
3207 return attempts != successes;
3208 }
3209
3210 static int write_init_super_ddf(struct supertype *st)
3211 {
3212 struct ddf_super *ddf = st->sb;
3213 struct vcl *currentconf = ddf->currentconf;
3214
3215 /* We are done with currentconf - reset it so st refers to the container */
3216 ddf->currentconf = NULL;
3217
3218 if (st->update_tail) {
3219 /* queue the virtual_disk and vd_config as metadata updates */
3220 struct virtual_disk *vd;
3221 struct vd_config *vc;
3222 int len, tlen;
3223 unsigned int i;
3224
3225 if (!currentconf) {
3226 /* Must be adding a physical disk to the container */
3227 int len = (sizeof(struct phys_disk) +
3228 sizeof(struct phys_disk_entry));
3229
3230 /* adding a disk to the container. */
3231 if (!ddf->add_list)
3232 return 0;
3233
3234 append_metadata_update(st, ddf->add_list->mdupdate, len);
3235 ddf->add_list->mdupdate = NULL;
3236 return 0;
3237 }
3238
3239 /* Newly created VD */
3240
3241 /* First the virtual disk. We have a slightly fake header */
3242 len = sizeof(struct virtual_disk) + sizeof(struct virtual_entry);
3243 vd = xmalloc(len);
3244 *vd = *ddf->virt;
3245 vd->entries[0] = ddf->virt->entries[currentconf->vcnum];
3246 vd->populated_vdes = cpu_to_be16(currentconf->vcnum);
3247 append_metadata_update(st, vd, len);
3248
3249 /* Then the vd_config */
3250 len = ddf->conf_rec_len * 512;
3251 tlen = len * currentconf->conf.sec_elmnt_count;
3252 vc = xmalloc(tlen);
3253 memcpy(vc, &currentconf->conf, len);
3254 for (i = 1; i < currentconf->conf.sec_elmnt_count; i++)
3255 memcpy((char *)vc + i*len, currentconf->other_bvds[i-1],
3256 len);
3257 append_metadata_update(st, vc, tlen);
3258
3259 return 0;
3260 } else {
3261 struct dl *d;
3262 if (!currentconf)
3263 for (d = ddf->dlist; d; d=d->next)
3264 while (Kill(d->devname, NULL, 0, -1, 1) == 0);
3265 /* Note: we don't close the fd's now, but a subsequent
3266 * ->free_super() will
3267 */
3268 return __write_init_super_ddf(st);
3269 }
3270 }
3271
3272 #endif
3273
3274 static __u64 avail_size_ddf(struct supertype *st, __u64 devsize,
3275 unsigned long long data_offset)
3276 {
3277 /* We must reserve the last 32Meg */
3278 if (devsize <= 32*1024*2)
3279 return 0;
3280 return devsize - 32*1024*2;
3281 }
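/*
 * 32*1024*2 sectors of 512 bytes == 32MiB, matching the metadata
 * reservation made in _set_config_size() and the __calc_lba() defaults.
 */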
3282
3283 #ifndef MDASSEMBLE
3284
3285 static int reserve_space(struct supertype *st, int raiddisks,
3286 unsigned long long size, int chunk,
3287 unsigned long long data_offset,
3288 unsigned long long *freesize)
3289 {
3290 /* Find 'raiddisks' spare extents at least 'size' big (but
3291 * only caring about multiples of 'chunk') and remember
3292 * them. If size==0, find the largest size possible.
3293 * Report available size in *freesize
3294 * If space cannot be found, fail.
3295 */
3296 struct dl *dl;
3297 struct ddf_super *ddf = st->sb;
3298 int cnt = 0;
3299
3300 for (dl = ddf->dlist; dl ; dl=dl->next) {
3301 dl->raiddisk = -1;
3302 dl->esize = 0;
3303 }
3304 /* Now find largest extent on each device */
3305 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3306 unsigned long long minsize = ULLONG_MAX;
3307
3308 find_space(ddf, dl, data_offset, &minsize);
3309 if (minsize >= size && minsize >= (unsigned)chunk) {
3310 cnt++;
3311 dl->esize = minsize;
3312 }
3313 }
3314 if (cnt < raiddisks) {
3315 pr_err("not enough devices with space to create array.\n");
3316 return 0; /* Not enough free extents large enough */
3317 }
3318 if (size == 0) {
3319 /* choose the largest size of which there are at least 'raiddisks' instances */
3320 for (dl = ddf->dlist ; dl ; dl=dl->next) {
3321 struct dl *dl2;
3322 if (dl->esize <= size)
3323 continue;
3324 /* This is bigger than 'size', see if there are enough */
3325 cnt = 0;
3326 for (dl2 = ddf->dlist; dl2 ; dl2=dl2->next)
3327 if (dl2->esize >= dl->esize)
3328 cnt++;
3329 if (cnt >= raiddisks)
3330 size = dl->esize;
3331 }
3332 if (chunk) {
3333 size = size / chunk;
3334 size *= chunk;
3335 }
3336 *freesize = size;
3337 if (size < 32) {
3338 pr_err("not enough spare devices to create array.\n");
3339 return 0;
3340 }
3341 }
3342 /* We have a 'size' of which there are enough spaces.
3343 * We simply do a first-fit */
3344 cnt = 0;
3345 for (dl = ddf->dlist ; dl && cnt < raiddisks ; dl=dl->next) {
3346 if (dl->esize < size)
3347 continue;
3348
3349 dl->raiddisk = cnt;
3350 cnt++;
3351 }
3352 return 1;
3353 }
3354
3355 static int validate_geometry_ddf(struct supertype *st,
3356 int level, int layout, int raiddisks,
3357 int *chunk, unsigned long long size,
3358 unsigned long long data_offset,
3359 char *dev, unsigned long long *freesize,
3360 int verbose)
3361 {
3362 int fd;
3363 struct mdinfo *sra;
3364 int cfd;
3365
3366 /* ddf potentially supports lots of things, but it depends on
3367 * what devices are offered (and maybe kernel version?)
3368 * If given unused devices, we will make a container.
3369 * If given devices in a container, we will make a BVD.
3370 * If given BVDs, we make an SVD, changing all the GUIDs in the process.
3371 */
3372
3373 if (*chunk == UnSet)
3374 *chunk = DEFAULT_CHUNK;
3375
3376 if (level == LEVEL_NONE)
3377 level = LEVEL_CONTAINER;
3378 if (level == LEVEL_CONTAINER) {
3379 /* Must be a fresh device to add to a container */
3380 return validate_geometry_ddf_container(st, level, layout,
3381 raiddisks, *chunk,
3382 size, data_offset, dev,
3383 freesize,
3384 verbose);
3385 }
3386
3387 if (!dev) {
3388 mdu_array_info_t array = {
3389 .level = level,
3390 .layout = layout,
3391 .raid_disks = raiddisks
3392 };
3393 struct vd_config conf;
3394 if (layout_md2ddf(&array, &conf) == -1) {
3395 if (verbose)
3396 pr_err("DDF does not support level %d /layout %d arrays with %d disks\n",
3397 level, layout, raiddisks);
3398 return 0;
3399 }
3400 /* Should check layout? etc */
3401
3402 if (st->sb && freesize) {
3403 /* --create was given a container to create in.
3404 * So we need to check that there are enough
3405 * free spaces and return the amount of space.
3406 * We may as well remember which drives were
3407 * chosen so that add_to_super/getinfo_super
3408 * can return them.
3409 */
3410 return reserve_space(st, raiddisks, size, *chunk,
3411 data_offset, freesize);
3412 }
3413 return 1;
3414 }
3415
3416 if (st->sb) {
3417 /* A container has already been opened, so we are
3418 * creating in there. Maybe a BVD, maybe an SVD.
3419 * Should make a distinction one day.
3420 */
3421 return validate_geometry_ddf_bvd(st, level, layout, raiddisks,
3422 chunk, size, data_offset, dev,
3423 freesize,
3424 verbose);
3425 }
3426 /* This is the first device for the array.
3427 * If it is a container, we read it in and do automagic allocations,
3428 * no other devices should be given.
3429 * Otherwise it must be a member device of a container, and we
3430 * do manual allocation.
3431 * Later we should check for a BVD and make an SVD.
3432 */
3433 fd = open(dev, O_RDONLY|O_EXCL, 0);
3434 if (fd >= 0) {
3435 close(fd);
3436 /* Just a bare device, no good to us */
3437 if (verbose)
3438 pr_err("ddf: Cannot create this array "
3439 "on device %s - a container is required.\n",
3440 dev);
3441 return 0;
3442 }
3443 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3444 if (verbose)
3445 pr_err("ddf: Cannot open %s: %s\n",
3446 dev, strerror(errno));
3447 return 0;
3448 }
3449 /* Well, it is in use by someone, maybe a 'ddf' container. */
3450 cfd = open_container(fd);
3451 if (cfd < 0) {
3452 close(fd);
3453 if (verbose)
3454 pr_err("ddf: Cannot use %s: %s\n",
3455 dev, strerror(EBUSY));
3456 return 0;
3457 }
3458 sra = sysfs_read(cfd, NULL, GET_VERSION);
3459 close(fd);
3460 if (sra && sra->array.major_version == -1 &&
3461 strcmp(sra->text_version, "ddf") == 0) {
3462 /* This is a member of a ddf container. Load the container
3463 * and try to create a bvd
3464 */
3465 struct ddf_super *ddf;
3466 if (load_super_ddf_all(st, cfd, (void **)&ddf, NULL) == 0) {
3467 st->sb = ddf;
3468 strcpy(st->container_devnm, fd2devnm(cfd));
3469 close(cfd);
3470 return validate_geometry_ddf_bvd(st, level, layout,
3471 raiddisks, chunk, size,
3472 data_offset,
3473 dev, freesize,
3474 verbose);
3475 }
3476 close(cfd);
3477 } else /* device may belong to a different container */
3478 return 0;
3479
3480 return 1;
3481 }
3482
3483 static int
3484 validate_geometry_ddf_container(struct supertype *st,
3485 int level, int layout, int raiddisks,
3486 int chunk, unsigned long long size,
3487 unsigned long long data_offset,
3488 char *dev, unsigned long long *freesize,
3489 int verbose)
3490 {
3491 int fd;
3492 unsigned long long ldsize;
3493
3494 if (level != LEVEL_CONTAINER)
3495 return 0;
3496 if (!dev)
3497 return 1;
3498
3499 fd = open(dev, O_RDONLY|O_EXCL, 0);
3500 if (fd < 0) {
3501 if (verbose)
3502 pr_err("ddf: Cannot open %s: %s\n",
3503 dev, strerror(errno));
3504 return 0;
3505 }
3506 if (!get_dev_size(fd, dev, &ldsize)) {
3507 close(fd);
3508 return 0;
3509 }
3510 close(fd);
3511
3512 *freesize = avail_size_ddf(st, ldsize >> 9, INVALID_SECTORS);
3513 if (*freesize == 0)
3514 return 0;
3515
3516 return 1;
3517 }
3518
3519 static int validate_geometry_ddf_bvd(struct supertype *st,
3520 int level, int layout, int raiddisks,
3521 int *chunk, unsigned long long size,
3522 unsigned long long data_offset,
3523 char *dev, unsigned long long *freesize,
3524 int verbose)
3525 {
3526 struct stat stb;
3527 struct ddf_super *ddf = st->sb;
3528 struct dl *dl;
3529 unsigned long long maxsize;
3530 /* ddf/bvd supports lots of things, but not containers */
3531 if (level == LEVEL_CONTAINER) {
3532 if (verbose)
3533 pr_err("DDF cannot create a container within an container\n");
3534 return 0;
3535 }
3536 /* We must have the container info already read in. */
3537 if (!ddf)
3538 return 0;
3539
3540 if (!dev) {
3541 /* General test: make sure there is space for
3542 * 'raiddisks' device extents of size 'size'.
3543 */
3544 unsigned long long minsize = size;
3545 int dcnt = 0;
3546 if (minsize == 0)
3547 minsize = 8;
3548 for (dl = ddf->dlist; dl ; dl = dl->next) {
3549 if (find_space(ddf, dl, data_offset, &minsize)
3550 != INVALID_SECTORS)
3551 dcnt++;
3552 }
3553 if (dcnt < raiddisks) {
3554 if (verbose)
3555 pr_err("ddf: Not enough devices with "
3556 "space for this array (%d < %d)\n",
3557 dcnt, raiddisks);
3558 return 0;
3559 }
3560 return 1;
3561 }
3562 /* This device must be a member of the set */
3563 if (stat(dev, &stb) < 0)
3564 return 0;
3565 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3566 return 0;
3567 for (dl = ddf->dlist ; dl ; dl = dl->next) {
3568 if (dl->major == (int)major(stb.st_rdev) &&
3569 dl->minor == (int)minor(stb.st_rdev))
3570 break;
3571 }
3572 if (!dl) {
3573 if (verbose)
3574 pr_err("ddf: %s is not in the "
3575 "same DDF set\n",
3576 dev);
3577 return 0;
3578 }
3579 maxsize = ULLONG_MAX;
3580 find_space(ddf, dl, data_offset, &maxsize);
3581 *freesize = maxsize;
3582
3583 return 1;
3584 }
3585
3586 static int load_super_ddf_all(struct supertype *st, int fd,
3587 void **sbp, char *devname)
3588 {
3589 struct mdinfo *sra;
3590 struct ddf_super *super;
3591 struct mdinfo *sd, *best = NULL;
3592 int bestseq = 0;
3593 int seq;
3594 char nm[20];
3595 int dfd;
3596
3597 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
3598 if (!sra)
3599 return 1;
3600 if (sra->array.major_version != -1 ||
3601 sra->array.minor_version != -2 ||
3602 strcmp(sra->text_version, "ddf") != 0)
3603 return 1;
3604
3605 if (posix_memalign((void**)&super, 512, sizeof(*super)) != 0)
3606 return 1;
3607 memset(super, 0, sizeof(*super));
3608
3609 /* first, try each device, and choose the best ddf */
3610 for (sd = sra->devs ; sd ; sd = sd->next) {
3611 int rv;
3612 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3613 dfd = dev_open(nm, O_RDONLY);
3614 if (dfd < 0)
3615 return 2;
3616 rv = load_ddf_headers(dfd, super, NULL);
3617 close(dfd);
3618 if (rv == 0) {
3619 seq = be32_to_cpu(super->active->seq);
3620 if (super->active->openflag)
3621 seq--;
3622 if (!best || seq > bestseq) {
3623 bestseq = seq;
3624 best = sd;
3625 }
3626 }
3627 }
3628 if (!best)
3629 return 1;
3630 /* OK, load this ddf */
3631 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
3632 dfd = dev_open(nm, O_RDONLY);
3633 if (dfd < 0)
3634 return 1;
3635 load_ddf_headers(dfd, super, NULL);
3636 load_ddf_global(dfd, super, NULL);
3637 close(dfd);
3638 /* Now we need the device-local bits */
3639 for (sd = sra->devs ; sd ; sd = sd->next) {
3640 int rv;
3641
3642 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
3643 dfd = dev_open(nm, O_RDWR);
3644 if (dfd < 0)
3645 return 2;
3646 rv = load_ddf_headers(dfd, super, NULL);
3647 if (rv == 0)
3648 rv = load_ddf_local(dfd, super, NULL, 1);
3649 if (rv)
3650 return 1;
3651 }
3652
3653 *sbp = super;
3654 if (st->ss == NULL) {
3655 st->ss = &super_ddf;
3656 st->minor_version = 0;
3657 st->max_devs = 512;
3658 }
3659 strcpy(st->container_devnm, fd2devnm(fd));
3660 return 0;
3661 }
3662
3663 static int load_container_ddf(struct supertype *st, int fd,
3664 char *devname)
3665 {
3666 return load_super_ddf_all(st, fd, &st->sb, devname);
3667 }
3668
3669 #endif /* MDASSEMBLE */
3670
3671 static int check_secondary(const struct vcl *vc)
3672 {
3673 const struct vd_config *conf = &vc->conf;
3674 int i;
3675
3676 /* The only DDF secondary RAID level md can support is
3677 * RAID 10, if the stripe sizes and Basic volume sizes
3678 * are all equal.
3679 * Other configurations could in theory be supported by exposing
3680 * the BVDs to user space and using device mapper for the secondary
3681 * mapping. So far we don't support that.
3682 */
3683
3684 __u64 sec_elements[4] = {0, 0, 0, 0};
3685 #define __set_sec_seen(n) (sec_elements[(n)>>6] |= ((__u64)1 << ((n)&63)))
3686 #define __was_sec_seen(n) ((sec_elements[(n)>>6] & ((__u64)1 << ((n)&63))) != 0)
3687
3688 if (vc->other_bvds == NULL) {
3689 pr_err("No BVDs for secondary RAID found\n");
3690 return -1;
3691 }
3692 if (conf->prl != DDF_RAID1) {
3693 pr_err("Secondary RAID level only supported for mirrored BVD\n");
3694 return -1;
3695 }
3696 if (conf->srl != DDF_2STRIPED && conf->srl != DDF_2SPANNED) {
3697 pr_err("Secondary RAID level %d is unsupported\n",
3698 conf->srl);
3699 return -1;
3700 }
3701 __set_sec_seen(conf->sec_elmnt_seq);
3702 for (i = 0; i < conf->sec_elmnt_count-1; i++) {
3703 const struct vd_config *bvd = vc->other_bvds[i];
3704 if (bvd->sec_elmnt_seq == DDF_UNUSED_BVD)
3705 continue;
3706 if (bvd->srl != conf->srl) {
3707 pr_err("Inconsistent secondary RAID level across BVDs\n");
3708 return -1;
3709 }
3710 if (bvd->prl != conf->prl) {
3711 pr_err("Different RAID levels for BVDs are unsupported\n");
3712 return -1;
3713 }
3714 if (!be16_eq(bvd->prim_elmnt_count, conf->prim_elmnt_count)) {
3715 pr_err("All BVDs must have the same number of primary elements\n");
3716 return -1;
3717 }
3718 if (bvd->chunk_shift != conf->chunk_shift) {
3719 pr_err("Different strip sizes for BVDs are unsupported\n");
3720 return -1;
3721 }
3722 if (!be64_eq(bvd->array_blocks, conf->array_blocks)) {
3723 pr_err("Different BVD sizes are unsupported\n");
3724 return -1;
3725 }
3726 __set_sec_seen(bvd->sec_elmnt_seq);
3727 }
3728 for (i = 0; i < conf->sec_elmnt_count; i++) {
3729 if (!__was_sec_seen(i)) {
3730 /* pr_err("BVD %d is missing\n", i); */
3731 return -1;
3732 }
3733 }
3734 return 0;
3735 }
3736
3737 static unsigned int get_pd_index_from_refnum(const struct vcl *vc,
3738 be32 refnum, unsigned int nmax,
3739 const struct vd_config **bvd,
3740 unsigned int *idx)
3741 {
3742 unsigned int i, j, n, sec, cnt;
3743
3744 cnt = be16_to_cpu(vc->conf.prim_elmnt_count);
3745 sec = (vc->conf.sec_elmnt_count == 1 ? 0 : vc->conf.sec_elmnt_seq);
3746
3747 for (i = 0, j = 0 ; i < nmax ; i++) {
3748 /* j counts valid entries for this BVD */
3749 if (be32_eq(vc->conf.phys_refnum[i], refnum)) {
3750 *bvd = &vc->conf;
3751 *idx = i;
3752 return sec * cnt + j;
3753 }
3754 if (be32_to_cpu(vc->conf.phys_refnum[i]) != 0xffffffff)
3755 j++;
3756 }
3757 if (vc->other_bvds == NULL)
3758 goto bad;
3759
3760 for (n = 1; n < vc->conf.sec_elmnt_count; n++) {
3761 struct vd_config *vd = vc->other_bvds[n-1];
3762 sec = vd->sec_elmnt_seq;
3763 if (sec == DDF_UNUSED_BVD)
3764 continue;
3765 for (i = 0, j = 0 ; i < nmax ; i++) {
3766 if (be32_eq(vd->phys_refnum[i], refnum)) {
3767 *bvd = vd;
3768 *idx = i;
3769 return sec * cnt + j;
3770 }
3771 if (be32_to_cpu(vd->phys_refnum[i]) != 0xffffffff)
3772 j++;
3773 }
3774 }
3775 bad:
3776 *bvd = NULL;
3777 return DDF_NOTFOUND;
3778 }
3779
3780 static struct mdinfo *container_content_ddf(struct supertype *st, char *subarray)
3781 {
3782 /* Given a container loaded by load_super_ddf_all,
3783 * extract information about all the arrays into
3784 * an mdinfo tree.
3785 *
3786 * For each vcl in conflist: create an mdinfo, fill it in,
3787 * then look for matching devices (phys_refnum) in dlist
3788 * and create appropriate device mdinfo.
3789 */
3790 struct ddf_super *ddf = st->sb;
3791 struct mdinfo *rest = NULL;
3792 struct vcl *vc;
3793
3794 for (vc = ddf->conflist ; vc ; vc=vc->next) {
3795 unsigned int i;
3796 struct mdinfo *this;
3797 char *ep;
3798 __u32 *cptr;
3799 unsigned int pd;
3800
3801 if (subarray &&
3802 (strtoul(subarray, &ep, 10) != vc->vcnum ||
3803 *ep != '\0'))
3804 continue;
3805
3806 if (vc->conf.sec_elmnt_count > 1) {
3807 if (check_secondary(vc) != 0)
3808 continue;
3809 }
3810
3811 this = xcalloc(1, sizeof(*this));
3812 this->next = rest;
3813 rest = this;
3814
3815 if (layout_ddf2md(&vc->conf, &this->array))
3816 continue;
3817 this->array.md_minor = -1;
3818 this->array.major_version = -1;
3819 this->array.minor_version = -2;
3820 this->safe_mode_delay = DDF_SAFE_MODE_DELAY;
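/* For GUIDs generated by this module, the creation time (relative to
 * the DDF epoch) is stored big-endian at offset 16 of the GUID, so
 * adding DECADE recovers a Unix ctime. Foreign GUIDs need not follow
 * this layout, in which case ctime is only an approximation.
 */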
3821 cptr = (__u32 *)(vc->conf.guid + 16);
3822 this->array.ctime = DECADE + __be32_to_cpu(*cptr);
3823 this->array.utime = DECADE +
3824 be32_to_cpu(vc->conf.timestamp);
3825 this->array.chunk_size = 512 << vc->conf.chunk_shift;
3826
3827 i = vc->vcnum;
3828 if ((ddf->virt->entries[i].state & DDF_state_inconsistent) ||
3829 (ddf->virt->entries[i].init_state & DDF_initstate_mask) !=
3830 DDF_init_full) {
3831 this->array.state = 0;
3832 this->resync_start = 0;
3833 } else {
3834 this->array.state = 1;
3835 this->resync_start = MaxSector;
3836 }
3837 _ddf_array_name(this->name, ddf, i);
3838 memset(this->uuid, 0, sizeof(this->uuid));
3839 this->component_size = be64_to_cpu(vc->conf.blocks);
3840 this->array.size = this->component_size / 2;
3841 this->container_member = i;
3842
3843 ddf->currentconf = vc;
3844 uuid_from_super_ddf(st, this->uuid);
3845 if (!subarray)
3846 ddf->currentconf = NULL;
3847
3848 sprintf(this->text_version, "/%s/%d",
3849 st->container_devnm, this->container_member);
3850
3851 for (pd = 0; pd < be16_to_cpu(ddf->phys->max_pdes); pd++) {
3852 struct mdinfo *dev;
3853 struct dl *d;
3854 const struct vd_config *bvd;
3855 unsigned int iphys;
3856 int stt;
3857
3858 if (be32_to_cpu(ddf->phys->entries[pd].refnum)
3859 == 0xFFFFFFFF)
3860 continue;
3861
3862 stt = be16_to_cpu(ddf->phys->entries[pd].state);
3863 if ((stt & (DDF_Online|DDF_Failed|DDF_Rebuilding))
3864 != DDF_Online)
3865 continue;
3866
3867 i = get_pd_index_from_refnum(
3868 vc, ddf->phys->entries[pd].refnum,
3869 ddf->mppe, &bvd, &iphys);
3870 if (i == DDF_NOTFOUND)
3871 continue;
3872
3873 this->array.working_disks++;
3874
3875 for (d = ddf->dlist; d ; d=d->next)
3876 if (be32_eq(d->disk.refnum,
3877 ddf->phys->entries[pd].refnum))
3878 break;
3879 if (d == NULL)
3880 /* Haven't found that one yet, maybe there are others */
3881 continue;
3882
3883 dev = xcalloc(1, sizeof(*dev));
3884 dev->next = this->devs;
3885 this->devs = dev;
3886
3887 dev->disk.number = be32_to_cpu(d->disk.refnum);
3888 dev->disk.major = d->major;
3889 dev->disk.minor = d->minor;
3890 dev->disk.raid_disk = i;
3891 dev->disk.state = (1<<MD_DISK_SYNC)|(1<<MD_DISK_ACTIVE);
3892 dev->recovery_start = MaxSector;
3893
3894 dev->events = be32_to_cpu(ddf->active->seq);
3895 dev->data_offset =
3896 be64_to_cpu(LBA_OFFSET(ddf, bvd)[iphys]);
3897 dev->component_size = be64_to_cpu(bvd->blocks);
3898 if (d->devname)
3899 strcpy(dev->name, d->devname);
3900 }
3901 }
3902 return rest;
3903 }
3904
3905 static int store_super_ddf(struct supertype *st, int fd)
3906 {
3907 struct ddf_super *ddf = st->sb;
3908 unsigned long long dsize;
3909 void *buf;
3910 int rc;
3911
3912 if (!ddf)
3913 return 1;
3914
3915 if (!get_dev_size(fd, NULL, &dsize))
3916 return 1;
3917
3918 if (ddf->dlist || ddf->conflist) {
3919 struct stat sta;
3920 struct dl *dl;
3921 int ofd, ret;
3922
3923 if (fstat(fd, &sta) == -1 || !S_ISBLK(sta.st_mode)) {
3924 pr_err("%s: file descriptor for invalid device\n",
3925 __func__);
3926 return 1;
3927 }
3928 for (dl = ddf->dlist; dl; dl = dl->next)
3929 if (dl->major == (int)major(sta.st_rdev) &&
3930 dl->minor == (int)minor(sta.st_rdev))
3931 break;
3932 if (!dl) {
3933 pr_err("%s: couldn't find disk %d/%d\n", __func__,
3934 (int)major(sta.st_rdev),
3935 (int)minor(sta.st_rdev));
3936 return 1;
3937 }
3938 ofd = dl->fd;
3939 dl->fd = fd;
3940 ret = (_write_super_to_disk(ddf, dl) != 1);
3941 dl->fd = ofd;
3942 return ret;
3943 }
3944
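/* No metadata is loaded for this device: zero the last 512-byte block
 * so that any stale DDF anchor stored there is invalidated.
 */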
3945 if (posix_memalign(&buf, 512, 512) != 0)
3946 return 1;
3947 memset(buf, 0, 512);
3948
3949 lseek64(fd, dsize-512, 0);
3950 rc = write(fd, buf, 512);
3951 free(buf);
3952 if (rc < 0)
3953 return 1;
3954 return 0;
3955 }
3956
3957 static int compare_super_ddf(struct supertype *st, struct supertype *tst)
3958 {
3959 /*
3960 * return:
3961 * 0 same, or first was empty, and second was copied
3962 * 1 second had wrong magic number - but that isn't possible
3963 * 2 wrong uuid
3964 * 3 wrong other info
3965 */
3966 struct ddf_super *first = st->sb;
3967 struct ddf_super *second = tst->sb;
3968 struct dl *dl1, *dl2;
3969 struct vcl *vl1, *vl2;
3970 unsigned int max_vds, max_pds, pd, vd;
3971
3972 if (!first) {
3973 st->sb = tst->sb;
3974 tst->sb = NULL;
3975 return 0;
3976 }
3977
3978 if (memcmp(first->anchor.guid, second->anchor.guid, DDF_GUID_LEN) != 0)
3979 return 2;
3980
3981 /* It is only OK to compare info in the anchor. Anything else
3982 * could be changing due to a reconfig so must be ignored.
3983 * guid really should be enough anyway.
3984 */
3985
3986 if (!be32_eq(first->active->seq, second->active->seq)) {
3987 dprintf("%s: sequence number mismatch %u<->%u\n", __func__,
3988 be32_to_cpu(first->active->seq),
3989 be32_to_cpu(second->active->seq));
3990 return 0;
3991 }
3992
3993 /*
3994 * At this point we are fairly sure that the meta data matches.
3995 * But the new disk may contain additional local data.
3996 * Add it to the super block.
3997 */
3998 max_vds = be16_to_cpu(first->active->max_vd_entries);
3999 max_pds = be16_to_cpu(first->phys->max_pdes);
4000 for (vl2 = second->conflist; vl2; vl2 = vl2->next) {
4001 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
4002 if (!memcmp(vl1->conf.guid, vl2->conf.guid,
4003 DDF_GUID_LEN))
4004 break;
4005 if (vl1) {
4006 if (vl1->other_bvds != NULL &&
4007 vl1->conf.sec_elmnt_seq !=
4008 vl2->conf.sec_elmnt_seq) {
4009 dprintf("%s: adding BVD %u\n", __func__,
4010 vl2->conf.sec_elmnt_seq);
4011 add_other_bvd(vl1, &vl2->conf,
4012 first->conf_rec_len*512);
4013 }
4014 continue;
4015 }
4016
4017 if (posix_memalign((void **)&vl1, 512,
4018 (first->conf_rec_len*512 +
4019 offsetof(struct vcl, conf))) != 0) {
4020 pr_err("%s could not allocate vcl buf\n",
4021 __func__);
4022 return 3;
4023 }
4024
4025 vl1->next = first->conflist;
4026 vl1->block_sizes = NULL;
4027 memcpy(&vl1->conf, &vl2->conf, first->conf_rec_len*512);
4028 if (alloc_other_bvds(first, vl1) != 0) {
4029 pr_err("%s could not allocate other bvds\n",
4030 __func__);
4031 free(vl1);
4032 return 3;
4033 }
4034 for (vd = 0; vd < max_vds; vd++)
4035 if (!memcmp(first->virt->entries[vd].guid,
4036 vl1->conf.guid, DDF_GUID_LEN))
4037 break;
4038 vl1->vcnum = vd;
4039 dprintf("%s: added config for VD %u\n", __func__, vl1->vcnum);
4040 first->conflist = vl1;
4041 }
4042
4043 for (dl2 = second->dlist; dl2; dl2 = dl2->next) {
4044 for (dl1 = first->dlist; dl1; dl1 = dl1->next)
4045 if (be32_eq(dl1->disk.refnum, dl2->disk.refnum))
4046 break;
4047 if (dl1)
4048 continue;
4049
4050 if (posix_memalign((void **)&dl1, 512,
4051 sizeof(*dl1) + (first->max_part) * sizeof(dl1->vlist[0]))
4052 != 0) {
4053 pr_err("%s could not allocate disk info buffer\n",
4054 __func__);
4055 return 3;
4056 }
4057 memcpy(dl1, dl2, sizeof(*dl1));
4058 dl1->mdupdate = NULL;
4059 dl1->next = first->dlist;
4060 dl1->fd = -1;
4061 for (pd = 0; pd < max_pds; pd++)
4062 if (be32_eq(first->phys->entries[pd].refnum,
4063 dl1->disk.refnum))
4064 break;
4065 dl1->pdnum = pd < max_pds ? (int)pd : -1;
4066 if (dl2->spare) {
4067 if (posix_memalign((void **)&dl1->spare, 512,
4068 first->conf_rec_len*512) != 0) {
4069 pr_err("%s could not allocate spare info buf\n",
4070 __func__);
4071 return 3;
4072 }
4073 memcpy(dl1->spare, dl2->spare, first->conf_rec_len*512);
4074 }
4075 for (vd = 0 ; vd < first->max_part ; vd++) {
4076 if (!dl2->vlist[vd]) {
4077 dl1->vlist[vd] = NULL;
4078 continue;
4079 }
4080 for (vl1 = first->conflist; vl1; vl1 = vl1->next)
4081 if (!memcmp(vl1->conf.guid,
4082 dl2->vlist[vd]->conf.guid,
4083 DDF_GUID_LEN))
4084 break;
4085 /* remap to the matching vcl in 'first', or NULL if none found */
4086 dl1->vlist[vd] = vl1;
4087 }
4088 first->dlist = dl1;
4089 dprintf("%s: added disk %d: %08x\n", __func__, dl1->pdnum,
4090 be32_to_cpu(dl1->disk.refnum));
4091 }
4092
4093 return 0;
4094 }
4095
4096 #ifndef MDASSEMBLE
4097 /*
4098 * A new array 'a' has been started which claims to be instance 'inst'
4099 * within container 'c'.
4100 * We need to confirm that the array matches the metadata in 'c' so
4101 * that we don't corrupt any metadata.
4102 */
4103 static int ddf_open_new(struct supertype *c, struct active_array *a, char *inst)
4104 {
4105 struct ddf_super *ddf = c->sb;
4106 int n = atoi(inst);
4107 struct mdinfo *dev;
4108 struct dl *dl;
4109 static const char faulty[] = "faulty";
4110
4111 if (all_ff(ddf->virt->entries[n].guid)) {
4112 pr_err("%s: subarray %d doesn't exist\n", __func__, n);
4113 return -ENODEV;
4114 }
4115 dprintf("%s: new subarray %d, GUID: %s\n", __func__, n,
4116 guid_str(ddf->virt->entries[n].guid));
4117 for (dev = a->info.devs; dev; dev = dev->next) {
4118 for (dl = ddf->dlist; dl; dl = dl->next)
4119 if (dl->major == dev->disk.major &&
4120 dl->minor == dev->disk.minor)
4121 break;
4122 if (!dl || dl->pdnum < 0) {
4123 pr_err("%s: device %d/%d of subarray %d not found in meta data\n",
4124 __func__, dev->disk.major, dev->disk.minor, n);
4125 return -1;
4126 }
4127 if ((be16_to_cpu(ddf->phys->entries[dl->pdnum].state) &
4128 (DDF_Online|DDF_Missing|DDF_Failed)) != DDF_Online) {
4129 pr_err("%s: new subarray %d contains broken device %d/%d (%02x)\n",
4130 __func__, n, dl->major, dl->minor,
4131 be16_to_cpu(
4132 ddf->phys->entries[dl->pdnum].state));
4133 if (write(dev->state_fd, faulty, sizeof(faulty)-1) !=
4134 sizeof(faulty) - 1)
4135 pr_err("Write to state_fd failed\n");
4136 dev->curr_state = DS_FAULTY;
4137 }
4138 }
4139 a->info.container_member = n;
4140 return 0;
4141 }
4142
4143 static void handle_missing(struct ddf_super *ddf, struct active_array *a, int inst)
4144 {
4145 /* This member array is being activated. If any devices
4146 * are missing they must now be marked as failed.
4147 */
4148 struct vd_config *vc;
4149 unsigned int n_bvd;
4150 struct vcl *vcl;
4151 struct dl *dl;
4152 int pd;
4153 int n;
4154 int state;
4155
4156 for (n = 0; ; n++) {
4157 vc = find_vdcr(ddf, inst, n, &n_bvd, &vcl);
4158 if (!vc)
4159 break;
4160 for (dl = ddf->dlist; dl; dl = dl->next)
4161 if (be32_eq(dl->disk.refnum, vc->phys_refnum[n_bvd]))
4162 break;
4163 if (dl)
4164 /* Found this disk, so not missing */
4165 continue;
4166
4167 /* Mark the device as failed/missing. */
4168 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4169 if (pd >= 0 && be16_and(ddf->phys->entries[pd].state,
4170 cpu_to_be16(DDF_Online))) {
4171 be16_clear(ddf->phys->entries[pd].state,
4172 cpu_to_be16(DDF_Online));
4173 be16_set(ddf->phys->entries[pd].state,
4174 cpu_to_be16(DDF_Failed|DDF_Missing));
4175 vc->phys_refnum[n_bvd] = cpu_to_be32(0);
4176 ddf_set_updates_pending(ddf, vc);
4177 }
4178
4179 /* Mark the array as Degraded */
4180 state = get_svd_state(ddf, vcl);
4181 if (ddf->virt->entries[inst].state !=
4182 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4183 | state)) {
4184 ddf->virt->entries[inst].state =
4185 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4186 | state;
4187 a->check_degraded = 1;
4188 ddf_set_updates_pending(ddf, vc);
4189 }
4190 }
4191 }
4192
4193 /*
4194 * The array 'a' is to be marked clean in the metadata.
4195 * If '->resync_start' is not ~(unsigned long long)0, then the array is only
4196 * clean up to that point (a sector count). If that cannot be recorded in the
4197 * metadata, then leave it as dirty.
4198 *
4199 * For DDF, we need to clear the DDF_state_inconsistent bit in the
4200 * !global! virtual_disk.virtual_entry structure.
4201 */
4202 static int ddf_set_array_state(struct active_array *a, int consistent)
4203 {
4204 struct ddf_super *ddf = a->container->sb;
4205 int inst = a->info.container_member;
4206 int old = ddf->virt->entries[inst].state;
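/* consistent == 2 is apparently used when the array is first activated:
 * treat missing members as failed, then decide clean/dirty from the
 * resync progress.
 */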
4207 if (consistent == 2) {
4208 handle_missing(ddf, a, inst);
4209 consistent = 1;
4210 if (!is_resync_complete(&a->info))
4211 consistent = 0;
4212 }
4213 if (consistent)
4214 ddf->virt->entries[inst].state &= ~DDF_state_inconsistent;
4215 else
4216 ddf->virt->entries[inst].state |= DDF_state_inconsistent;
4217 if (old != ddf->virt->entries[inst].state)
4218 ddf_set_updates_pending(ddf, NULL);
4219
4220 old = ddf->virt->entries[inst].init_state;
4221 ddf->virt->entries[inst].init_state &= ~DDF_initstate_mask;
4222 if (is_resync_complete(&a->info))
4223 ddf->virt->entries[inst].init_state |= DDF_init_full;
4224 else if (a->info.resync_start == 0)
4225 ddf->virt->entries[inst].init_state |= DDF_init_not;
4226 else
4227 ddf->virt->entries[inst].init_state |= DDF_init_quick;
4228 if (old != ddf->virt->entries[inst].init_state)
4229 ddf_set_updates_pending(ddf, NULL);
4230
4231 dprintf("ddf mark %d/%s (%d) %s %llu\n", inst,
4232 guid_str(ddf->virt->entries[inst].guid), a->curr_state,
4233 consistent?"clean":"dirty",
4234 a->info.resync_start);
4235 return consistent;
4236 }
4237
4238 static int get_bvd_state(const struct ddf_super *ddf,
4239 const struct vd_config *vc)
4240 {
4241 unsigned int i, n_bvd, working = 0;
4242 unsigned int n_prim = be16_to_cpu(vc->prim_elmnt_count);
4243 int pd, st, state;
4244 char *avail = xcalloc(1, n_prim);
4245 mdu_array_info_t array;
4246
4247 layout_ddf2md(vc, &array);
4248
4249 for (i = 0; i < n_prim; i++) {
4250 if (!find_index_in_bvd(ddf, vc, i, &n_bvd))
4251 continue;
4252 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4253 if (pd < 0)
4254 continue;
4255 st = be16_to_cpu(ddf->phys->entries[pd].state);
4256 if ((st & (DDF_Online|DDF_Failed|DDF_Rebuilding))
4257 == DDF_Online) {
4258 working++;
4259 avail[i] = 1;
4260 }
4261 }
4262
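/* Derive the BVD state from the number of working members: anything
 * short of n_prim is at best 'degraded'. Striped levels fail with any
 * missing member; parity levels tolerate one (RAID4/5) or two (RAID6)
 * missing members.
 */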
4263 state = DDF_state_degraded;
4264 if (working == n_prim)
4265 state = DDF_state_optimal;
4266 else
4267 switch (vc->prl) {
4268 case DDF_RAID0:
4269 case DDF_CONCAT:
4270 case DDF_JBOD:
4271 state = DDF_state_failed;
4272 break;
4273 case DDF_RAID1:
4274 if (working == 0)
4275 state = DDF_state_failed;
4276 else if (working >= 2)
4277 state = DDF_state_part_optimal;
4278 break;
4279 case DDF_RAID1E:
4280 if (!enough(10, n_prim, array.layout, 1, avail))
4281 state = DDF_state_failed;
4282 break;
4283 case DDF_RAID4:
4284 case DDF_RAID5:
4285 if (working < n_prim - 1)
4286 state = DDF_state_failed;
4287 break;
4288 case DDF_RAID6:
4289 if (working < n_prim - 2)
4290 state = DDF_state_failed;
4291 else if (working == n_prim - 1)
4292 state = DDF_state_part_optimal;
4293 break;
4294 }
4295 return state;
4296 }
4297
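/* Combine the states of two BVDs into the state of the secondary VD.
 * For a mirrored secondary level one optimal copy keeps the VD usable;
 * for striped or spanned levels a single failed BVD fails the whole VD.
 */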
4298 static int secondary_state(int state, int other, int seclevel)
4299 {
4300 if (state == DDF_state_optimal && other == DDF_state_optimal)
4301 return DDF_state_optimal;
4302 if (seclevel == DDF_2MIRRORED) {
4303 if (state == DDF_state_optimal || other == DDF_state_optimal)
4304 return DDF_state_part_optimal;
4305 if (state == DDF_state_failed && other == DDF_state_failed)
4306 return DDF_state_failed;
4307 return DDF_state_degraded;
4308 } else {
4309 if (state == DDF_state_failed || other == DDF_state_failed)
4310 return DDF_state_failed;
4311 if (state == DDF_state_degraded || other == DDF_state_degraded)
4312 return DDF_state_degraded;
4313 return DDF_state_part_optimal;
4314 }
4315 }
4316
4317 static int get_svd_state(const struct ddf_super *ddf, const struct vcl *vcl)
4318 {
4319 int state = get_bvd_state(ddf, &vcl->conf);
4320 unsigned int i;
4321 for (i = 1; i < vcl->conf.sec_elmnt_count; i++) {
4322 state = secondary_state(
4323 state,
4324 get_bvd_state(ddf, vcl->other_bvds[i-1]),
4325 vcl->conf.srl);
4326 }
4327 return state;
4328 }
4329
4330 /*
4331 * The state of each disk is stored in the global phys_disk structure
4332 * in phys_disk.entries[n].state.
4333 * This makes various combinations awkward.
4334 * - When a device fails in any array, it must be failed in all arrays
4335 * that include a part of this device.
4336 * - When a component is rebuilding, we cannot include it officially in the
4337 * array unless this is the only array that uses the device.
4338 *
4339 * So: when transitioning:
4340 * Online -> failed, just set failed flag. monitor will propagate
4341 * spare -> online, the device might need to be added to the array.
4342 * spare -> failed, just set failed. Don't worry if in array or not.
4343 */
4344 static void ddf_set_disk(struct active_array *a, int n, int state)
4345 {
4346 struct ddf_super *ddf = a->container->sb;
4347 unsigned int inst = a->info.container_member, n_bvd;
4348 struct vcl *vcl;
4349 struct vd_config *vc = find_vdcr(ddf, inst, (unsigned int)n,
4350 &n_bvd, &vcl);
4351 int pd;
4352 struct mdinfo *mdi;
4353 struct dl *dl;
4354 int update = 0;
4355
4356 dprintf("%s: %d to %x\n", __func__, n, state);
4357 if (vc == NULL) {
4358 dprintf("ddf: cannot find instance %d!!\n", inst);
4359 return;
4360 }
4361 /* Find the matching slot in 'info'. */
4362 for (mdi = a->info.devs; mdi; mdi = mdi->next)
4363 if (mdi->disk.raid_disk == n)
4364 break;
4365 if (!mdi) {
4366 pr_err("%s: cannot find raid disk %d\n",
4367 __func__, n);
4368 return;
4369 }
4370
4371 /* and find the 'dl' entry corresponding to that. */
4372 for (dl = ddf->dlist; dl; dl = dl->next)
4373 if (mdi->state_fd >= 0 &&
4374 mdi->disk.major == dl->major &&
4375 mdi->disk.minor == dl->minor)
4376 break;
4377 if (!dl) {
4378 pr_err("%s: cannot find raid disk %d (%d/%d)\n",
4379 __func__, n,
4380 mdi->disk.major, mdi->disk.minor);
4381 return;
4382 }
4383
4384 pd = find_phys(ddf, vc->phys_refnum[n_bvd]);
4385 if (pd < 0 || pd != dl->pdnum) {
4386 /* disk doesn't currently exist or has changed.
4387 * If it is now in_sync, insert it. */
4388 dprintf("%s: phys disk not found for %d: %d/%d ref %08x\n",
4389 __func__, dl->pdnum, dl->major, dl->minor,
4390 be32_to_cpu(dl->disk.refnum));
4391 dprintf("%s: array %u disk %u ref %08x pd %d\n",
4392 __func__, inst, n_bvd,
4393 be32_to_cpu(vc->phys_refnum[n_bvd]), pd);
4394 if ((state & DS_INSYNC) && ! (state & DS_FAULTY) &&
4395 dl->pdnum >= 0) {
4396 pd = dl->pdnum;
4397 vc->phys_refnum[n_bvd] = dl->disk.refnum;
4398 LBA_OFFSET(ddf, vc)[n_bvd] =
4399 cpu_to_be64(mdi->data_offset);
4400 be16_clear(ddf->phys->entries[pd].type,
4401 cpu_to_be16(DDF_Global_Spare));
4402 be16_set(ddf->phys->entries[pd].type,
4403 cpu_to_be16(DDF_Active_in_VD));
4404 update = 1;
4405 }
4406 } else {
4407 be16 old = ddf->phys->entries[pd].state;
4408 if (state & DS_FAULTY)
4409 be16_set(ddf->phys->entries[pd].state,
4410 cpu_to_be16(DDF_Failed));
4411 if (state & DS_INSYNC) {
4412 be16_set(ddf->phys->entries[pd].state,
4413 cpu_to_be16(DDF_Online));
4414 be16_clear(ddf->phys->entries[pd].state,
4415 cpu_to_be16(DDF_Rebuilding));
4416 }
4417 if (!be16_eq(old, ddf->phys->entries[pd].state))
4418 update = 1;
4419 }
4420
4421 dprintf("ddf: set_disk %d (%08x) to %x->%02x\n", n,
4422 be32_to_cpu(dl->disk.refnum), state,
4423 be16_to_cpu(ddf->phys->entries[pd].state));
4424
4425 /* Now we need to check the state of the array and update
4426 * virtual_disk.entries[n].state.
4427 * It needs to be one of "optimal", "degraded", "failed".
4428 * I don't understand 'deleted' or 'missing'.
4429 */
4430 state = get_svd_state(ddf, vcl);
4431
4432 if (ddf->virt->entries[inst].state !=
4433 ((ddf->virt->entries[inst].state & ~DDF_state_mask)
4434 | state)) {
4435 ddf->virt->entries[inst].state =
4436 (ddf->virt->entries[inst].state & ~DDF_state_mask)
4437 | state;
4438 update = 1;
4439 }
4440 if (update)
4441 ddf_set_updates_pending(ddf, vc);
4442 }
4443
4444 static void ddf_sync_metadata(struct supertype *st)
4445 {
4446 /*
4447 * Write all data to all devices.
4448 * Later, we might be able to track whether only local changes
4449 * have been made, or whether any global data has been changed,
4450 * but ddf is sufficiently weird that it probably always
4451 * changes global data ....
4452 */
4453 struct ddf_super *ddf = st->sb;
4454 if (!ddf->updates_pending)
4455 return;
4456 ddf->updates_pending = 0;
4457 __write_init_super_ddf(st);
4458 dprintf("ddf: sync_metadata\n");
4459 }
4460
4461 static int del_from_conflist(struct vcl **list, const char *guid)
4462 {
4463 struct vcl **p;
4464 int found = 0;
4465 for (p = list; p && *p; p = &((*p)->next))
4466 if (!memcmp((*p)->conf.guid, guid, DDF_GUID_LEN)) {
4467 found = 1;
4468 *p = (*p)->next;
4469 }
4470 return found;
4471 }
4472
4473 static int _kill_subarray_ddf(struct ddf_super *ddf, const char *guid)
4474 {
4475 struct dl *dl;
4476 unsigned int vdnum, i;
4477 vdnum = find_vde_by_guid(ddf, guid);
4478 if (vdnum == DDF_NOTFOUND) {
4479 pr_err("%s: could not find VD %s\n", __func__,
4480 guid_str(guid));
4481 return -1;
4482 }
4483 if (del_from_conflist(&ddf->conflist, guid) == 0) {
4484 pr_err("%s: could not find conf %s\n", __func__,
4485 guid_str(guid));
4486 return -1;
4487 }
4488 for (dl = ddf->dlist; dl; dl = dl->next)
4489 for (i = 0; i < ddf->max_part; i++)
4490 if (dl->vlist[i] != NULL &&
4491 !memcmp(dl->vlist[i]->conf.guid, guid,
4492 DDF_GUID_LEN))
4493 dl->vlist[i] = NULL;
4494 memset(ddf->virt->entries[vdnum].guid, 0xff, DDF_GUID_LEN);
4495 dprintf("%s: deleted %s\n", __func__, guid_str(guid));
4496 return 0;
4497 }
4498
4499 static int kill_subarray_ddf(struct supertype *st)
4500 {
4501 struct ddf_super *ddf = st->sb;
4502 /*
4503 * currentconf is set in container_content_ddf,
4504 * called with subarray arg
4505 */
4506 struct vcl *victim = ddf->currentconf;
4507 struct vd_config *conf;
4508 unsigned int vdnum;
4509
4510 ddf->currentconf = NULL;
4511 if (!victim) {
4512 pr_err("%s: nothing to kill\n", __func__);
4513 return -1;
4514 }
4515 conf = &victim->conf;
4516 vdnum = find_vde_by_guid(ddf, conf->guid);
4517 if (vdnum == DDF_NOTFOUND) {
4518 pr_err("%s: could not find VD %s\n", __func__,
4519 guid_str(conf->guid));
4520 return -1;
4521 }
4522 if (st->update_tail) {
4523 struct virtual_disk *vd;
4524 int len = sizeof(struct virtual_disk)
4525 + sizeof(struct virtual_entry);
4526 vd = xmalloc(len);
4527 if (vd == NULL) {
4528 pr_err("%s: failed to allocate %d bytes\n", __func__,
4529 len);
4530 return -1;
4531 }
4532 memset(vd, 0, len);
4533 vd->magic = DDF_VIRT_RECORDS_MAGIC;
4534 vd->populated_vdes = cpu_to_be16(0);
4535 memcpy(vd->entries[0].guid, conf->guid, DDF_GUID_LEN);
4536 /* we use DDF_state_deleted as marker */
4537 vd->entries[0].state = DDF_state_deleted;
4538 append_metadata_update(st, vd, len);
4539 } else {
4540 _kill_subarray_ddf(ddf, conf->guid);
4541 ddf_set_updates_pending(ddf, NULL);
4542 ddf_sync_metadata(st);
4543 }
4544 return 0;
4545 }
4546
4547 static void copy_matching_bvd(struct ddf_super *ddf,
4548 struct vd_config *conf,
4549 const struct metadata_update *update)
4550 {
4551 unsigned int mppe =
4552 be16_to_cpu(ddf->anchor.max_primary_element_entries);
4553 unsigned int len = ddf->conf_rec_len * 512;
4554 char *p;
4555 struct vd_config *vc;
4556 for (p = update->buf; p < update->buf + update->len; p += len) {
4557 vc = (struct vd_config *) p;
4558 if (vc->sec_elmnt_seq == conf->sec_elmnt_seq) {
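/* phys_refnum[] is immediately followed by the LBA offset table, so a
 * single copy of mppe * (sizeof(__u32) + sizeof(__u64)) bytes updates
 * both.
 */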
4559 memcpy(conf->phys_refnum, vc->phys_refnum,
4560 mppe * (sizeof(__u32) + sizeof(__u64)));
4561 return;
4562 }
4563 }
4564 pr_err("%s: no match for BVD %d of %s in update\n", __func__,
4565 conf->sec_elmnt_seq, guid_str(conf->guid));
4566 }
4567
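/* A phys_disk update carries exactly one entry; used_pdes names the slot
 * being changed. An entry with DDF_Missing set removes that disk,
 * otherwise the entry is installed into that slot provided it is still
 * unused.
 */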
4568 static void ddf_process_phys_update(struct supertype *st,
4569 struct metadata_update *update)
4570 {
4571 struct ddf_super *ddf = st->sb;
4572 struct phys_disk *pd;
4573 unsigned int ent;
4574
4575 pd = (struct phys_disk*)update->buf;
4576 ent = be16_to_cpu(pd->used_pdes);
4577 if (ent >= be16_to_cpu(ddf->phys->max_pdes))
4578 return;
4579 if (be16_and(pd->entries[0].state, cpu_to_be16(DDF_Missing))) {
4580 struct dl **dlp;
4581 /* removing this disk. */
4582 be16_set(ddf->phys->entries[ent].state,
4583 cpu_to_be16(DDF_Missing));
4584 for (dlp = &ddf->dlist; *dlp; dlp = &(*dlp)->next) {
4585 struct dl *dl = *dlp;
4586 if (dl->pdnum == (signed)ent) {
4587 close(dl->fd);
4588 dl->fd = -1;
4589 *dlp = dl->next;
4590 update->space = dl->devname;
4591 *(void**)dl = update->space_list;
4592 update->space_list = (void**)dl;
4593 break;
4594 }
4595 }
4596 ddf_set_updates_pending(ddf, NULL);
4597 return;
4598 }
4599 if (!all_ff(ddf->phys->entries[ent].guid))
4600 return;
4601 ddf->phys->entries[ent] = pd->entries[0];
4602 ddf->phys->used_pdes = cpu_to_be16
4603 (1 + be16_to_cpu(ddf->phys->used_pdes));
4604 ddf_set_updates_pending(ddf, NULL);
4605 if (ddf->add_list) {
4606 struct active_array *a;
4607 struct dl *al = ddf->add_list;
4608 ddf->add_list = al->next;
4609
4610 al->next = ddf->dlist;
4611 ddf->dlist = al;
4612
4613 /* As a device has been added, we should check
4614 * for any degraded devices that might make
4615 * use of this spare */
4616 for (a = st->arrays ; a; a=a->next)
4617 a->check_degraded = 1;
4618 }
4619 }
4620
4621 static void ddf_process_virt_update(struct supertype *st,
4622 struct metadata_update *update)
4623 {
4624 struct ddf_super *ddf = st->sb;
4625 struct virtual_disk *vd;
4626 unsigned int ent;
4627
4628 vd = (struct virtual_disk*)update->buf;
4629
4630 if (vd->entries[0].state == DDF_state_deleted) {
4631 if (_kill_subarray_ddf(ddf, vd->entries[0].guid))
4632 return;
4633 } else {
4634 ent = find_vde_by_guid(ddf, vd->entries[0].guid);
4635 if (ent != DDF_NOTFOUND) {
4636 dprintf("%s: VD %s exists already in slot %d\n",
4637 __func__, guid_str(vd->entries[0].guid),
4638 ent);
4639 return;
4640 }
4641 ent = find_unused_vde(ddf);
4642 if (ent == DDF_NOTFOUND)
4643 return;
4644 ddf->virt->entries[ent] = vd->entries[0];
4645 ddf->virt->populated_vdes =
4646 cpu_to_be16(
4647 1 + be16_to_cpu(
4648 ddf->virt->populated_vdes));
4649 dprintf("%s: added VD %s in slot %d(s=%02x i=%02x)\n",
4650 __func__, guid_str(vd->entries[0].guid), ent,
4651 ddf->virt->entries[ent].state,
4652 ddf->virt->entries[ent].init_state);
4653 }
4654 ddf_set_updates_pending(ddf, NULL);
4655 }
4656
4657 static void ddf_remove_failed(struct ddf_super *ddf)
4658 {
4659 /* Now remove any 'Failed' devices that are not part
4660 * of any VD. They will have the Transition flag set.
4661 * Once done, we need to update all dl->pdnum numbers.
4662 */
4663 unsigned int pdnum;
4664 unsigned int pd2 = 0;
4665 struct dl *dl;
4666
4667 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
4668 pdnum++) {
4669 if (be32_to_cpu(ddf->phys->entries[pdnum].refnum) ==
4670 0xFFFFFFFF)
4671 continue;
4672 if (be16_and(ddf->phys->entries[pdnum].state,
4673 cpu_to_be16(DDF_Failed))
4674 && be16_and(ddf->phys->entries[pdnum].state,
4675 cpu_to_be16(DDF_Transition))) {
4676 /* skip this one unless in dlist*/
4677 for (dl = ddf->dlist; dl; dl = dl->next)
4678 if (dl->pdnum == (int)pdnum)
4679 break;
4680 if (!dl)
4681 continue;
4682 }
4683 if (pdnum == pd2)
4684 pd2++;
4685 else {
4686 ddf->phys->entries[pd2] =
4687 ddf->phys->entries[pdnum];
4688 for (dl = ddf->dlist; dl; dl = dl->next)
4689 if (dl->pdnum == (int)pdnum)
4690 dl->pdnum = pd2;
4691 pd2++;
4692 }
4693 }
4694 ddf->phys->used_pdes = cpu_to_be16(pd2);
4695 while (pd2 < pdnum) {
4696 memset(ddf->phys->entries[pd2].guid, 0xff,
4697 DDF_GUID_LEN);
4698 pd2++;
4699 }
4700 }
4701
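/* Rebuild dl->vlist[] for one disk from the current conflist and refresh
 * its type bits: a disk used by some VD loses Global_Spare and gains
 * Active_in_VD (plus Rebuilding if that VD is degraded), a disk with a
 * spare assignment record becomes Spare, and an unused disk reverts to
 * Global_Spare.
 */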
4702 static void ddf_update_vlist(struct ddf_super *ddf, struct dl *dl)
4703 {
4704 struct vcl *vcl;
4705 unsigned int vn = 0;
4706 int in_degraded = 0;
4707
4708 if (dl->pdnum < 0)
4709 return;
4710 for (vcl = ddf->conflist; vcl ; vcl = vcl->next) {
4711 unsigned int dn, ibvd;
4712 const struct vd_config *conf;
4713 int vstate;
4714 dn = get_pd_index_from_refnum(vcl,
4715 dl->disk.refnum,
4716 ddf->mppe,
4717 &conf, &ibvd);
4718 if (dn == DDF_NOTFOUND)
4719 continue;
4720 dprintf("dev %d/%08x has %s (sec=%u) at %d\n",
4721 dl->pdnum,
4722 be32_to_cpu(dl->disk.refnum),
4723 guid_str(conf->guid),
4724 conf->sec_elmnt_seq, vn);
4725 /* Clear the Transition flag */
4726 if (be16_and
4727 (ddf->phys->entries[dl->pdnum].state,
4728 cpu_to_be16(DDF_Failed)))
4729 be16_clear(ddf->phys
4730 ->entries[dl->pdnum].state,
4731 cpu_to_be16(DDF_Transition));
4732 dl->vlist[vn++] = vcl;
4733 vstate = ddf->virt->entries[vcl->vcnum].state
4734 & DDF_state_mask;
4735 if (vstate == DDF_state_degraded ||
4736 vstate == DDF_state_part_optimal)
4737 in_degraded = 1;
4738 }
4739 while (vn < ddf->max_part)
4740 dl->vlist[vn++] = NULL;
4741 if (dl->vlist[0]) {
4742 be16_clear(ddf->phys->entries[dl->pdnum].type,
4743 cpu_to_be16(DDF_Global_Spare));
4744 if (!be16_and(ddf->phys
4745 ->entries[dl->pdnum].type,
4746 cpu_to_be16(DDF_Active_in_VD))) {
4747 be16_set(ddf->phys
4748 ->entries[dl->pdnum].type,
4749 cpu_to_be16(DDF_Active_in_VD));
4750 if (in_degraded)
4751 be16_set(ddf->phys
4752 ->entries[dl->pdnum]
4753 .state,
4754 cpu_to_be16
4755 (DDF_Rebuilding));
4756 }
4757 }
4758 if (dl->spare) {
4759 be16_clear(ddf->phys->entries[dl->pdnum].type,
4760 cpu_to_be16(DDF_Global_Spare));
4761 be16_set(ddf->phys->entries[dl->pdnum].type,
4762 cpu_to_be16(DDF_Spare));
4763 }
4764 if (!dl->vlist[0] && !dl->spare) {
4765 be16_set(ddf->phys->entries[dl->pdnum].type,
4766 cpu_to_be16(DDF_Global_Spare));
4767 be16_clear(ddf->phys->entries[dl->pdnum].type,
4768 cpu_to_be16(DDF_Spare));
4769 be16_clear(ddf->phys->entries[dl->pdnum].type,
4770 cpu_to_be16(DDF_Active_in_VD));
4771 }
4772 }
4773
4774 static void ddf_process_conf_update(struct supertype *st,
4775 struct metadata_update *update)
4776 {
4777 struct ddf_super *ddf = st->sb;
4778 struct vd_config *vc;
4779 struct vcl *vcl;
4780 struct dl *dl;
4781 unsigned int ent;
4782 unsigned int pdnum, len;
4783
4784 vc = (struct vd_config*)update->buf;
4785 len = ddf->conf_rec_len * 512;
4786 if ((unsigned int)update->len != len * vc->sec_elmnt_count) {
4787 pr_err("%s: %s: insufficient data (%d) for %u BVDs\n",
4788 __func__, guid_str(vc->guid), update->len,
4789 vc->sec_elmnt_count);
4790 return;
4791 }
4792 for (vcl = ddf->conflist; vcl ; vcl = vcl->next)
4793 if (memcmp(vcl->conf.guid, vc->guid, DDF_GUID_LEN) == 0)
4794 break;
4795 dprintf("%s: conf update for %s (%s)\n", __func__,
4796 guid_str(vc->guid), (vcl ? "old" : "new"));
4797 if (vcl) {
4798 /* An update, just copy the phys_refnum and lba_offset
4799 * fields
4800 */
4801 unsigned int i;
4802 unsigned int k;
4803 copy_matching_bvd(ddf, &vcl->conf, update);
4804 for (k = 0; k < be16_to_cpu(vc->prim_elmnt_count); k++)
4805 dprintf("BVD %u has %08x at %llu\n", 0,
4806 be32_to_cpu(vcl->conf.phys_refnum[k]),
4807 be64_to_cpu(LBA_OFFSET(ddf,
4808 &vcl->conf)[k]));
4809 for (i = 1; i < vc->sec_elmnt_count; i++) {
4810 copy_matching_bvd(ddf, vcl->other_bvds[i-1],
4811 update);
4812 for (k = 0; k < be16_to_cpu(
4813 vc->prim_elmnt_count); k++)
4814 dprintf("BVD %u has %08x at %llu\n", i,
4815 be32_to_cpu
4816 (vcl->other_bvds[i-1]->
4817 phys_refnum[k]),
4818 be64_to_cpu
4819 (LBA_OFFSET
4820 (ddf,
4821 vcl->other_bvds[i-1])[k]));
4822 }
4823 } else {
4824 /* A new VD_CONF */
4825 unsigned int i;
4826 if (!update->space)
4827 return;
4828 vcl = update->space;
4829 update->space = NULL;
4830 vcl->next = ddf->conflist;
4831 memcpy(&vcl->conf, vc, len);
4832 ent = find_vde_by_guid(ddf, vc->guid);
4833 if (ent == DDF_NOTFOUND)
4834 return;
4835 vcl->vcnum = ent;
4836 ddf->conflist = vcl;
4837 for (i = 1; i < vc->sec_elmnt_count; i++)
4838 memcpy(vcl->other_bvds[i-1],
4839 update->buf + len * i, len);
4840 }
4841 /* Set DDF_Transition on all Failed devices - to help
4842 * us detect those that are no longer in use
4843 */
4844 for (pdnum = 0; pdnum < be16_to_cpu(ddf->phys->max_pdes);
4845 pdnum++)
4846 if (be16_and(ddf->phys->entries[pdnum].state,
4847 cpu_to_be16(DDF_Failed)))
4848 be16_set(ddf->phys->entries[pdnum].state,
4849 cpu_to_be16(DDF_Transition));
4850
4851 /* Now make sure vlist is correct for each dl. */
4852 for (dl = ddf->dlist; dl; dl = dl->next)
4853 ddf_update_vlist(ddf, dl);
4854 ddf_remove_failed(ddf);
4855
4856 ddf_set_updates_pending(ddf, vc);
4857 }
4858
4859 static void ddf_process_update(struct supertype *st,
4860 struct metadata_update *update)
4861 {
4862 /* Apply this update to the metadata.
4863 * The first 4 bytes are a DDF_*_MAGIC which guides
4864 * our actions.
4865 * Possible update are:
4866 * DDF_PHYS_RECORDS_MAGIC
4867 * Add a new physical device or remove an old one.
4868 * Changes to this record only happen implicitly.
4869 * used_pdes is the device number.
4870 * DDF_VIRT_RECORDS_MAGIC
4871 * Add a new VD. Possibly also change the 'access' bits.
4872 * populated_vdes is the entry number.
4873 * DDF_VD_CONF_MAGIC
4874 * New or updated VD. the VIRT_RECORD must already
4875 * exist. For an update, phys_refnum and lba_offset
4876 * (at least) are updated, and the VD_CONF must
4877 * be written to precisely those devices listed with
4878 * a phys_refnum.
4879 * DDF_SPARE_ASSIGN_MAGIC
4880 * replacement Spare Assignment Record... but for which device?
4881 *
4882 * So, e.g.:
4883 * - to create a new array, we send a VIRT_RECORD and
4884 * a VD_CONF. Then assemble and start the array.
4885 * - to activate a spare we send a VD_CONF to add the phys_refnum
4886 * and offset. This will also mark the spare as active with
4887 * a spare-assignment record.
4888 */
4889 be32 *magic = (be32 *)update->buf;
4890
4891 dprintf("Process update %x\n", be32_to_cpu(*magic));
4892
4893 if (be32_eq(*magic, DDF_PHYS_RECORDS_MAGIC)) {
4894 if (update->len == (sizeof(struct phys_disk) +
4895 sizeof(struct phys_disk_entry)))
4896 ddf_process_phys_update(st, update);
4897 } else if (be32_eq(*magic, DDF_VIRT_RECORDS_MAGIC)) {
4898 if (update->len == (sizeof(struct virtual_disk) +
4899 sizeof(struct virtual_entry)))
4900 ddf_process_virt_update(st, update);
4901 } else if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4902 ddf_process_conf_update(st, update);
4903 }
4904 /* case DDF_SPARE_ASSIGN_MAGIC */
4905 }
4906
4907 static void ddf_prepare_update(struct supertype *st,
4908 struct metadata_update *update)
4909 {
4910 /* This update arrived at managemon.
4911 * We are about to pass it to monitor.
4912 * If a malloc is needed, do it here.
4913 */
4914 struct ddf_super *ddf = st->sb;
4915 be32 *magic = (be32 *)update->buf;
4916 if (be32_eq(*magic, DDF_VD_CONF_MAGIC)) {
4917 struct vcl *vcl;
4918 struct vd_config *conf = (struct vd_config *) update->buf;
4919 if (posix_memalign(&update->space, 512,
4920 offsetof(struct vcl, conf)
4921 + ddf->conf_rec_len * 512) != 0) {
4922 update->space = NULL;
4923 return;
4924 }
4925 vcl = update->space;
4926 vcl->conf.sec_elmnt_count = conf->sec_elmnt_count;
4927 if (alloc_other_bvds(ddf, vcl) != 0) {
4928 free(update->space);
4929 update->space = NULL;
4930 }
4931 }
4932 }
4933
4934 /*
4935 * Check degraded state of a RAID10.
4936 * returns 2 for good, 1 for degraded, 0 for failed, and -1 for error
4937 */
4938 static int raid10_degraded(struct mdinfo *info)
4939 {
4940 int n_prim, n_bvds;
4941 int i;
4942 struct mdinfo *d;
4943 char *found;
4944 int ret = -1;
4945
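/* The md RAID10 'near' layout is encoded as 0x100 | copies (0x102 means
 * two near copies), so n_prim is the number of copies per mirror group
 * and n_bvds the number of groups (BVDs).
 */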
4946 n_prim = info->array.layout & ~0x100;
4947 n_bvds = info->array.raid_disks / n_prim;
4948 found = xmalloc(n_bvds);
4949 if (found == NULL)
4950 return ret;
4951 memset(found, 0, n_bvds);
4952 for (d = info->devs; d; d = d->next) {
4953 i = d->disk.raid_disk / n_prim;
4954 if (i >= n_bvds) {
4955 pr_err("%s: BUG: invalid raid disk\n", __func__);
4956 goto out;
4957 }
4958 if (d->state_fd > 0)
4959 found[i]++;
4960 }
4961 ret = 2;
4962 for (i = 0; i < n_bvds; i++)
4963 if (!found[i]) {
4964 dprintf("%s: BVD %d/%d failed\n", __func__, i, n_bvds);
4965 ret = 0;
4966 goto out;
4967 } else if (found[i] < n_prim) {
4968 dprintf("%s: BVD %d/%d degraded\n", __func__, i,
4969 n_bvds);
4970 ret = 1;
4971 }
4972 out:
4973 free(found);
4974 return ret;
4975 }
4976
4977 /*
4978 * Check if the array 'a' is degraded but not failed.
4979 * If it is, find as many spares as are available and needed and
4980 * arrange for their inclusion.
4981 * We only choose devices which are not already in the array,
4982 * and prefer those with a spare-assignment to this array.
4983 * Otherwise we choose global spares - assuming always that
4984 * there is enough room.
4985 * For each spare that we assign, we return an 'mdinfo' which
4986 * describes the position for the device in the array.
4987 * We also add to 'updates' a DDF_VD_CONF_MAGIC update with
4988 * the new phys_refnum and lba_offset values.
4989 *
4990 * Only worry about BVDs at the moment.
4991 */
4992 static struct mdinfo *ddf_activate_spare(struct active_array *a,
4993 struct metadata_update **updates)
4994 {
4995 int working = 0;
4996 struct mdinfo *d;
4997 struct ddf_super *ddf = a->container->sb;
4998 int global_ok = 0;
4999 struct mdinfo *rv = NULL;
5000 struct mdinfo *di;
5001 struct metadata_update *mu;
5002 struct dl *dl;
5003 int i;
5004 unsigned int j;
5005 struct vcl *vcl;
5006 struct vd_config *vc;
5007 unsigned int n_bvd;
5008
5009 for (d = a->info.devs ; d ; d = d->next) {
5010 if ((d->curr_state & DS_FAULTY) &&
5011 d->state_fd >= 0)
5012 /* wait for Removal to happen */
5013 return NULL;
5014 if (d->state_fd >= 0)
5015 working ++;
5016 }
5017
5018 dprintf("%s: working=%d (%d) level=%d\n", __func__, working,
5019 a->info.array.raid_disks,
5020 a->info.array.level);
5021 if (working == a->info.array.raid_disks)
5022 return NULL; /* array not degraded */
5023 switch (a->info.array.level) {
5024 case 1:
5025 if (working == 0)
5026 return NULL; /* failed */
5027 break;
5028 case 4:
5029 case 5:
5030 if (working < a->info.array.raid_disks - 1)
5031 return NULL; /* failed */
5032 break;
5033 case 6:
5034 if (working < a->info.array.raid_disks - 2)
5035 return NULL; /* failed */
5036 break;
5037 case 10:
5038 if (raid10_degraded(&a->info) < 1)
5039 return NULL;
5040 break;
5041 default: /* concat or stripe */
5042 return NULL; /* failed */
5043 }
5044
5045 /* For each slot, if it is not working, find a spare */
5046 dl = ddf->dlist;
5047 for (i = 0; i < a->info.array.raid_disks; i++) {
5048 for (d = a->info.devs ; d ; d = d->next)
5049 if (d->disk.raid_disk == i)
5050 break;
5051 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
5052 if (d && (d->state_fd >= 0))
5053 continue;
5054
5055 /* OK, this device needs recovery. Find a spare */
5056 again:
5057 for ( ; dl ; dl = dl->next) {
5058 unsigned long long esize;
5059 unsigned long long pos;
5060 struct mdinfo *d2;
5061 int is_global = 0;
5062 int is_dedicated = 0;
5063 be16 state;
5064
5065 if (dl->pdnum < 0)
5066 continue;
5067 state = ddf->phys->entries[dl->pdnum].state;
5068 if (be16_and(state,
5069 cpu_to_be16(DDF_Failed|DDF_Missing)) ||
5070 !be16_and(state,
5071 cpu_to_be16(DDF_Online)))
5072 continue;
5073
5074 /* If in this array, skip */
5075 for (d2 = a->info.devs ; d2 ; d2 = d2->next)
5076 if (d2->state_fd >= 0 &&
5077 d2->disk.major == dl->major &&
5078 d2->disk.minor == dl->minor) {
5079 dprintf("%x:%x (%08x) already in array\n",
5080 dl->major, dl->minor,
5081 be32_to_cpu(dl->disk.refnum));
5082 break;
5083 }
5084 if (d2)
5085 continue;
5086 if (be16_and(ddf->phys->entries[dl->pdnum].type,
5087 cpu_to_be16(DDF_Spare))) {
5088 /* Check spare assign record */
5089 if (dl->spare) {
5090 if (dl->spare->type & DDF_spare_dedicated) {
5091 /* check spare_ents for guid */
5092 unsigned int j;
5093 for (j = 0 ;
5094 j < be16_to_cpu
5095 (dl->spare
5096 ->populated);
5097 j++) {
5098 if (memcmp(dl->spare->spare_ents[j].guid,
5099 ddf->virt->entries[a->info.container_member].guid,
5100 DDF_GUID_LEN) == 0)
5101 is_dedicated = 1;
5102 }
5103 } else
5104 is_global = 1;
5105 }
5106 } else if (be16_and(ddf->phys->entries[dl->pdnum].type,
5107 cpu_to_be16(DDF_Global_Spare))) {
5108 is_global = 1;
5109 } else if (!be16_and(ddf->phys
5110 ->entries[dl->pdnum].state,
5111 cpu_to_be16(DDF_Failed))) {
5112 /* we can possibly use some of this */
5113 is_global = 1;
5114 }
5115 if ( ! (is_dedicated ||
5116 (is_global && global_ok))) {
5117 dprintf("%x:%x not suitable: %d %d\n", dl->major, dl->minor,
5118 is_dedicated, is_global);
5119 continue;
5120 }
5121
5122 /* We are allowed to use this device - is there space?
5123 * We need a->info.component_size sectors */
5124 esize = a->info.component_size;
5125 pos = find_space(ddf, dl, INVALID_SECTORS, &esize);
5126
5127 if (esize < a->info.component_size) {
5128 dprintf("%x:%x has no room: %llu %llu\n",
5129 dl->major, dl->minor,
5130 esize, a->info.component_size);
5131 /* No room */
5132 continue;
5133 }
5134
5135 /* Cool, we have a device with some space at pos */
5136 di = xcalloc(1, sizeof(*di));
5137 di->disk.number = i;
5138 di->disk.raid_disk = i;
5139 di->disk.major = dl->major;
5140 di->disk.minor = dl->minor;
5141 di->disk.state = 0;
5142 di->recovery_start = 0;
5143 di->data_offset = pos;
5144 di->component_size = a->info.component_size;
5145 di->next = rv;
5146 rv = di;
5147 dprintf("%x:%x (%08x) to be %d at %llu\n",
5148 dl->major, dl->minor,
5149 be32_to_cpu(dl->disk.refnum), i, pos);
5150
5151 break;
5152 }
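/* The first pass considers dedicated spares only; if none was usable,
 * rescan the disk list accepting global spares as well.
 */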
5153 if (!dl && ! global_ok) {
5154 /* not enough dedicated spares, try global */
5155 global_ok = 1;
5156 dl = ddf->dlist;
5157 goto again;
5158 }
5159 }
5160
5161 if (!rv)
5162 /* No spares found */
5163 return rv;
5164 /* Now 'rv' has a list of devices to return.
5165 * Create a metadata_update record to update the
5166 * phys_refnum and lba_offset values
5167 */
5168 vc = find_vdcr(ddf, a->info.container_member, rv->disk.raid_disk,
5169 &n_bvd, &vcl);
5170 if (vc == NULL)
5171 return NULL;
5172
5173 mu = xmalloc(sizeof(*mu));
5174 if (posix_memalign(&mu->space, 512, sizeof(struct vcl)) != 0) {
5175 free(mu);
5176 return NULL;
5177 }
5178
5179 mu->len = ddf->conf_rec_len * 512 * vcl->conf.sec_elmnt_count;
5180 mu->buf = xmalloc(mu->len);
5181 mu->space = NULL;
5182 mu->space_list = NULL;
5183 mu->next = *updates;
5184 memcpy(mu->buf, &vcl->conf, ddf->conf_rec_len * 512);
5185 for (j = 1; j < vcl->conf.sec_elmnt_count; j++)
5186 memcpy(mu->buf + j * ddf->conf_rec_len * 512,
5187 vcl->other_bvds[j-1], ddf->conf_rec_len * 512);
5188
5189 vc = (struct vd_config*)mu->buf;
5190 for (di = rv ; di ; di = di->next) {
5191 unsigned int i_sec, i_prim;
5192 i_sec = di->disk.raid_disk
5193 / be16_to_cpu(vcl->conf.prim_elmnt_count);
5194 i_prim = di->disk.raid_disk
5195 % be16_to_cpu(vcl->conf.prim_elmnt_count);
5196 vc = (struct vd_config *)(mu->buf
5197 + i_sec * ddf->conf_rec_len * 512);
5198 for (dl = ddf->dlist; dl; dl = dl->next)
5199 if (dl->major == di->disk.major
5200 && dl->minor == di->disk.minor)
5201 break;
5202 if (!dl || dl->pdnum < 0) {
5203 pr_err("%s: BUG: can't find disk %d (%d/%d)\n",
5204 __func__, di->disk.raid_disk,
5205 di->disk.major, di->disk.minor);
5206 return NULL;
5207 }
5208 vc->phys_refnum[i_prim] = ddf->phys->entries[dl->pdnum].refnum;
5209 LBA_OFFSET(ddf, vc)[i_prim] = cpu_to_be64(di->data_offset);
5210 dprintf("BVD %u gets %u: %08x at %llu\n", i_sec, i_prim,
5211 be32_to_cpu(vc->phys_refnum[i_prim]),
5212 be64_to_cpu(LBA_OFFSET(ddf, vc)[i_prim]));
5213 }
5214 *updates = mu;
5215 return rv;
5216 }
5217 #endif /* MDASSEMBLE */
5218
5219 static int ddf_level_to_layout(int level)
5220 {
5221 switch(level) {
5222 case 0:
5223 case 1:
5224 return 0;
5225 case 5:
5226 return ALGORITHM_LEFT_SYMMETRIC;
5227 case 6:
5228 return ALGORITHM_ROTATING_N_CONTINUE;
5229 case 10:
5230 return 0x102;
5231 default:
5232 return UnSet;
5233 }
5234 }
5235
5236 static void default_geometry_ddf(struct supertype *st, int *level, int *layout, int *chunk)
5237 {
5238 if (level && *level == UnSet)
5239 *level = LEVEL_CONTAINER;
5240
5241 if (level && layout && *layout == UnSet)
5242 *layout = ddf_level_to_layout(*level);
5243 }
5244
5245 struct superswitch super_ddf = {
5246 #ifndef MDASSEMBLE
5247 .examine_super = examine_super_ddf,
5248 .brief_examine_super = brief_examine_super_ddf,
5249 .brief_examine_subarrays = brief_examine_subarrays_ddf,
5250 .export_examine_super = export_examine_super_ddf,
5251 .detail_super = detail_super_ddf,
5252 .brief_detail_super = brief_detail_super_ddf,
5253 .validate_geometry = validate_geometry_ddf,
5254 .write_init_super = write_init_super_ddf,
5255 .add_to_super = add_to_super_ddf,
5256 .remove_from_super = remove_from_super_ddf,
5257 .load_container = load_container_ddf,
5258 .copy_metadata = copy_metadata_ddf,
5259 .kill_subarray = kill_subarray_ddf,
5260 #endif
5261 .match_home = match_home_ddf,
5262 .uuid_from_super= uuid_from_super_ddf,
5263 .getinfo_super = getinfo_super_ddf,
5264 .update_super = update_super_ddf,
5265
5266 .avail_size = avail_size_ddf,
5267
5268 .compare_super = compare_super_ddf,
5269
5270 .load_super = load_super_ddf,
5271 .init_super = init_super_ddf,
5272 .store_super = store_super_ddf,
5273 .free_super = free_super_ddf,
5274 .match_metadata_desc = match_metadata_desc_ddf,
5275 .container_content = container_content_ddf,
5276 .default_geometry = default_geometry_ddf,
5277
5278 .external = 1,
5279
5280 #ifndef MDASSEMBLE
5281 /* for mdmon */
5282 .open_new = ddf_open_new,
5283 .set_array_state= ddf_set_array_state,
5284 .set_disk = ddf_set_disk,
5285 .sync_metadata = ddf_sync_metadata,
5286 .process_update = ddf_process_update,
5287 .prepare_update = ddf_prepare_update,
5288 .activate_spare = ddf_activate_spare,
5289 #endif
5290 .name = "ddf",
5291 };