/*
 * mdadm - Intel(R) Matrix Storage Manager Support
 *
 * Copyright (C) 2002-2008 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define HAVE_STDINT_H 1
#include "mdadm.h"
#include "mdmon.h"
#include "sha1.h"
#include "platform-intel.h"
#include <values.h>
#include <scsi/sg.h>
#include <ctype.h>
#include <dirent.h>

/* MPB == Metadata Parameter Block */
#define MPB_SIGNATURE "Intel Raid ISM Cfg Sig. "
#define MPB_SIG_LEN (strlen(MPB_SIGNATURE))
#define MPB_VERSION_RAID0 "1.0.00"
#define MPB_VERSION_RAID1 "1.1.00"
#define MPB_VERSION_MANY_VOLUMES_PER_ARRAY "1.2.00"
#define MPB_VERSION_3OR4_DISK_ARRAY "1.2.01"
#define MPB_VERSION_RAID5 "1.2.02"
#define MPB_VERSION_5OR6_DISK_ARRAY "1.2.04"
#define MPB_VERSION_CNG "1.2.06"
#define MPB_VERSION_ATTRIBS "1.3.00"
#define MAX_SIGNATURE_LENGTH 32
#define MAX_RAID_SERIAL_LEN 16

#define MPB_ATTRIB_CHECKSUM_VERIFY __cpu_to_le32(0x80000000)
#define MPB_ATTRIB_PM      __cpu_to_le32(0x40000000)
#define MPB_ATTRIB_2TB     __cpu_to_le32(0x20000000)
#define MPB_ATTRIB_RAID0   __cpu_to_le32(0x00000001)
#define MPB_ATTRIB_RAID1   __cpu_to_le32(0x00000002)
#define MPB_ATTRIB_RAID10  __cpu_to_le32(0x00000004)
#define MPB_ATTRIB_RAID1E  __cpu_to_le32(0x00000008)
#define MPB_ATTRIB_RAID5   __cpu_to_le32(0x00000010)
#define MPB_ATTRIB_RAIDCNG __cpu_to_le32(0x00000020)

#define MPB_SECTOR_CNT 418
#define IMSM_RESERVED_SECTORS 4096
#define SECT_PER_MB_SHIFT 11
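
/* Illustrative note: SECT_PER_MB_SHIFT converts between 512-byte sectors
 * and mebibytes, since 1 MiB = 2^20 bytes = 2^11 sectors, e.g.:
 *
 *	__u32 size_mb = num_sectors >> SECT_PER_MB_SHIFT;
 */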

/* Disk configuration info. */
#define IMSM_MAX_DEVICES 255
struct imsm_disk {
	__u8 serial[MAX_RAID_SERIAL_LEN];/* 0xD8 - 0xE7 ascii serial number */
	__u32 total_blocks;		 /* 0xE8 - 0xEB total blocks */
	__u32 scsi_id;			 /* 0xEC - 0xEF scsi ID */
#define SPARE_DISK      __cpu_to_le32(0x01)  /* Spare */
#define CONFIGURED_DISK __cpu_to_le32(0x02)  /* Member of some RaidDev */
#define FAILED_DISK     __cpu_to_le32(0x04)  /* Permanent failure */
#define USABLE_DISK     __cpu_to_le32(0x08)  /* Fully usable unless FAILED_DISK is set */
	__u32 status;			 /* 0xF0 - 0xF3 */
	__u32 owner_cfg_num;		 /* which config 0,1,2... owns this disk */
#define IMSM_DISK_FILLERS 4
	__u32 filler[IMSM_DISK_FILLERS]; /* 0xF4 - 0x107 MPB_DISK_FILLERS for future expansion */
};

/* RAID map configuration infos. */
struct imsm_map {
	__u32 pba_of_lba0;	/* start address of partition */
	__u32 blocks_per_member;/* blocks per member */
	__u32 num_data_stripes;	/* number of data stripes */
	__u16 blocks_per_strip;
	__u8  map_state;	/* Normal, Uninitialized, Degraded, Failed */
#define IMSM_T_STATE_NORMAL 0
#define IMSM_T_STATE_UNINITIALIZED 1
#define IMSM_T_STATE_DEGRADED 2
#define IMSM_T_STATE_FAILED 3
	__u8  raid_level;
#define IMSM_T_RAID0 0
#define IMSM_T_RAID1 1
#define IMSM_T_RAID5 5		/* since metadata version 1.2.02 ? */
	__u8  num_members;	/* number of member disks */
	__u8  num_domains;	/* number of parity domains */
	__u8  failed_disk_num;	/* valid only when state is degraded */
	__u8  ddf;
	__u32 filler[7];	/* expansion area */
#define IMSM_ORD_REBUILD (1 << 24)
	__u32 disk_ord_tbl[1];	/* disk_ord_tbl[num_members],
				 * top byte contains some flags
				 */
} __attribute__ ((packed));
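
/* Illustrative sketch: each disk_ord_tbl entry packs a disk index in the
 * low 24 bits and flags (currently only IMSM_ORD_REBUILD) in the top byte:
 *
 *	__u32 ord = __le32_to_cpu(map->disk_ord_tbl[slot]);
 *	int rebuilding = (ord & IMSM_ORD_REBUILD) != 0;
 *	int disk_index = ord & 0xffffff; // what ord_to_idx() below computes
 */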

struct imsm_vol {
	__u32 curr_migr_unit;
	__u32 checkpoint_id;	/* id to access curr_migr_unit */
	__u8  migr_state;	/* Normal or Migrating */
#define MIGR_INIT 0
#define MIGR_REBUILD 1
#define MIGR_VERIFY 2		/* analogous to echo check > sync_action */
#define MIGR_GEN_MIGR 3
#define MIGR_STATE_CHANGE 4
#define MIGR_REPAIR 5
	__u8  migr_type;	/* Initializing, Rebuilding, ... */
	__u8  dirty;
	__u8  fs_state;		/* fast-sync state for CnG (0xff == disabled) */
	__u16 verify_errors;	/* number of mismatches */
	__u16 bad_blocks;	/* number of bad blocks during verify */
	__u32 filler[4];
	struct imsm_map map[1];
	/* here comes another one if migr_state */
} __attribute__ ((packed));

struct imsm_dev {
	__u8  volume[MAX_RAID_SERIAL_LEN];
	__u32 size_low;
	__u32 size_high;
#define DEV_BOOTABLE		__cpu_to_le32(0x01)
#define DEV_BOOT_DEVICE		__cpu_to_le32(0x02)
#define DEV_READ_COALESCING	__cpu_to_le32(0x04)
#define DEV_WRITE_COALESCING	__cpu_to_le32(0x08)
#define DEV_LAST_SHUTDOWN_DIRTY	__cpu_to_le32(0x10)
#define DEV_HIDDEN_AT_BOOT	__cpu_to_le32(0x20)
#define DEV_CURRENTLY_HIDDEN	__cpu_to_le32(0x40)
#define DEV_VERIFY_AND_FIX	__cpu_to_le32(0x80)
#define DEV_MAP_STATE_UNINIT	__cpu_to_le32(0x100)
#define DEV_NO_AUTO_RECOVERY	__cpu_to_le32(0x200)
#define DEV_CLONE_N_GO		__cpu_to_le32(0x400)
#define DEV_CLONE_MAN_SYNC	__cpu_to_le32(0x800)
#define DEV_CNG_MASTER_DISK_NUM	__cpu_to_le32(0x1000)
	__u32 status;		/* Persistent RaidDev status */
	__u32 reserved_blocks;	/* Reserved blocks at beginning of volume */
	__u8  migr_priority;
	__u8  num_sub_vols;
	__u8  tid;
	__u8  cng_master_disk;
	__u16 cache_policy;
	__u8  cng_state;
	__u8  cng_sub_state;
#define IMSM_DEV_FILLERS 10
	__u32 filler[IMSM_DEV_FILLERS];
	struct imsm_vol vol;
} __attribute__ ((packed));

struct imsm_super {
	__u8 sig[MAX_SIGNATURE_LENGTH];	/* 0x00 - 0x1F */
	__u32 check_sum;		/* 0x20 - 0x23 MPB Checksum */
	__u32 mpb_size;			/* 0x24 - 0x27 Size of MPB */
	__u32 family_num;		/* 0x28 - 0x2B Checksum from first time this config was written */
	__u32 generation_num;		/* 0x2C - 0x2F Incremented each time this array's MPB is written */
	__u32 error_log_size;		/* 0x30 - 0x33 in bytes */
	__u32 attributes;		/* 0x34 - 0x37 */
	__u8 num_disks;			/* 0x38 Number of configured disks */
	__u8 num_raid_devs;		/* 0x39 Number of configured volumes */
	__u8 error_log_pos;		/* 0x3A */
	__u8 fill[1];			/* 0x3B */
	__u32 cache_size;		/* 0x3c - 0x40 in mb */
	__u32 orig_family_num;		/* 0x40 - 0x43 original family num */
	__u32 pwr_cycle_count;		/* 0x44 - 0x47 simulated power cycle count for array */
	__u32 bbm_log_size;		/* 0x48 - 0x4B - size of bad Block Mgmt Log in bytes */
#define IMSM_FILLERS 35
	__u32 filler[IMSM_FILLERS];	/* 0x4C - 0xD7 RAID_MPB_FILLERS */
	struct imsm_disk disk[1];	/* 0xD8 diskTbl[numDisks] */
	/* here comes imsm_dev[num_raid_devs] */
	/* here comes BBM logs */
} __attribute__ ((packed));

#define BBM_LOG_MAX_ENTRIES 254

struct bbm_log_entry {
	__u64 defective_block_start;
#define UNREADABLE 0xFFFFFFFF
	__u32 spare_block_offset;
	__u16 remapped_marked_count;
	__u16 disk_ordinal;
} __attribute__ ((__packed__));

struct bbm_log {
	__u32 signature;	/* 0xABADB10C */
	__u32 entry_count;
	__u32 reserved_spare_block_count; /* 0 */
	__u32 reserved;		/* 0xFFFF */
	__u64 first_spare_lba;
	struct bbm_log_entry mapped_block_entries[BBM_LOG_MAX_ENTRIES];
} __attribute__ ((__packed__));

#ifndef MDASSEMBLE
static char *map_state_str[] = { "normal", "uninitialized", "degraded", "failed" };
#endif

static __u8 migr_type(struct imsm_dev *dev)
{
	if (dev->vol.migr_type == MIGR_VERIFY &&
	    dev->status & DEV_VERIFY_AND_FIX)
		return MIGR_REPAIR;
	else
		return dev->vol.migr_type;
}

static void set_migr_type(struct imsm_dev *dev, __u8 migr_type)
{
	/* for compatibility with older oroms convert MIGR_REPAIR into
	 * MIGR_VERIFY w/ DEV_VERIFY_AND_FIX status
	 */
	if (migr_type == MIGR_REPAIR) {
		dev->vol.migr_type = MIGR_VERIFY;
		dev->status |= DEV_VERIFY_AND_FIX;
	} else {
		dev->vol.migr_type = migr_type;
		dev->status &= ~DEV_VERIFY_AND_FIX;
	}
}
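
/* Illustrative sketch: the two helpers above round-trip MIGR_REPAIR through
 * the legacy on-disk encoding:
 *
 *	set_migr_type(dev, MIGR_REPAIR);
 *	// dev->vol.migr_type is now MIGR_VERIFY with DEV_VERIFY_AND_FIX set
 *	migr_type(dev); // decodes back to MIGR_REPAIR
 */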

static unsigned int sector_count(__u32 bytes)
{
	return ((bytes + (512-1)) & (~(512-1))) / 512;
}

static unsigned int mpb_sectors(struct imsm_super *mpb)
{
	return sector_count(__le32_to_cpu(mpb->mpb_size));
}
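
/* Worked example: sector_count() rounds a byte count up to whole 512-byte
 * sectors, so sector_count(512) == 1, sector_count(513) == 2, and an
 * mpb_size of 0x220 (544) bytes yields mpb_sectors() == 2.
 */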

struct intel_dev {
	struct imsm_dev *dev;
	struct intel_dev *next;
	int index;
};

/* internal representation of IMSM metadata */
struct intel_super {
	union {
		void *buf; /* O_DIRECT buffer for reading/writing metadata */
		struct imsm_super *anchor; /* immovable parameters */
	};
	size_t len; /* size of the 'buf' allocation */
	void *next_buf; /* for realloc'ing buf from the manager */
	size_t next_len;
	int updates_pending; /* count of pending updates for mdmon */
	int creating_imsm; /* flag to indicate container creation */
	int current_vol; /* index of raid device undergoing creation */
	__u32 create_offset; /* common start for 'current_vol' */
	__u32 random; /* random data for seeding new family numbers */
	struct intel_dev *devlist;
	struct dl {
		struct dl *next;
		int index;
		__u8 serial[MAX_RAID_SERIAL_LEN];
		int major, minor;
		char *devname;
		struct imsm_disk disk;
		int fd;
		int extent_cnt;
		struct extent *e; /* for determining freespace @ create */
		int raiddisk; /* slot to fill in autolayout */
	} *disks;
	struct dl *add; /* list of disks to add while mdmon active */
	struct dl *missing; /* disks removed while we weren't looking */
	struct bbm_log *bbm_log;
	const char *hba; /* device path of the raid controller for this metadata */
	const struct imsm_orom *orom; /* platform firmware support */
};

struct extent {
	unsigned long long start, size;
};

/* definition of messages passed to imsm_process_update */
enum imsm_update_type {
	update_activate_spare,
	update_create_array,
	update_add_disk,
};

struct imsm_update_activate_spare {
	enum imsm_update_type type;
	struct dl *dl;
	int slot;
	int array;
	struct imsm_update_activate_spare *next;
};

struct disk_info {
	__u8 serial[MAX_RAID_SERIAL_LEN];
};

struct imsm_update_create_array {
	enum imsm_update_type type;
	int dev_idx;
	struct imsm_dev dev;
};

struct imsm_update_add_disk {
	enum imsm_update_type type;
};

static struct supertype *match_metadata_desc_imsm(char *arg)
{
	struct supertype *st;

	if (strcmp(arg, "imsm") != 0 &&
	    strcmp(arg, "default") != 0)
		return NULL;

	st = malloc(sizeof(*st));
	memset(st, 0, sizeof(*st));
	st->ss = &super_imsm;
	st->max_devs = IMSM_MAX_DEVICES;
	st->minor_version = 0;
	st->sb = NULL;
	return st;
}

#ifndef MDASSEMBLE
static __u8 *get_imsm_version(struct imsm_super *mpb)
{
	return &mpb->sig[MPB_SIG_LEN];
}
#endif

/* retrieve a disk directly from the anchor when the anchor is known to be
 * up-to-date, currently only at load time
 */
static struct imsm_disk *__get_imsm_disk(struct imsm_super *mpb, __u8 index)
{
	if (index >= mpb->num_disks)
		return NULL;
	return &mpb->disk[index];
}

#ifndef MDASSEMBLE
/* retrieve a disk from the parsed metadata */
static struct imsm_disk *get_imsm_disk(struct intel_super *super, __u8 index)
{
	struct dl *d;

	for (d = super->disks; d; d = d->next)
		if (d->index == index)
			return &d->disk;

	return NULL;
}
#endif

/* generate a checksum directly from the anchor when the anchor is known to be
 * up-to-date, currently only at load or write_super after coalescing
 */
static __u32 __gen_imsm_checksum(struct imsm_super *mpb)
{
	__u32 end = mpb->mpb_size / sizeof(end);
	__u32 *p = (__u32 *) mpb;
	__u32 sum = 0;

	while (end--) {
		sum += __le32_to_cpu(*p);
		p++;
	}

	return sum - __le32_to_cpu(mpb->check_sum);
}
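
/* Illustrative sketch (hypothetical usage, not part of this file): because
 * __gen_imsm_checksum() subtracts the stored check_sum from the running sum,
 * it can be applied to a buffer whose checksum field is stale; regenerating
 * the field after an in-place edit looks like:
 *
 *	mpb->check_sum = __cpu_to_le32(__gen_imsm_checksum(mpb));
 */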

static size_t sizeof_imsm_map(struct imsm_map *map)
{
	return sizeof(struct imsm_map) + sizeof(__u32) * (map->num_members - 1);
}

struct imsm_map *get_imsm_map(struct imsm_dev *dev, int second_map)
{
	struct imsm_map *map = &dev->vol.map[0];

	if (second_map && !dev->vol.migr_state)
		return NULL;
	else if (second_map) {
		void *ptr = map;

		return ptr + sizeof_imsm_map(map);
	} else
		return map;
}
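
/* Illustrative sketch: during a migration both maps are live and map 1 is
 * laid out immediately after the variable-length map 0:
 *
 *	struct imsm_map *target  = get_imsm_map(dev, 0); // destination state
 *	struct imsm_map *current = get_imsm_map(dev, 1); // NULL unless migrating
 */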

/* return the size of the device.
 * migr_state increases the returned size if map[0] were to be duplicated
 */
static size_t sizeof_imsm_dev(struct imsm_dev *dev, int migr_state)
{
	size_t size = sizeof(*dev) - sizeof(struct imsm_map) +
		      sizeof_imsm_map(get_imsm_map(dev, 0));

	/* migrating means an additional map */
	if (dev->vol.migr_state)
		size += sizeof_imsm_map(get_imsm_map(dev, 1));
	else if (migr_state)
		size += sizeof_imsm_map(get_imsm_map(dev, 0));

	return size;
}

#ifndef MDASSEMBLE
/* retrieve disk serial number list from a metadata update */
static struct disk_info *get_disk_info(struct imsm_update_create_array *update)
{
	void *u = update;
	struct disk_info *inf;

	inf = u + sizeof(*update) - sizeof(struct imsm_dev) +
	      sizeof_imsm_dev(&update->dev, 0);

	return inf;
}
#endif
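
/* Illustrative sketch (hypothetical names): a create-array update is a
 * single flat allocation with the disk_info list appended after the
 * variable-length imsm_dev, which is the layout get_disk_info() walks to:
 *
 *	len = sizeof(*u) - sizeof(struct imsm_dev) +
 *	      sizeof_imsm_dev(dev, 0) + num_disks * sizeof(struct disk_info);
 *	u = malloc(len);
 */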

static struct imsm_dev *__get_imsm_dev(struct imsm_super *mpb, __u8 index)
{
	int offset;
	int i;
	void *_mpb = mpb;

	if (index >= mpb->num_raid_devs)
		return NULL;

	/* devices start after all disks */
	offset = ((void *) &mpb->disk[mpb->num_disks]) - _mpb;

	for (i = 0; i <= index; i++)
		if (i == index)
			return _mpb + offset;
		else
			offset += sizeof_imsm_dev(_mpb + offset, 0);

	return NULL;
}

static struct imsm_dev *get_imsm_dev(struct intel_super *super, __u8 index)
{
	struct intel_dev *dv;

	if (index >= super->anchor->num_raid_devs)
		return NULL;
	for (dv = super->devlist; dv; dv = dv->next)
		if (dv->index == index)
			return dv->dev;
	return NULL;
}

static __u32 get_imsm_ord_tbl_ent(struct imsm_dev *dev, int slot)
{
	struct imsm_map *map;

	if (dev->vol.migr_state)
		map = get_imsm_map(dev, 1);
	else
		map = get_imsm_map(dev, 0);

	/* top byte identifies disk under rebuild */
	return __le32_to_cpu(map->disk_ord_tbl[slot]);
}

#define ord_to_idx(ord) (((ord) << 8) >> 8)
static __u32 get_imsm_disk_idx(struct imsm_dev *dev, int slot)
{
	__u32 ord = get_imsm_ord_tbl_ent(dev, slot);

	return ord_to_idx(ord);
}

static void set_imsm_ord_tbl_ent(struct imsm_map *map, int slot, __u32 ord)
{
	map->disk_ord_tbl[slot] = __cpu_to_le32(ord);
}

static int get_imsm_disk_slot(struct imsm_map *map, int idx)
{
	int slot;
	__u32 ord;

	for (slot = 0; slot < map->num_members; slot++) {
		ord = __le32_to_cpu(map->disk_ord_tbl[slot]);
		if (ord_to_idx(ord) == idx)
			return slot;
	}

	return -1;
}

static int get_imsm_raid_level(struct imsm_map *map)
{
	if (map->raid_level == 1) {
		if (map->num_members == 2)
			return 1;
		else
			return 10;
	}

	return map->raid_level;
}
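
/* Worked example: imsm has no distinct raid10 level; raid10 is stored as a
 * mirrored (raid_level == 1) map with more than two members, so a 4-member
 * raid_level-1 map is reported as level 10 while a 2-member one stays
 * level 1.
 */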

static int cmp_extent(const void *av, const void *bv)
{
	const struct extent *a = av;
	const struct extent *b = bv;
	if (a->start < b->start)
		return -1;
	if (a->start > b->start)
		return 1;
	return 0;
}

static int count_memberships(struct dl *dl, struct intel_super *super)
{
	int memberships = 0;
	int i;

	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);
		struct imsm_map *map = get_imsm_map(dev, 0);

		if (get_imsm_disk_slot(map, dl->index) >= 0)
			memberships++;
	}

	return memberships;
}

static struct extent *get_extents(struct intel_super *super, struct dl *dl)
{
	/* find a list of used extents on the given physical device */
	struct extent *rv, *e;
	int i;
	int memberships = count_memberships(dl, super);
	__u32 reservation = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;

	rv = malloc(sizeof(struct extent) * (memberships + 1));
	if (!rv)
		return NULL;
	e = rv;

	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);
		struct imsm_map *map = get_imsm_map(dev, 0);

		if (get_imsm_disk_slot(map, dl->index) >= 0) {
			e->start = __le32_to_cpu(map->pba_of_lba0);
			e->size = __le32_to_cpu(map->blocks_per_member);
			e++;
		}
	}
	qsort(rv, memberships, sizeof(*rv), cmp_extent);

	/* determine the start of the metadata
	 * when no raid devices are defined use the default
	 * ...otherwise allow the metadata to truncate the value
	 * as is the case with older versions of imsm
	 */
	if (memberships) {
		struct extent *last = &rv[memberships - 1];
		__u32 remainder;

		remainder = __le32_to_cpu(dl->disk.total_blocks) -
			    (last->start + last->size);
		/* round down to 1k block to satisfy precision of the kernel
		 * 'size' interface
		 */
		remainder &= ~1UL;
		/* make sure remainder is still sane */
		if (remainder < ROUND_UP(super->len, 512) >> 9)
			remainder = ROUND_UP(super->len, 512) >> 9;
		if (reservation > remainder)
			reservation = remainder;
	}
	e->start = __le32_to_cpu(dl->disk.total_blocks) - reservation;
	e->size = 0;
	return rv;
}
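
/* Illustrative sketch (hypothetical helper, not in mdadm): the extent list
 * returned above is sorted by start and terminated by a zero-size sentinel
 * marking the metadata reservation, so free space can be found by scanning
 * the gaps:
 *
 *	static unsigned long long example_largest_gap(struct extent *e)
 *	{
 *		unsigned long long pos = 0, gap = 0;
 *
 *		for (; ; e++) {
 *			if (e->start > pos && e->start - pos > gap)
 *				gap = e->start - pos;
 *			if (e->start + e->size > pos)
 *				pos = e->start + e->size;
 *			if (e->size == 0)
 *				break; // sentinel reached
 *		}
 *		return gap;
 *	}
 */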

/* try to determine how much space is reserved for metadata from
 * the last get_extents() entry, otherwise fall back to the
 * default
 */
static __u32 imsm_reserved_sectors(struct intel_super *super, struct dl *dl)
{
	struct extent *e;
	int i;
	__u32 rv;

	/* for spares just return a minimal reservation which will grow
	 * once the spare is picked up by an array
	 */
	if (dl->index == -1)
		return MPB_SECTOR_CNT;

	e = get_extents(super, dl);
	if (!e)
		return MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;

	/* scroll to last entry */
	for (i = 0; e[i].size; i++)
		continue;

	rv = __le32_to_cpu(dl->disk.total_blocks) - e[i].start;

	free(e);

	return rv;
}

#ifndef MDASSEMBLE
static void print_imsm_dev(struct imsm_dev *dev, char *uuid, int disk_idx)
{
	__u64 sz;
	int slot;
	struct imsm_map *map = get_imsm_map(dev, 0);
	__u32 ord;

	printf("\n");
	printf("[%.16s]:\n", dev->volume);
	printf(" UUID : %s\n", uuid);
	printf(" RAID Level : %d\n", get_imsm_raid_level(map));
	printf(" Members : %d\n", map->num_members);
	slot = get_imsm_disk_slot(map, disk_idx);
	if (slot >= 0) {
		ord = get_imsm_ord_tbl_ent(dev, slot);
		printf(" This Slot : %d%s\n", slot,
		       ord & IMSM_ORD_REBUILD ? " (out-of-sync)" : "");
	} else
		printf(" This Slot : ?\n");
	sz = __le32_to_cpu(dev->size_high);
	sz <<= 32;
	sz += __le32_to_cpu(dev->size_low);
	printf(" Array Size : %llu%s\n", (unsigned long long)sz,
	       human_size(sz * 512));
	sz = __le32_to_cpu(map->blocks_per_member);
	printf(" Per Dev Size : %llu%s\n", (unsigned long long)sz,
	       human_size(sz * 512));
	printf(" Sector Offset : %u\n",
	       __le32_to_cpu(map->pba_of_lba0));
	printf(" Num Stripes : %u\n",
	       __le32_to_cpu(map->num_data_stripes));
	printf(" Chunk Size : %u KiB\n",
	       __le16_to_cpu(map->blocks_per_strip) / 2);
	printf(" Reserved : %d\n", __le32_to_cpu(dev->reserved_blocks));
	printf(" Migrate State : %s", dev->vol.migr_state ? "migrating" : "idle\n");
	if (dev->vol.migr_state) {
		if (migr_type(dev) == MIGR_INIT)
			printf(": initializing\n");
		else if (migr_type(dev) == MIGR_REBUILD)
			printf(": rebuilding\n");
		else if (migr_type(dev) == MIGR_VERIFY)
			printf(": check\n");
		else if (migr_type(dev) == MIGR_GEN_MIGR)
			printf(": general migration\n");
		else if (migr_type(dev) == MIGR_STATE_CHANGE)
			printf(": state change\n");
		else if (migr_type(dev) == MIGR_REPAIR)
			printf(": repair\n");
		else
			printf(": <unknown:%d>\n", migr_type(dev));
	}
	printf(" Map State : %s", map_state_str[map->map_state]);
	if (dev->vol.migr_state) {
		struct imsm_map *map = get_imsm_map(dev, 1);
		printf(" <-- %s", map_state_str[map->map_state]);
	}
	printf("\n");
	printf(" Dirty State : %s\n", dev->vol.dirty ? "dirty" : "clean");
}

static void print_imsm_disk(struct imsm_super *mpb, int index, __u32 reserved)
{
	struct imsm_disk *disk = __get_imsm_disk(mpb, index);
	char str[MAX_RAID_SERIAL_LEN + 1];
	__u32 s;
	__u64 sz;

	if (index < 0)
		return;

	printf("\n");
	snprintf(str, MAX_RAID_SERIAL_LEN + 1, "%s", disk->serial);
	printf(" Disk%02d Serial : %s\n", index, str);
	s = disk->status;
	printf(" State :%s%s%s%s\n", s&SPARE_DISK ? " spare" : "",
	       s&CONFIGURED_DISK ? " active" : "",
	       s&FAILED_DISK ? " failed" : "",
	       s&USABLE_DISK ? " usable" : "");
	printf(" Id : %08x\n", __le32_to_cpu(disk->scsi_id));
	sz = __le32_to_cpu(disk->total_blocks) - reserved;
	printf(" Usable Size : %llu%s\n", (unsigned long long)sz,
	       human_size(sz * 512));
}

static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info);

static void examine_super_imsm(struct supertype *st, char *homehost)
{
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	char str[MAX_SIGNATURE_LENGTH];
	int i;
	struct mdinfo info;
	char nbuf[64];
	__u32 sum;
	__u32 reserved = imsm_reserved_sectors(super, super->disks);

	snprintf(str, MPB_SIG_LEN, "%s", mpb->sig);
	printf(" Magic : %s\n", str);
	snprintf(str, strlen(MPB_VERSION_RAID0), "%s", get_imsm_version(mpb));
	printf(" Version : %s\n", get_imsm_version(mpb));
	printf(" Orig Family : %08x\n", __le32_to_cpu(mpb->orig_family_num));
	printf(" Family : %08x\n", __le32_to_cpu(mpb->family_num));
	printf(" Generation : %08x\n", __le32_to_cpu(mpb->generation_num));
	getinfo_super_imsm(st, &info);
	fname_from_uuid(st, &info, nbuf, ':');
	printf(" UUID : %s\n", nbuf + 5);
	sum = __le32_to_cpu(mpb->check_sum);
	printf(" Checksum : %08x %s\n", sum,
	       __gen_imsm_checksum(mpb) == sum ? "correct" : "incorrect");
	printf(" MPB Sectors : %d\n", mpb_sectors(mpb));
	printf(" Disks : %d\n", mpb->num_disks);
	printf(" RAID Devices : %d\n", mpb->num_raid_devs);
	print_imsm_disk(mpb, super->disks->index, reserved);
	if (super->bbm_log) {
		struct bbm_log *log = super->bbm_log;

		printf("\n");
		printf("Bad Block Management Log:\n");
		printf(" Log Size : %d\n", __le32_to_cpu(mpb->bbm_log_size));
		printf(" Signature : %x\n", __le32_to_cpu(log->signature));
		printf(" Entry Count : %d\n", __le32_to_cpu(log->entry_count));
		printf(" Spare Blocks : %d\n", __le32_to_cpu(log->reserved_spare_block_count));
		printf(" First Spare : %llx\n",
		       (unsigned long long) __le64_to_cpu(log->first_spare_lba));
	}
	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct mdinfo info;
		struct imsm_dev *dev = __get_imsm_dev(mpb, i);

		super->current_vol = i;
		getinfo_super_imsm(st, &info);
		fname_from_uuid(st, &info, nbuf, ':');
		print_imsm_dev(dev, nbuf + 5, super->disks->index);
	}
	for (i = 0; i < mpb->num_disks; i++) {
		if (i == super->disks->index)
			continue;
		print_imsm_disk(mpb, i, reserved);
	}
}

static void brief_examine_super_imsm(struct supertype *st, int verbose)
{
	/* We just write a generic IMSM ARRAY entry */
	struct mdinfo info;
	char nbuf[64];
	char nbuf1[64];
	struct intel_super *super = st->sb;
	int i;

	if (!super->anchor->num_raid_devs) {
		printf("ARRAY metadata=imsm\n");
		return;
	}

	getinfo_super_imsm(st, &info);
	fname_from_uuid(st, &info, nbuf, ':');
	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);

		super->current_vol = i;
		getinfo_super_imsm(st, &info);
		fname_from_uuid(st, &info, nbuf1, ':');
		printf("ARRAY /dev/md/%.16s container=%s member=%d UUID=%s\n",
		       dev->volume, nbuf + 5, i, nbuf1 + 5);
	}
	printf("ARRAY metadata=imsm UUID=%s\n", nbuf + 5);
}

static void export_examine_super_imsm(struct supertype *st)
{
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	struct mdinfo info;
	char nbuf[64];

	getinfo_super_imsm(st, &info);
	fname_from_uuid(st, &info, nbuf, ':');
	printf("MD_METADATA=imsm\n");
	printf("MD_LEVEL=container\n");
	printf("MD_UUID=%s\n", nbuf+5);
	printf("MD_DEVICES=%u\n", mpb->num_disks);
}

static void detail_super_imsm(struct supertype *st, char *homehost)
{
	struct mdinfo info;
	char nbuf[64];

	getinfo_super_imsm(st, &info);
	fname_from_uuid(st, &info, nbuf, ':');
	printf("\n UUID : %s\n", nbuf + 5);
}

static void brief_detail_super_imsm(struct supertype *st)
{
	struct mdinfo info;
	char nbuf[64];
	getinfo_super_imsm(st, &info);
	fname_from_uuid(st, &info, nbuf, ':');
	printf(" UUID=%s", nbuf + 5);
}

static int imsm_read_serial(int fd, char *devname, __u8 *serial);
static void fd2devname(int fd, char *name);

static int imsm_enumerate_ports(const char *hba_path, int port_count, int host_base, int verbose)
{
	/* dump an unsorted list of devices attached to ahci, as well as
	 * non-connected ports
	 */
	int hba_len = strlen(hba_path) + 1;
	struct dirent *ent;
	DIR *dir;
	char *path = NULL;
	int err = 0;
	unsigned long port_mask = (1 << port_count) - 1;

	if (port_count > sizeof(port_mask) * 8) {
		if (verbose)
			fprintf(stderr, Name ": port_count %d out of range\n", port_count);
		return 2;
	}

	/* scroll through /sys/dev/block looking for devices attached to
	 * this hba
	 */
	dir = opendir("/sys/dev/block");
	for (ent = dir ? readdir(dir) : NULL; ent; ent = readdir(dir)) {
		int fd;
		char model[64];
		char vendor[64];
		char buf[1024];
		int major, minor;
		char *device;
		char *c;
		int port;
		int type;

		if (sscanf(ent->d_name, "%d:%d", &major, &minor) != 2)
			continue;
		path = devt_to_devpath(makedev(major, minor));
		if (!path)
			continue;
		if (!path_attached_to_hba(path, hba_path)) {
			free(path);
			path = NULL;
			continue;
		}

		/* retrieve the scsi device type */
		if (asprintf(&device, "/sys/dev/block/%d:%d/device/xxxxxxx", major, minor) < 0) {
			if (verbose)
				fprintf(stderr, Name ": failed to allocate 'device'\n");
			err = 2;
			break;
		}
		sprintf(device, "/sys/dev/block/%d:%d/device/type", major, minor);
		if (load_sys(device, buf) != 0) {
			if (verbose)
				fprintf(stderr, Name ": failed to read device type for %s\n",
					path);
			err = 2;
			free(device);
			break;
		}
		type = strtoul(buf, NULL, 10);

		/* if it's not a disk print the vendor and model */
		if (!(type == 0 || type == 7 || type == 14)) {
			vendor[0] = '\0';
			model[0] = '\0';
			sprintf(device, "/sys/dev/block/%d:%d/device/vendor", major, minor);
			if (load_sys(device, buf) == 0) {
				strncpy(vendor, buf, sizeof(vendor));
				vendor[sizeof(vendor) - 1] = '\0';
				c = (char *) &vendor[sizeof(vendor) - 1];
				while (isspace(*c) || *c == '\0')
					*c-- = '\0';
			}
			sprintf(device, "/sys/dev/block/%d:%d/device/model", major, minor);
			if (load_sys(device, buf) == 0) {
				strncpy(model, buf, sizeof(model));
				model[sizeof(model) - 1] = '\0';
				c = (char *) &model[sizeof(model) - 1];
				while (isspace(*c) || *c == '\0')
					*c-- = '\0';
			}

			if (vendor[0] && model[0])
				sprintf(buf, "%.64s %.64s", vendor, model);
			else
				switch (type) { /* numbers from hald/linux/device.c */
				case 1: sprintf(buf, "tape"); break;
				case 2: sprintf(buf, "printer"); break;
				case 3: sprintf(buf, "processor"); break;
				case 4:
				case 5: sprintf(buf, "cdrom"); break;
				case 6: sprintf(buf, "scanner"); break;
				case 8: sprintf(buf, "media_changer"); break;
				case 9: sprintf(buf, "comm"); break;
				case 12: sprintf(buf, "raid"); break;
				default: sprintf(buf, "unknown");
				}
		} else
			buf[0] = '\0';
		free(device);

		/* chop device path to 'host%d' and calculate the port number */
		c = strchr(&path[hba_len], '/');
		*c = '\0';
		if (sscanf(&path[hba_len], "host%d", &port) == 1)
			port -= host_base;
		else {
			if (verbose) {
				*c = '/'; /* repair the full string */
				fprintf(stderr, Name ": failed to determine port number for %s\n",
					path);
			}
			err = 2;
			break;
		}

		/* mark this port as used */
		port_mask &= ~(1 << port);

		/* print out the device information */
		if (buf[0]) {
			printf(" Port%d : - non-disk device (%s) -\n", port, buf);
			continue;
		}

		fd = dev_open(ent->d_name, O_RDONLY);
		if (fd < 0)
			printf(" Port%d : - disk info unavailable -\n", port);
		else {
			fd2devname(fd, buf);
			printf(" Port%d : %s", port, buf);
			if (imsm_read_serial(fd, NULL, (__u8 *) buf) == 0)
				printf(" (%s)\n", buf);
			else
				printf("()\n");
		}
		close(fd);
		free(path);
		path = NULL;
	}
	if (path)
		free(path);
	if (dir)
		closedir(dir);
	if (err == 0) {
		int i;

		for (i = 0; i < port_count; i++)
			if (port_mask & (1 << i))
				printf(" Port%d : - no device attached -\n", i);
	}

	return err;
}
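
/* Worked example: with port_count == 4 the initial port_mask is 0b1111;
 * finding devices on host0 and host2 clears bits 0 and 2 leaving 0b1010,
 * so Port1 and Port3 are reported as "no device attached".
 */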

static int detail_platform_imsm(int verbose, int enumerate_only)
{
	/* There are two components to imsm platform support, the ahci SATA
	 * controller and the option-rom.  To find the SATA controller we
	 * simply look in /sys/bus/pci/drivers/ahci to see if an ahci
	 * controller with the Intel vendor id is present.  This approach
	 * allows mdadm to leverage the kernel's ahci detection logic, with the
	 * caveat that if ahci.ko is not loaded mdadm will not be able to
	 * detect platform raid capabilities.  The option-rom resides in a
	 * platform "Adapter ROM".  We scan for its signature to retrieve the
	 * platform capabilities.  If raid support is disabled in the BIOS the
	 * option-rom capability structure will not be available.
	 */
	const struct imsm_orom *orom;
	struct sys_dev *list, *hba;
	DIR *dir;
	struct dirent *ent;
	const char *hba_path;
	int host_base = 0;
	int port_count = 0;

	if (enumerate_only) {
		if (check_env("IMSM_NO_PLATFORM") || find_imsm_orom())
			return 0;
		return 2;
	}

	list = find_driver_devices("pci", "ahci");
	for (hba = list; hba; hba = hba->next)
		if (devpath_to_vendor(hba->path) == 0x8086)
			break;

	if (!hba) {
		if (verbose)
			fprintf(stderr, Name ": unable to find active ahci controller\n");
		free_sys_dev(&list);
		return 2;
	} else if (verbose)
		fprintf(stderr, Name ": found Intel SATA AHCI Controller\n");
	hba_path = hba->path;
	hba->path = NULL;
	free_sys_dev(&list);

	orom = find_imsm_orom();
	if (!orom) {
		if (verbose)
			fprintf(stderr, Name ": imsm option-rom not found\n");
		return 2;
	}

	printf(" Platform : Intel(R) Matrix Storage Manager\n");
	printf(" Version : %d.%d.%d.%d\n", orom->major_ver, orom->minor_ver,
	       orom->hotfix_ver, orom->build);
	printf(" RAID Levels :%s%s%s%s%s\n",
	       imsm_orom_has_raid0(orom) ? " raid0" : "",
	       imsm_orom_has_raid1(orom) ? " raid1" : "",
	       imsm_orom_has_raid1e(orom) ? " raid1e" : "",
	       imsm_orom_has_raid10(orom) ? " raid10" : "",
	       imsm_orom_has_raid5(orom) ? " raid5" : "");
	printf(" Chunk Sizes :%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
	       imsm_orom_has_chunk(orom, 2) ? " 2k" : "",
	       imsm_orom_has_chunk(orom, 4) ? " 4k" : "",
	       imsm_orom_has_chunk(orom, 8) ? " 8k" : "",
	       imsm_orom_has_chunk(orom, 16) ? " 16k" : "",
	       imsm_orom_has_chunk(orom, 32) ? " 32k" : "",
	       imsm_orom_has_chunk(orom, 64) ? " 64k" : "",
	       imsm_orom_has_chunk(orom, 128) ? " 128k" : "",
	       imsm_orom_has_chunk(orom, 256) ? " 256k" : "",
	       imsm_orom_has_chunk(orom, 512) ? " 512k" : "",
	       imsm_orom_has_chunk(orom, 1024*1) ? " 1M" : "",
	       imsm_orom_has_chunk(orom, 1024*2) ? " 2M" : "",
	       imsm_orom_has_chunk(orom, 1024*4) ? " 4M" : "",
	       imsm_orom_has_chunk(orom, 1024*8) ? " 8M" : "",
	       imsm_orom_has_chunk(orom, 1024*16) ? " 16M" : "",
	       imsm_orom_has_chunk(orom, 1024*32) ? " 32M" : "",
	       imsm_orom_has_chunk(orom, 1024*64) ? " 64M" : "");
	printf(" Max Disks : %d\n", orom->tds);
	printf(" Max Volumes : %d\n", orom->vpa);
	printf(" I/O Controller : %s\n", hba_path);

	/* find the smallest scsi host number to determine a port number base */
	dir = opendir(hba_path);
	for (ent = dir ? readdir(dir) : NULL; ent; ent = readdir(dir)) {
		int host;

		if (sscanf(ent->d_name, "host%d", &host) != 1)
			continue;
		if (port_count == 0)
			host_base = host;
		else if (host < host_base)
			host_base = host;

		if (host + 1 > port_count + host_base)
			port_count = host + 1 - host_base;
	}
	if (dir)
		closedir(dir);

	if (!port_count || imsm_enumerate_ports(hba_path, port_count,
						host_base, verbose) != 0) {
		if (verbose)
			fprintf(stderr, Name ": failed to enumerate ports\n");
		return 2;
	}

	return 0;
}
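
/* Worked example: an AHCI controller exposing host2..host7 yields
 * host_base == 2 and port_count == 6, so its ports are reported as
 * Port0..Port5 relative to the HBA.
 */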
#endif

static int match_home_imsm(struct supertype *st, char *homehost)
{
	/* the imsm metadata format does not specify any host
	 * identification information.  We return -1 since we can never
	 * confirm nor deny whether a given array is "meant" for this
	 * host.  We rely on compare_super and the 'family_num' fields to
	 * exclude member disks that do not belong, and we rely on
	 * mdadm.conf to specify the arrays that should be assembled.
	 * Auto-assembly may still pick up "foreign" arrays.
	 */

	return -1;
}

static void uuid_from_super_imsm(struct supertype *st, int uuid[4])
{
	/* The uuid returned here is used for:
	 *  uuid to put into bitmap file (Create, Grow)
	 *  uuid for backup header when saving critical section (Grow)
	 *  comparing uuids when re-adding a device into an array
	 *    In these cases the uuid required is that of the data-array,
	 *    not the device-set.
	 *  uuid to recognise same set when adding a missing device back
	 *    to an array.  This is a uuid for the device-set.
	 *
	 * For each of these we can make do with a truncated
	 * or hashed uuid rather than the original, as long as
	 * everyone agrees.
	 * In each case the uuid required is that of the data-array,
	 * not the device-set.
	 */
	/* imsm does not track uuid's so we synthesize one using sha1 on
	 * - the signature (which is constant for all imsm arrays, but no matter)
	 * - the orig_family_num of the container
	 * - the index number of the volume
	 * - the 'serial' number of the volume.
	 * Hopefully these are all constant.
	 */
	struct intel_super *super = st->sb;

	char buf[20];
	struct sha1_ctx ctx;
	struct imsm_dev *dev = NULL;
	__u32 family_num;

	/* some mdadm versions failed to set ->orig_family_num, in which
	 * case fall back to ->family_num.  orig_family_num will be
	 * fixed up with the first metadata update.
	 */
	family_num = super->anchor->orig_family_num;
	if (family_num == 0)
		family_num = super->anchor->family_num;
	sha1_init_ctx(&ctx);
	sha1_process_bytes(super->anchor->sig, MPB_SIG_LEN, &ctx);
	sha1_process_bytes(&family_num, sizeof(__u32), &ctx);
	if (super->current_vol >= 0)
		dev = get_imsm_dev(super, super->current_vol);
	if (dev) {
		__u32 vol = super->current_vol;
		sha1_process_bytes(&vol, sizeof(vol), &ctx);
		sha1_process_bytes(dev->volume, MAX_RAID_SERIAL_LEN, &ctx);
	}
	sha1_finish_ctx(&ctx, buf);
	memcpy(uuid, buf, 4*4);
}

#if 0
static void
get_imsm_numerical_version(struct imsm_super *mpb, int *m, int *p)
{
	__u8 *v = get_imsm_version(mpb);
	__u8 *end = mpb->sig + MAX_SIGNATURE_LENGTH;
	char major[] = { 0, 0, 0 };
	char minor[] = { 0, 0, 0 };
	char patch[] = { 0, 0, 0 };
	char *ver_parse[] = { major, minor, patch };
	int i, j;

	i = j = 0;
	while (*v != '\0' && v < end) {
		if (*v != '.' && j < 2)
			ver_parse[i][j++] = *v;
		else {
			i++;
			j = 0;
		}
		v++;
	}

	*m = strtol(minor, NULL, 0);
	*p = strtol(patch, NULL, 0);
}
#endif

static int imsm_level_to_layout(int level)
{
	switch (level) {
	case 0:
	case 1:
		return 0;
	case 5:
	case 6:
		return ALGORITHM_LEFT_ASYMMETRIC;
	case 10:
		return 0x102;
	}
	return UnSet;
}
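
/* Worked note: 0x102 is md's raid10 layout word for two near copies,
 * i.e. (far_copies << 8) | near_copies with far=1, near=2: the common
 * 'n2' layout.
 */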

static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info)
{
	struct intel_super *super = st->sb;
	struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
	struct imsm_map *map = get_imsm_map(dev, 0);
	struct dl *dl;

	for (dl = super->disks; dl; dl = dl->next)
		if (dl->raiddisk == info->disk.raid_disk)
			break;
	info->container_member = super->current_vol;
	info->array.raid_disks = map->num_members;
	info->array.level = get_imsm_raid_level(map);
	info->array.layout = imsm_level_to_layout(info->array.level);
	info->array.md_minor = -1;
	info->array.ctime = 0;
	info->array.utime = 0;
	info->array.chunk_size = __le16_to_cpu(map->blocks_per_strip) << 9;
	info->array.state = !dev->vol.dirty;
	info->custom_array_size = __le32_to_cpu(dev->size_high);
	info->custom_array_size <<= 32;
	info->custom_array_size |= __le32_to_cpu(dev->size_low);

	info->disk.major = 0;
	info->disk.minor = 0;
	if (dl) {
		info->disk.major = dl->major;
		info->disk.minor = dl->minor;
	}

	info->data_offset = __le32_to_cpu(map->pba_of_lba0);
	info->component_size = __le32_to_cpu(map->blocks_per_member);
	memset(info->uuid, 0, sizeof(info->uuid));

	if (map->map_state == IMSM_T_STATE_UNINITIALIZED || dev->vol.dirty)
		info->resync_start = 0;
	else if (dev->vol.migr_state)
		/* FIXME add curr_migr_unit to resync_start conversion */
		info->resync_start = 0;
	else
		info->resync_start = ~0ULL;

	strncpy(info->name, (char *) dev->volume, MAX_RAID_SERIAL_LEN);
	info->name[MAX_RAID_SERIAL_LEN] = 0;

	info->array.major_version = -1;
	info->array.minor_version = -2;
	sprintf(info->text_version, "/%s/%d",
		devnum2devname(st->container_dev),
		info->container_member);
	info->safe_mode_delay = 4000;  /* 4 secs like the Matrix driver */
	uuid_from_super_imsm(st, info->uuid);
}

/* check the config file to see if we can return a real uuid for this spare */
static void fixup_container_spare_uuid(struct mdinfo *inf)
{
	struct mddev_ident_s *array_list;

	if (inf->array.level != LEVEL_CONTAINER ||
	    memcmp(inf->uuid, uuid_match_any, sizeof(int[4])) != 0)
		return;

	array_list = conf_get_ident(NULL);

	for (; array_list; array_list = array_list->next) {
		if (array_list->uuid_set) {
			struct supertype *_sst; /* spare supertype */
			struct supertype *_cst; /* container supertype */

			_cst = array_list->st;
			_sst = _cst->ss->match_metadata_desc(inf->text_version);
			if (_sst) {
				memcpy(inf->uuid, array_list->uuid, sizeof(int[4]));
				free(_sst);
				break;
			}
		}
	}
}

static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info)
{
	struct intel_super *super = st->sb;
	struct imsm_disk *disk;
	__u32 s;

	if (super->current_vol >= 0) {
		getinfo_super_imsm_volume(st, info);
		return;
	}

	/* Set raid_disks to zero so that Assemble will always pull in valid
	 * spares
	 */
	info->array.raid_disks = 0;
	info->array.level = LEVEL_CONTAINER;
	info->array.layout = 0;
	info->array.md_minor = -1;
	info->array.ctime = 0; /* N/A for imsm */
	info->array.utime = 0;
	info->array.chunk_size = 0;

	info->disk.major = 0;
	info->disk.minor = 0;
	info->disk.raid_disk = -1;
	info->reshape_active = 0;
	info->array.major_version = -1;
	info->array.minor_version = -2;
	strcpy(info->text_version, "imsm");
	info->safe_mode_delay = 0;
	info->disk.number = -1;
	info->disk.state = 0;
	info->name[0] = 0;

	if (super->disks) {
		__u32 reserved = imsm_reserved_sectors(super, super->disks);

		disk = &super->disks->disk;
		info->data_offset = __le32_to_cpu(disk->total_blocks) - reserved;
		info->component_size = reserved;
		s = disk->status;
		info->disk.state = s & CONFIGURED_DISK ? (1 << MD_DISK_ACTIVE) : 0;
		/* we don't change info->disk.raid_disk here because
		 * this state will be finalized in mdmon after we have
		 * found the 'most fresh' version of the metadata
		 */
		info->disk.state |= s & FAILED_DISK ? (1 << MD_DISK_FAULTY) : 0;
		info->disk.state |= s & SPARE_DISK ? 0 : (1 << MD_DISK_SYNC);
	}

	/* only call uuid_from_super_imsm when this disk is part of a populated container,
	 * ->compare_super may have updated the 'num_raid_devs' field for spares
	 */
	if (info->disk.state & (1 << MD_DISK_SYNC) || super->anchor->num_raid_devs)
		uuid_from_super_imsm(st, info->uuid);
	else {
		memcpy(info->uuid, uuid_match_any, sizeof(int[4]));
		fixup_container_spare_uuid(info);
	}
}

static int update_super_imsm(struct supertype *st, struct mdinfo *info,
			     char *update, char *devname, int verbose,
			     int uuid_set, char *homehost)
{
	/* FIXME */

	/* For 'assemble' and 'force' we need to return non-zero if any
	 * change was made.  For others, the return value is ignored.
	 * Update options are:
	 *  force-one : This device looks a bit old but needs to be included,
	 *        update age info appropriately.
	 *  assemble: clear any 'faulty' flag to allow this device to
	 *        be assembled.
	 *  force-array: Array is degraded but being forced, mark it clean
	 *        if that will be needed to assemble it.
	 *
	 *  newdev: not used ????
	 *  grow: Array has gained a new device - this is currently for
	 *        linear only
	 *  resync: mark as dirty so a resync will happen.
	 *  name: update the name - preserving the homehost
	 *
	 * Following are not relevant for this imsm:
	 *  sparc2.2 : update from old dodgy metadata
	 *  super-minor: change the preferred_minor number
	 *  summaries: update redundant counters.
	 *  uuid: Change the uuid of the array to match what is given
	 *  homehost: update the recorded homehost
	 *  _reshape_progress: record new reshape_progress position.
	 */
	int rv = 0;
	//struct intel_super *super = st->sb;
	//struct imsm_super *mpb = super->mpb;

	if (strcmp(update, "grow") == 0) {
	}
	if (strcmp(update, "resync") == 0) {
		/* dev->vol.dirty = 1; */
	}

	/* IMSM has no concept of UUID or homehost */

	return rv;
}

static size_t disks_to_mpb_size(int disks)
{
	size_t size;

	size = sizeof(struct imsm_super);
	size += (disks - 1) * sizeof(struct imsm_disk);
	size += 2 * sizeof(struct imsm_dev);
	/* up to 2 maps per raid device (-2 for the imsm_maps already
	 * counted in imsm_dev)
	 */
	size += (4 - 2) * sizeof(struct imsm_map);
	/* 4 possible disk_ord_tbl's */
	size += 4 * (disks - 1) * sizeof(__u32);

	return size;
}

static __u64 avail_size_imsm(struct supertype *st, __u64 devsize)
{
	if (devsize < (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS))
		return 0;

	return devsize - (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
}
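
/* Worked example: MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS = 418 + 4096 =
 * 4514 sectors (about 2.2 MiB) are held back for metadata, so a member of
 * 2147483648 sectors (1 TiB) offers 2147479134 usable sectors.
 */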

static void free_devlist(struct intel_super *super)
{
	struct intel_dev *dv;

	while (super->devlist) {
		dv = super->devlist->next;
		free(super->devlist->dev);
		free(super->devlist);
		super->devlist = dv;
	}
}

static void imsm_copy_dev(struct imsm_dev *dest, struct imsm_dev *src)
{
	memcpy(dest, src, sizeof_imsm_dev(src, 0));
}

static int compare_super_imsm(struct supertype *st, struct supertype *tst)
{
	/*
	 * return:
	 *  0 same, or first was empty, and second was copied
	 *  1 second had wrong number
	 *  2 wrong uuid
	 *  3 wrong other info
	 */
	struct intel_super *first = st->sb;
	struct intel_super *sec = tst->sb;

	if (!first) {
		st->sb = tst->sb;
		tst->sb = NULL;
		return 0;
	}

	if (memcmp(first->anchor->sig, sec->anchor->sig, MAX_SIGNATURE_LENGTH) != 0)
		return 3;

	/* if an anchor does not have num_raid_devs set then it is a free
	 * floating spare
	 */
	if (first->anchor->num_raid_devs > 0 &&
	    sec->anchor->num_raid_devs > 0) {
		if (first->anchor->orig_family_num != sec->anchor->orig_family_num ||
		    first->anchor->family_num != sec->anchor->family_num)
			return 3;
	}

	/* if 'first' is a spare promote it to a populated mpb with sec's
	 * family number
	 */
	if (first->anchor->num_raid_devs == 0 &&
	    sec->anchor->num_raid_devs > 0) {
		int i;
		struct intel_dev *dv;
		struct imsm_dev *dev;

		/* we need to copy raid device info from sec; if an allocation
		 * fails here we don't associate the spare
		 */
		for (i = 0; i < sec->anchor->num_raid_devs; i++) {
			dv = malloc(sizeof(*dv));
			if (!dv)
				break;
			dev = malloc(sizeof_imsm_dev(get_imsm_dev(sec, i), 1));
			if (!dev) {
				free(dv);
				break;
			}
			dv->dev = dev;
			dv->index = i;
			dv->next = first->devlist;
			first->devlist = dv;
		}
		if (i < sec->anchor->num_raid_devs) {
			/* allocation failure */
			free_devlist(first);
			fprintf(stderr, "imsm: failed to associate spare\n");
			return 3;
		}
		for (i = 0; i < sec->anchor->num_raid_devs; i++)
			imsm_copy_dev(get_imsm_dev(first, i), get_imsm_dev(sec, i));

		first->anchor->num_raid_devs = sec->anchor->num_raid_devs;
		first->anchor->orig_family_num = sec->anchor->orig_family_num;
		first->anchor->family_num = sec->anchor->family_num;
	}

	return 0;
}

static void fd2devname(int fd, char *name)
{
	struct stat st;
	char path[256];
	char dname[100];
	char *nm;
	int rv;

	name[0] = '\0';
	if (fstat(fd, &st) != 0)
		return;
	sprintf(path, "/sys/dev/block/%d:%d",
		major(st.st_rdev), minor(st.st_rdev));

	rv = readlink(path, dname, sizeof(dname));
	if (rv <= 0)
		return;

	dname[rv] = '\0';
	nm = strrchr(dname, '/');
	nm++;
	snprintf(name, MAX_RAID_SERIAL_LEN, "/dev/%s", nm);
}

extern int scsi_get_serial(int fd, void *buf, size_t buf_len);

static int imsm_read_serial(int fd, char *devname,
			    __u8 serial[MAX_RAID_SERIAL_LEN])
{
	unsigned char scsi_serial[255];
	int rv;
	int rsp_len;
	int len;
	char *dest;
	char *src;
	char *rsp_buf;
	int i;

	memset(scsi_serial, 0, sizeof(scsi_serial));

	rv = scsi_get_serial(fd, scsi_serial, sizeof(scsi_serial));

	if (rv && check_env("IMSM_DEVNAME_AS_SERIAL")) {
		memset(serial, 0, MAX_RAID_SERIAL_LEN);
		fd2devname(fd, (char *) serial);
		return 0;
	}

	if (rv != 0) {
		if (devname)
			fprintf(stderr,
				Name ": Failed to retrieve serial for %s\n",
				devname);
		return rv;
	}

	rsp_len = scsi_serial[3];
	if (!rsp_len) {
		if (devname)
			fprintf(stderr,
				Name ": Failed to retrieve serial for %s\n",
				devname);
		return 2;
	}
	rsp_buf = (char *) &scsi_serial[4];

	/* trim all whitespace and non-printable characters and convert
	 * ':' to ';'
	 */
	for (i = 0, dest = rsp_buf; i < rsp_len; i++) {
		src = &rsp_buf[i];
		if (*src > 0x20) {
			/* ':' is reserved for use in placeholder serial
			 * numbers for missing disks
			 */
			if (*src == ':')
				*dest++ = ';';
			else
				*dest++ = *src;
		}
	}
	len = dest - rsp_buf;
	dest = rsp_buf;

	/* truncate leading characters */
	if (len > MAX_RAID_SERIAL_LEN) {
		dest += len - MAX_RAID_SERIAL_LEN;
		len = MAX_RAID_SERIAL_LEN;
	}

	memset(serial, 0, MAX_RAID_SERIAL_LEN);
	memcpy(serial, dest, len);

	return 0;
}
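
/* Worked example: a raw INQUIRY page-0x80 payload of "  3NP0 Q2XJ:9 " is
 * stored as the serial "3NP0Q2XJ;9": whitespace and non-printable bytes
 * are dropped and ':' becomes ';' so the placeholder syntax for missing
 * disks stays unambiguous.
 */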

static int serialcmp(__u8 *s1, __u8 *s2)
{
	return strncmp((char *) s1, (char *) s2, MAX_RAID_SERIAL_LEN);
}

static void serialcpy(__u8 *dest, __u8 *src)
{
	strncpy((char *) dest, (char *) src, MAX_RAID_SERIAL_LEN);
}

static struct dl *serial_to_dl(__u8 *serial, struct intel_super *super)
{
	struct dl *dl;

	for (dl = super->disks; dl; dl = dl->next)
		if (serialcmp(dl->serial, serial) == 0)
			break;

	return dl;
}

static int
load_imsm_disk(int fd, struct intel_super *super, char *devname, int keep_fd)
{
	struct dl *dl;
	struct stat stb;
	int rv;
	int i;
	int alloc = 1;
	__u8 serial[MAX_RAID_SERIAL_LEN];

	rv = imsm_read_serial(fd, devname, serial);

	if (rv != 0)
		return 2;

	/* check if this is a disk we have seen before.  it may be a spare in
	 * super->disks while the current anchor believes it is a raid member,
	 * check if we need to update dl->index
	 */
	dl = serial_to_dl(serial, super);
	if (!dl)
		dl = malloc(sizeof(*dl));
	else
		alloc = 0;

	if (!dl) {
		if (devname)
			fprintf(stderr,
				Name ": failed to allocate disk buffer for %s\n",
				devname);
		return 2;
	}

	if (alloc) {
		fstat(fd, &stb);
		dl->major = major(stb.st_rdev);
		dl->minor = minor(stb.st_rdev);
		dl->next = super->disks;
		dl->fd = keep_fd ? fd : -1;
		dl->devname = devname ? strdup(devname) : NULL;
		serialcpy(dl->serial, serial);
		dl->index = -2;
		dl->e = NULL;
	} else if (keep_fd) {
		close(dl->fd);
		dl->fd = fd;
	}

	/* look up this disk's index in the current anchor */
	for (i = 0; i < super->anchor->num_disks; i++) {
		struct imsm_disk *disk_iter;

		disk_iter = __get_imsm_disk(super->anchor, i);

		if (serialcmp(disk_iter->serial, dl->serial) == 0) {
			dl->disk = *disk_iter;
			/* only set index on disks that are a member of a
			 * populated container, i.e. one with raid_devs
			 */
			if (dl->disk.status & FAILED_DISK)
				dl->index = -2;
			else if (dl->disk.status & SPARE_DISK)
				dl->index = -1;
			else
				dl->index = i;

			break;
		}
	}

	/* no match, maybe a stale failed drive */
	if (i == super->anchor->num_disks && dl->index >= 0) {
		dl->disk = *__get_imsm_disk(super->anchor, dl->index);
		if (dl->disk.status & FAILED_DISK)
			dl->index = -2;
	}

	if (alloc)
		super->disks = dl;

	return 0;
}

#ifndef MDASSEMBLE
/* When migrating map0 contains the 'destination' state while map1
 * contains the current state.  When not migrating map0 contains the
 * current state.  This routine assumes that map[0].map_state is set to
 * the current array state before being called.
 *
 * Migration is indicated by one of the following states
 * 1/ Idle (migr_state=0 map0state=normal||uninitialized||degraded||failed)
 * 2/ Initialize (migr_state=1 migr_type=MIGR_INIT map0state=normal
 *    map1state=uninitialized)
 * 3/ Repair (Resync) (migr_state=1 migr_type=MIGR_REPAIR map0state=normal
 *    map1state=normal)
 * 4/ Rebuild (migr_state=1 migr_type=MIGR_REBUILD map0state=normal
 *    map1state=degraded)
 */
static void migrate(struct imsm_dev *dev, __u8 to_state, int migr_type)
{
	struct imsm_map *dest;
	struct imsm_map *src = get_imsm_map(dev, 0);

	dev->vol.migr_state = 1;
	set_migr_type(dev, migr_type);
	dev->vol.curr_migr_unit = 0;
	dest = get_imsm_map(dev, 1);

	/* duplicate and then set the target end state in map[0] */
	memcpy(dest, src, sizeof_imsm_map(src));
	if (migr_type == MIGR_REBUILD) {
		__u32 ord;
		int i;

		for (i = 0; i < src->num_members; i++) {
			ord = __le32_to_cpu(src->disk_ord_tbl[i]);
			set_imsm_ord_tbl_ent(src, i, ord_to_idx(ord));
		}
	}

	src->map_state = to_state;
}

static void end_migration(struct imsm_dev *dev, __u8 map_state)
{
	struct imsm_map *map = get_imsm_map(dev, 0);
	struct imsm_map *prev = get_imsm_map(dev, dev->vol.migr_state);
	int i;

	/* merge any IMSM_ORD_REBUILD bits that were not successfully
	 * completed in the last migration.
	 *
	 * FIXME add support for online capacity expansion and
	 * raid-level-migration
	 */
	for (i = 0; i < prev->num_members; i++)
		map->disk_ord_tbl[i] |= prev->disk_ord_tbl[i];

	dev->vol.migr_state = 0;
	dev->vol.curr_migr_unit = 0;
	map->map_state = map_state;
}
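
/* Illustrative sketch: a rebuild on a currently-degraded volume is started
 * by duplicating map 0 and recording the target end state, then collapsed
 * back to a single map when it completes:
 *
 *	// assumes map[0].map_state == IMSM_T_STATE_DEGRADED on entry
 *	migrate(dev, IMSM_T_STATE_NORMAL, MIGR_REBUILD);
 *	// ... rebuild progresses, checkpointed via dev->vol.curr_migr_unit ...
 *	end_migration(dev, IMSM_T_STATE_NORMAL);
 */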
#endif

static int parse_raid_devices(struct intel_super *super)
{
	int i;
	struct imsm_dev *dev_new;
	size_t len, len_migr;
	size_t space_needed = 0;
	struct imsm_super *mpb = super->anchor;

	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		struct imsm_dev *dev_iter = __get_imsm_dev(super->anchor, i);
		struct intel_dev *dv;

		len = sizeof_imsm_dev(dev_iter, 0);
		len_migr = sizeof_imsm_dev(dev_iter, 1);
		if (len_migr > len)
			space_needed += len_migr - len;

		dv = malloc(sizeof(*dv));
		if (!dv)
			return 1;
		dev_new = malloc(len_migr);
		if (!dev_new) {
			free(dv);
			return 1;
		}
		imsm_copy_dev(dev_new, dev_iter);
		dv->dev = dev_new;
		dv->index = i;
		dv->next = super->devlist;
		super->devlist = dv;
	}

	/* ensure that super->buf is large enough when all raid devices
	 * are migrating
	 */
	if (__le32_to_cpu(mpb->mpb_size) + space_needed > super->len) {
		void *buf;

		len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) + space_needed, 512);
		if (posix_memalign(&buf, 512, len) != 0)
			return 1;

		memcpy(buf, super->buf, super->len);
		memset(buf + super->len, 0, len - super->len);
		free(super->buf);
		super->buf = buf;
		super->len = len;
	}

	return 0;
}

/* retrieve a pointer to the bbm log which starts after all raid devices */
struct bbm_log *__get_imsm_bbm_log(struct imsm_super *mpb)
{
	void *ptr = NULL;

	if (__le32_to_cpu(mpb->bbm_log_size)) {
		ptr = mpb;
		ptr += mpb->mpb_size - __le32_to_cpu(mpb->bbm_log_size);
	}

	return ptr;
}
1827
1828 static void __free_imsm(struct intel_super *super, int free_disks);
1829
1830 /* load_imsm_mpb - read matrix metadata
1831 * allocates super->mpb to be freed by free_super
1832 */
1833 static int load_imsm_mpb(int fd, struct intel_super *super, char *devname)
1834 {
1835 unsigned long long dsize;
1836 unsigned long long sectors;
1837 struct stat;
1838 struct imsm_super *anchor;
1839 __u32 check_sum;
1840 int rc;
1841
1842 get_dev_size(fd, NULL, &dsize);
1843
1844 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0) {
1845 if (devname)
1846 fprintf(stderr,
1847 Name ": Cannot seek to anchor block on %s: %s\n",
1848 devname, strerror(errno));
1849 return 1;
1850 }
1851
1852 if (posix_memalign((void**)&anchor, 512, 512) != 0) {
1853 if (devname)
1854 fprintf(stderr,
1855 Name ": Failed to allocate imsm anchor buffer"
1856 " on %s\n", devname);
1857 return 1;
1858 }
1859 if (read(fd, anchor, 512) != 512) {
1860 if (devname)
1861 fprintf(stderr,
1862 Name ": Cannot read anchor block on %s: %s\n",
1863 devname, strerror(errno));
1864 free(anchor);
1865 return 1;
1866 }
1867
1868 if (strncmp((char *) anchor->sig, MPB_SIGNATURE, MPB_SIG_LEN) != 0) {
1869 if (devname)
1870 fprintf(stderr,
1871 Name ": no IMSM anchor on %s\n", devname);
1872 free(anchor);
1873 return 2;
1874 }
1875
1876 __free_imsm(super, 0);
1877 super->len = ROUND_UP(__le32_to_cpu(anchor->mpb_size), 512);
1878 if (posix_memalign(&super->buf, 512, super->len) != 0) {
1879 if (devname)
1880 fprintf(stderr,
1881 Name ": unable to allocate %zu byte mpb buffer\n",
1882 super->len);
1883 free(anchor);
1884 return 2;
1885 }
1886 memcpy(super->buf, anchor, 512);
1887
1888 sectors = mpb_sectors(anchor) - 1;
1889 free(anchor);
1890 if (!sectors) {
1891 check_sum = __gen_imsm_checksum(super->anchor);
1892 if (check_sum != __le32_to_cpu(super->anchor->check_sum)) {
1893 if (devname)
1894 fprintf(stderr,
1895 Name ": IMSM checksum %x != %x on %s\n",
1896 check_sum,
1897 __le32_to_cpu(super->anchor->check_sum),
1898 devname);
1899 return 2;
1900 }
1901
1902 rc = load_imsm_disk(fd, super, devname, 0);
1903 if (rc == 0)
1904 rc = parse_raid_devices(super);
1905 return rc;
1906 }
1907
1908 /* read the extended mpb */
1909 if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0) {
1910 if (devname)
1911 fprintf(stderr,
1912 Name ": Cannot seek to extended mpb on %s: %s\n",
1913 devname, strerror(errno));
1914 return 1;
1915 }
1916
1917 if (read(fd, super->buf + 512, super->len - 512) != super->len - 512) {
1918 if (devname)
1919 fprintf(stderr,
1920 Name ": Cannot read extended mpb on %s: %s\n",
1921 devname, strerror(errno));
1922 return 2;
1923 }
1924
1925 check_sum = __gen_imsm_checksum(super->anchor);
1926 if (check_sum != __le32_to_cpu(super->anchor->check_sum)) {
1927 if (devname)
1928 fprintf(stderr,
1929 Name ": IMSM checksum %x != %x on %s\n",
1930 check_sum, __le32_to_cpu(super->anchor->check_sum),
1931 devname);
1932 return 3;
1933 }
1934
1935 /* FIXME the BBM log is disk specific so we cannot use this global
1936 * buffer for all disks. Ok for now since we only look at the global
1937 * bbm_log_size parameter to gate assembly
1938 */
1939 super->bbm_log = __get_imsm_bbm_log(super->anchor);
1940
1941 rc = load_imsm_disk(fd, super, devname, 0);
1942 if (rc == 0)
1943 rc = parse_raid_devices(super);
1944
1945 return rc;
1946 }
1947
1948 static void __free_imsm_disk(struct dl *d)
1949 {
1950 if (d->fd >= 0)
1951 close(d->fd);
1952 if (d->devname)
1953 free(d->devname);
1954 if (d->e)
1955 free(d->e);
1956 free(d);
1957 }
1958 
1959 static void free_imsm_disks(struct intel_super *super)
1960 {
1961 struct dl *d;
1962
1963 while (super->disks) {
1964 d = super->disks;
1965 super->disks = d->next;
1966 __free_imsm_disk(d);
1967 }
1968 while (super->missing) {
1969 d = super->missing;
1970 super->missing = d->next;
1971 __free_imsm_disk(d);
1972 }
1974 }
1975
1976 /* free all the pieces hanging off of a super pointer */
1977 static void __free_imsm(struct intel_super *super, int free_disks)
1978 {
1979 if (super->buf) {
1980 free(super->buf);
1981 super->buf = NULL;
1982 }
1983 if (free_disks)
1984 free_imsm_disks(super);
1985 free_devlist(super);
1986 if (super->hba) {
1987 free((void *) super->hba);
1988 super->hba = NULL;
1989 }
1990 }
1991
1992 static void free_imsm(struct intel_super *super)
1993 {
1994 __free_imsm(super, 1);
1995 free(super);
1996 }
1997
1998 static void free_super_imsm(struct supertype *st)
1999 {
2000 struct intel_super *super = st->sb;
2001
2002 if (!super)
2003 return;
2004
2005 free_imsm(super);
2006 st->sb = NULL;
2007 }
2008
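/* alloc_super - allocate a zeroed intel_super and, unless
 * IMSM_NO_PLATFORM is set in the environment, probe the platform for
 * an option-rom and the first Intel ahci controller
 */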
2009 static struct intel_super *alloc_super(int creating_imsm)
2010 {
2011 struct intel_super *super = malloc(sizeof(*super));
2012
2013 if (super) {
2014 memset(super, 0, sizeof(*super));
2015 super->creating_imsm = creating_imsm;
2016 super->current_vol = -1;
2017 super->create_offset = ~((__u32) 0);
2018 if (!check_env("IMSM_NO_PLATFORM"))
2019 super->orom = find_imsm_orom();
2020 if (super->orom && !check_env("IMSM_TEST_OROM")) {
2021 struct sys_dev *list, *ent;
2022
2023 /* find the first intel ahci controller */
2024 list = find_driver_devices("pci", "ahci");
2025 for (ent = list; ent; ent = ent->next)
2026 if (devpath_to_vendor(ent->path) == 0x8086)
2027 break;
2028 if (ent) {
2029 super->hba = ent->path;
2030 ent->path = NULL;
2031 }
2032 free_sys_dev(&list);
2033 }
2034 }
2035
2036 return super;
2037 }
2038
2039 #ifndef MDASSEMBLE
2040 /* find_missing - helper routine for load_super_imsm_all that identifies
2041 * disks that have disappeared from the system. This routine relies on
2042 * the mpb being up to date, which it is at load time.
2043 */
2044 static int find_missing(struct intel_super *super)
2045 {
2046 int i;
2047 struct imsm_super *mpb = super->anchor;
2048 struct dl *dl;
2049 struct imsm_disk *disk;
2050
2051 for (i = 0; i < mpb->num_disks; i++) {
2052 disk = __get_imsm_disk(mpb, i);
2053 dl = serial_to_dl(disk->serial, super);
2054 if (dl)
2055 continue;
2056
2057 dl = malloc(sizeof(*dl));
2058 if (!dl)
2059 return 1;
2060 dl->major = 0;
2061 dl->minor = 0;
2062 dl->fd = -1;
2063 dl->devname = strdup("missing");
2064 dl->index = i;
2065 serialcpy(dl->serial, disk->serial);
2066 dl->disk = *disk;
2067 dl->e = NULL;
2068 dl->next = super->missing;
2069 super->missing = dl;
2070 }
2071
2072 return 0;
2073 }
2074
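/* load_super_imsm_all - load a container's metadata by reading the mpb
 * from every member, electing the copy with the highest generation
 * number, and then re-parsing each disk against that winning anchor
 */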
2075 static int load_super_imsm_all(struct supertype *st, int fd, void **sbp,
2076 char *devname, int keep_fd)
2077 {
2078 struct mdinfo *sra;
2079 struct intel_super *super;
2080 struct mdinfo *sd, *best = NULL;
2081 __u32 bestgen = 0;
2082 __u32 gen;
2083 char nm[20];
2084 int dfd;
2085 int rv;
2086 int devnum = fd2devnum(fd);
2087 int retry;
2088 enum sysfs_read_flags flags;
2089
2090 flags = GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE;
2091 if (mdmon_running(devnum))
2092 flags |= SKIP_GONE_DEVS;
2093
2094 /* check if 'fd' is an open container */
2095 sra = sysfs_read(fd, 0, flags);
2096 if (!sra)
2097 return 1;
2098
2099 if (sra->array.major_version != -1 ||
2100 sra->array.minor_version != -2 ||
2101 strcmp(sra->text_version, "imsm") != 0)
2102 return 1;
2103
2104 super = alloc_super(0);
2105 if (!super)
2106 return 1;
2107
2108 /* find the most up to date disk in this array, skipping spares */
2109 for (sd = sra->devs; sd; sd = sd->next) {
2110 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
2111 dfd = dev_open(nm, keep_fd ? O_RDWR : O_RDONLY);
2112 if (dfd < 0) {
2113 free_imsm(super);
2114 return 2;
2115 }
2116 rv = load_imsm_mpb(dfd, super, NULL);
2117
2118 /* retry the load if we might have raced against mdmon */
2119 if (rv == 3 && mdmon_running(devnum))
2120 for (retry = 0; retry < 3; retry++) {
2121 usleep(3000);
2122 rv = load_imsm_mpb(dfd, super, NULL);
2123 if (rv != 3)
2124 break;
2125 }
2126 if (!keep_fd)
2127 close(dfd);
2128 if (rv == 0) {
2129 if (super->anchor->num_raid_devs == 0)
2130 gen = 0;
2131 else
2132 gen = __le32_to_cpu(super->anchor->generation_num);
2133 if (!best || gen > bestgen) {
2134 bestgen = gen;
2135 best = sd;
2136 }
2137 } else {
2138 free_imsm(super);
2139 return rv;
2140 }
2141 }
2142
2143 if (!best) {
2144 free_imsm(super);
2145 return 1;
2146 }
2147
2148 /* load the most up to date anchor */
2149 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
2150 dfd = dev_open(nm, O_RDONLY);
2151 if (dfd < 0) {
2152 free_imsm(super);
2153 return 1;
2154 }
2155 rv = load_imsm_mpb(dfd, super, NULL);
2156 close(dfd);
2157 if (rv != 0) {
2158 free_imsm(super);
2159 return 2;
2160 }
2161
2162 /* re-parse the disk list with the current anchor */
2163 for (sd = sra->devs ; sd ; sd = sd->next) {
2164 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
2165 dfd = dev_open(nm, keep_fd ? O_RDWR : O_RDONLY);
2166 if (dfd < 0) {
2167 free_imsm(super);
2168 return 2;
2169 }
2170 load_imsm_disk(dfd, super, NULL, keep_fd);
2171 if (!keep_fd)
2172 close(dfd);
2173 }
2174 
2176 if (find_missing(super) != 0) {
2177 free_imsm(super);
2178 return 2;
2179 }
2180
2181 if (st->subarray[0]) {
2182 if (atoi(st->subarray) < super->anchor->num_raid_devs)
2183 super->current_vol = atoi(st->subarray);
2184 else {
2185 free_imsm(super);
2186 return 1;
2187 }
2188 }
2189
2190 *sbp = super;
2191 st->container_dev = devnum;
2192 if (st->ss == NULL) {
2193 st->ss = &super_imsm;
2194 st->minor_version = 0;
2195 st->max_devs = IMSM_MAX_DEVICES;
2196 }
2197 st->loaded_container = 1;
2198
2199 return 0;
2200 }
2201 #endif
2202
2203 static int load_super_imsm(struct supertype *st, int fd, char *devname)
2204 {
2205 struct intel_super *super;
2206 int rv;
2207
2208 #ifndef MDASSEMBLE
2209 if (load_super_imsm_all(st, fd, &st->sb, devname, 1) == 0)
2210 return 0;
2211 #endif
2212
2213 free_super_imsm(st);
2214
2215 super = alloc_super(0);
2216 if (!super) {
2217 fprintf(stderr,
2218 Name ": malloc of %zu failed.\n",
2219 sizeof(*super));
2220 return 1;
2221 }
2222
2223 rv = load_imsm_mpb(fd, super, devname);
2224
2225 if (rv) {
2226 if (devname)
2227 fprintf(stderr,
2228 Name ": Failed to load all information "
2229 "sections on %s\n", devname);
2230 free_imsm(super);
2231 return rv;
2232 }
2233
2234 if (st->subarray[0]) {
2235 if (atoi(st->subarray) < super->anchor->num_raid_devs)
2236 super->current_vol = atoi(st->subarray);
2237 else {
2238 free_imsm(super);
2239 return 1;
2240 }
2241 }
2242
2243 st->sb = super;
2244 if (st->ss == NULL) {
2245 st->ss = &super_imsm;
2246 st->minor_version = 0;
2247 st->max_devs = IMSM_MAX_DEVICES;
2248 }
2249 st->loaded_container = 0;
2250
2251 return 0;
2252 }
2253
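/* mdadm hands us chunk_size in bytes and size in KiB, while imsm
 * counts in 512-byte sectors; hence the >> 9 and the "* 2" in the
 * conversion helpers below
 */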
2254 static __u16 info_to_blocks_per_strip(mdu_array_info_t *info)
2255 {
2256 if (info->level == 1)
2257 return 128;
2258 return info->chunk_size >> 9;
2259 }
2260
2261 static __u32 info_to_num_data_stripes(mdu_array_info_t *info, int num_domains)
2262 {
2263 __u32 num_stripes;
2264
2265 num_stripes = (info->size * 2) / info_to_blocks_per_strip(info);
2266 num_stripes /= num_domains;
2267
2268 return num_stripes;
2269 }
2270
2271 static __u32 info_to_blocks_per_member(mdu_array_info_t *info)
2272 {
2273 if (info->level == 1)
2274 return info->size * 2;
2275 else
2276 return (info->size * 2) & ~(info_to_blocks_per_strip(info) - 1);
2277 }
2278
2279 static void imsm_update_version_info(struct intel_super *super)
2280 {
2281 /* update the version and attributes */
2282 struct imsm_super *mpb = super->anchor;
2283 char *version;
2284 struct imsm_dev *dev;
2285 struct imsm_map *map;
2286 int i;
2287
2288 for (i = 0; i < mpb->num_raid_devs; i++) {
2289 dev = get_imsm_dev(super, i);
2290 map = get_imsm_map(dev, 0);
2291 if (__le32_to_cpu(dev->size_high) > 0)
2292 mpb->attributes |= MPB_ATTRIB_2TB;
2293
2294 /* FIXME detect when an array spans a port multiplier */
2295 #if 0
2296 mpb->attributes |= MPB_ATTRIB_PM;
2297 #endif
2298
2299 if (mpb->num_raid_devs > 1 ||
2300 mpb->attributes != MPB_ATTRIB_CHECKSUM_VERIFY) {
2301 version = MPB_VERSION_ATTRIBS;
2302 switch (get_imsm_raid_level(map)) {
2303 case 0: mpb->attributes |= MPB_ATTRIB_RAID0; break;
2304 case 1: mpb->attributes |= MPB_ATTRIB_RAID1; break;
2305 case 10: mpb->attributes |= MPB_ATTRIB_RAID10; break;
2306 case 5: mpb->attributes |= MPB_ATTRIB_RAID5; break;
2307 }
2308 } else {
2309 if (map->num_members >= 5)
2310 version = MPB_VERSION_5OR6_DISK_ARRAY;
2311 else if (dev->status == DEV_CLONE_N_GO)
2312 version = MPB_VERSION_CNG;
2313 else if (get_imsm_raid_level(map) == 5)
2314 version = MPB_VERSION_RAID5;
2315 else if (map->num_members >= 3)
2316 version = MPB_VERSION_3OR4_DISK_ARRAY;
2317 else if (get_imsm_raid_level(map) == 1)
2318 version = MPB_VERSION_RAID1;
2319 else
2320 version = MPB_VERSION_RAID0;
2321 }
2322 strcpy(((char *) mpb->sig) + strlen(MPB_SIGNATURE), version);
2323 }
2324 }
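
/* For example, a container holding more than one volume is stamped
 * MPB_VERSION_ATTRIBS with the matching MPB_ATTRIB_* bit set per raid
 * level, while a single raid0 volume with only the default checksum
 * attribute keeps the older MPB_VERSION_RAID0 string
 */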
2325
2326 static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
2327 unsigned long long size, char *name,
2328 char *homehost, int *uuid)
2329 {
2330 /* We are creating a volume inside a pre-existing container.
2331 * so st->sb is already set.
2332 */
2333 struct intel_super *super = st->sb;
2334 struct imsm_super *mpb = super->anchor;
2335 struct intel_dev *dv;
2336 struct imsm_dev *dev;
2337 struct imsm_vol *vol;
2338 struct imsm_map *map;
2339 int idx = mpb->num_raid_devs;
2340 int i;
2341 unsigned long long array_blocks;
2342 size_t size_old, size_new;
2343 __u32 num_data_stripes;
2344
2345 if (super->orom && mpb->num_raid_devs >= super->orom->vpa) {
2346 fprintf(stderr, Name": This imsm-container already has the "
2347 "maximum of %d volumes\n", super->orom->vpa);
2348 return 0;
2349 }
2350
2351 /* ensure the mpb is large enough for the new data */
2352 size_old = __le32_to_cpu(mpb->mpb_size);
2353 size_new = disks_to_mpb_size(info->nr_disks);
2354 if (size_new > size_old) {
2355 void *mpb_new;
2356 size_t size_round = ROUND_UP(size_new, 512);
2357
2358 if (posix_memalign(&mpb_new, 512, size_round) != 0) {
2359 fprintf(stderr, Name": could not allocate new mpb\n");
2360 return 0;
2361 }
2362 memcpy(mpb_new, mpb, size_old);
2363 free(mpb);
2364 mpb = mpb_new;
2365 super->anchor = mpb_new;
2366 mpb->mpb_size = __cpu_to_le32(size_new);
2367 memset(mpb_new + size_old, 0, size_round - size_old);
2368 }
2369 super->current_vol = idx;
2370 /* when creating the first raid device in this container set num_disks
2371 * to zero, i.e. delete this spare and add raid member devices in
2372 * add_to_super_imsm_volume()
2373 */
2374 if (super->current_vol == 0)
2375 mpb->num_disks = 0;
2376
2377 for (i = 0; i < super->current_vol; i++) {
2378 dev = get_imsm_dev(super, i);
2379 if (strncmp((char *) dev->volume, name,
2380 MAX_RAID_SERIAL_LEN) == 0) {
2381 fprintf(stderr, Name": '%s' is already defined for this container\n",
2382 name);
2383 return 0;
2384 }
2385 }
2386
2387 sprintf(st->subarray, "%d", idx);
2388 dv = malloc(sizeof(*dv));
2389 if (!dv) {
2390 fprintf(stderr, Name ": failed to allocate device list entry\n");
2391 return 0;
2392 }
2393 dev = malloc(sizeof(*dev) + sizeof(__u32) * (info->raid_disks - 1));
2394 if (!dev) {
2395 free(dv);
2396 fprintf(stderr, Name": could not allocate raid device\n");
2397 return 0;
2398 }
2399 strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
2400 if (info->level == 1)
2401 array_blocks = info_to_blocks_per_member(info);
2402 else
2403 array_blocks = calc_array_size(info->level, info->raid_disks,
2404 info->layout, info->chunk_size,
2405 info->size*2);
2406 /* round array size down to closest MB */
2407 array_blocks = (array_blocks >> SECT_PER_MB_SHIFT) << SECT_PER_MB_SHIFT;
2408
2409 dev->size_low = __cpu_to_le32((__u32) array_blocks);
2410 dev->size_high = __cpu_to_le32((__u32) (array_blocks >> 32));
2411 dev->status = __cpu_to_le32(0);
2412 dev->reserved_blocks = __cpu_to_le32(0);
2413 vol = &dev->vol;
2414 vol->migr_state = 0;
2415 set_migr_type(dev, MIGR_INIT);
2416 vol->dirty = 0;
2417 vol->curr_migr_unit = 0;
2418 map = get_imsm_map(dev, 0);
2419 map->pba_of_lba0 = __cpu_to_le32(super->create_offset);
2420 map->blocks_per_member = __cpu_to_le32(info_to_blocks_per_member(info));
2421 map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info));
2422 map->failed_disk_num = ~0;
2423 map->map_state = info->level ? IMSM_T_STATE_UNINITIALIZED :
2424 IMSM_T_STATE_NORMAL;
2425 map->ddf = 1;
2426
2427 if (info->level == 1 && info->raid_disks > 2) {
2428 fprintf(stderr, Name": imsm does not support more than 2 disks"
2429 " in a raid1 volume\n");
free(dev);
free(dv);
2430 return 0;
2431 }
2432
2433 map->raid_level = info->level;
2434 if (info->level == 10) {
2435 map->raid_level = 1;
2436 map->num_domains = info->raid_disks / 2;
2437 } else if (info->level == 1)
2438 map->num_domains = info->raid_disks;
2439 else
2440 map->num_domains = 1;
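
/* imsm has no native raid10 level: a 4-disk raid10 is recorded as
 * raid_level 1 with num_domains = 2, i.e. two mirrored pairs striped
 * together
 */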
2441
2442 num_data_stripes = info_to_num_data_stripes(info, map->num_domains);
2443 map->num_data_stripes = __cpu_to_le32(num_data_stripes);
2444
2445 map->num_members = info->raid_disks;
2446 for (i = 0; i < map->num_members; i++) {
2447 /* initialized in add_to_super */
2448 set_imsm_ord_tbl_ent(map, i, 0);
2449 }
2450 mpb->num_raid_devs++;
2451
2452 dv->dev = dev;
2453 dv->index = super->current_vol;
2454 dv->next = super->devlist;
2455 super->devlist = dv;
2456
2457 imsm_update_version_info(super);
2458
2459 return 1;
2460 }
2461
2462 static int init_super_imsm(struct supertype *st, mdu_array_info_t *info,
2463 unsigned long long size, char *name,
2464 char *homehost, int *uuid)
2465 {
2466 /* This is primarily called by Create when creating a new array.
2467 * We will then get add_to_super called for each component, and then
2468 * write_init_super called to write it out to each device.
2469 * For IMSM, Create can create on fresh devices or on a pre-existing
2470 * array.
2471 * To create on a pre-existing array a different method will be called.
2472 * This one is just for fresh drives.
2473 */
2474 struct intel_super *super;
2475 struct imsm_super *mpb;
2476 size_t mpb_size;
2477 char *version;
2478
2479 if (!info) {
2480 st->sb = NULL;
2481 return 0;
2482 }
2483 if (st->sb)
2484 return init_super_imsm_volume(st, info, size, name, homehost,
2485 uuid);
2486
2487 super = alloc_super(1);
2488 if (!super)
2489 return 0;
2490 mpb_size = disks_to_mpb_size(info->nr_disks);
2491 if (posix_memalign(&super->buf, 512, mpb_size) != 0) {
2492 free(super);
2493 return 0;
2494 }
2495 mpb = super->buf;
2496 memset(mpb, 0, mpb_size);
2497
2498 mpb->attributes = MPB_ATTRIB_CHECKSUM_VERIFY;
2499
2500 version = (char *) mpb->sig;
2501 strcpy(version, MPB_SIGNATURE);
2502 version += strlen(MPB_SIGNATURE);
2503 strcpy(version, MPB_VERSION_RAID0);
2504 mpb->mpb_size = __cpu_to_le32(mpb_size);
2505
2506 st->sb = super;
2507 return 1;
2508 }
2509
2510 #ifndef MDASSEMBLE
2511 static int add_to_super_imsm_volume(struct supertype *st, mdu_disk_info_t *dk,
2512 int fd, char *devname)
2513 {
2514 struct intel_super *super = st->sb;
2515 struct imsm_super *mpb = super->anchor;
2516 struct dl *dl;
2517 struct imsm_dev *dev;
2518 struct imsm_map *map;
2519
2520 dev = get_imsm_dev(super, super->current_vol);
2521 map = get_imsm_map(dev, 0);
2522
2523 if (! (dk->state & (1<<MD_DISK_SYNC))) {
2524 fprintf(stderr, Name ": %s: Cannot add spare devices to IMSM volume\n",
2525 devname);
2526 return 1;
2527 }
2528
2529 if (fd == -1) {
2530 /* we're doing autolayout so grab the pre-marked (in
2531 * validate_geometry) raid_disk
2532 */
2533 for (dl = super->disks; dl; dl = dl->next)
2534 if (dl->raiddisk == dk->raid_disk)
2535 break;
2536 } else {
2537 for (dl = super->disks; dl ; dl = dl->next)
2538 if (dl->major == dk->major &&
2539 dl->minor == dk->minor)
2540 break;
2541 }
2542
2543 if (!dl) {
2544 fprintf(stderr, Name ": %s is not a member of the same container\n", devname);
2545 return 1;
2546 }
2547
2548 /* add a pristine spare to the metadata */
2549 if (dl->index < 0) {
2550 dl->index = super->anchor->num_disks;
2551 super->anchor->num_disks++;
2552 }
2553 set_imsm_ord_tbl_ent(map, dk->number, dl->index);
2554 dl->disk.status = CONFIGURED_DISK | USABLE_DISK;
2555
2556 /* if we are creating the first raid device update the family number */
2557 if (super->current_vol == 0) {
2558 __u32 sum;
2559 struct imsm_dev *_dev = __get_imsm_dev(mpb, 0);
2560 struct imsm_disk *_disk = __get_imsm_disk(mpb, dl->index);
2561
2562 *_dev = *dev;
2563 *_disk = dl->disk;
2564 sum = random32();
2565 sum += __gen_imsm_checksum(mpb);
2566 mpb->family_num = __cpu_to_le32(sum);
2567 mpb->orig_family_num = mpb->family_num;
2568 }
2569
2570 return 0;
2571 }
2572
2573 static int add_to_super_imsm(struct supertype *st, mdu_disk_info_t *dk,
2574 int fd, char *devname)
2575 {
2576 struct intel_super *super = st->sb;
2577 struct dl *dd;
2578 unsigned long long size;
2579 __u32 id;
2580 int rv;
2581 struct stat stb;
2582
2583 /* if we are on a RAID-enabled platform check that the disk is
2584 * attached to the raid controller
2585 */
2586 if (super->hba && !disk_attached_to_hba(fd, super->hba)) {
2587 fprintf(stderr,
2588 Name ": %s is not attached to the raid controller: %s\n",
2589 devname ? : "disk", super->hba);
2590 return 1;
2591 }
2592
2593 if (super->current_vol >= 0)
2594 return add_to_super_imsm_volume(st, dk, fd, devname);
2595
2596 fstat(fd, &stb);
2597 dd = malloc(sizeof(*dd));
2598 if (!dd) {
2599 fprintf(stderr,
2600 Name ": malloc failed %s:%d.\n", __func__, __LINE__);
2601 return 1;
2602 }
2603 memset(dd, 0, sizeof(*dd));
2604 dd->major = major(stb.st_rdev);
2605 dd->minor = minor(stb.st_rdev);
2606 dd->index = -1;
2607 dd->devname = devname ? strdup(devname) : NULL;
2608 dd->fd = fd;
2609 dd->e = NULL;
2610 rv = imsm_read_serial(fd, devname, dd->serial);
2611 if (rv) {
2612 fprintf(stderr,
2613 Name ": failed to retrieve scsi serial, aborting\n");
2614 free(dd);
2615 abort();
2616 }
2617
2618 get_dev_size(fd, NULL, &size);
2619 size /= 512;
2620 serialcpy(dd->disk.serial, dd->serial);
2621 dd->disk.total_blocks = __cpu_to_le32(size);
2622 dd->disk.status = USABLE_DISK | SPARE_DISK;
2623 if (sysfs_disk_to_scsi_id(fd, &id) == 0)
2624 dd->disk.scsi_id = __cpu_to_le32(id);
2625 else
2626 dd->disk.scsi_id = __cpu_to_le32(0);
2627
2628 if (st->update_tail) {
2629 dd->next = super->add;
2630 super->add = dd;
2631 } else {
2632 dd->next = super->disks;
2633 super->disks = dd;
2634 }
2635
2636 return 0;
2637 }
2638
2639 static int store_imsm_mpb(int fd, struct intel_super *super);
2640
2641 /* spare records have their own family number and do not have any defined raid
2642 * devices
2643 */
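/* (done by temporarily rewriting the anchor as a one-disk, zero-volume
 * record per spare, re-deriving a family number for each, and then
 * restoring the real mpb)
 */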
2644 static int write_super_imsm_spares(struct intel_super *super, int doclose)
2645 {
2646 struct imsm_super mpb_save;
2647 struct imsm_super *mpb = super->anchor;
2648 __u32 sum;
2649 struct dl *d;
2650
2651 mpb_save = *mpb;
2652 mpb->num_raid_devs = 0;
2653 mpb->num_disks = 1;
2654 mpb->mpb_size = __cpu_to_le32(sizeof(struct imsm_super));
2655 mpb->generation_num = __cpu_to_le32(1UL);
2656
2657 for (d = super->disks; d; d = d->next) {
2658 if (d->index != -1)
2659 continue;
2660
2661 mpb->disk[0] = d->disk;
2662 sum = __gen_imsm_checksum(mpb);
2663 mpb->family_num = __cpu_to_le32(sum);
2664 mpb->orig_family_num = 0;
2665 sum = __gen_imsm_checksum(mpb);
2666 mpb->check_sum = __cpu_to_le32(sum);
2667
2668 if (store_imsm_mpb(d->fd, super)) {
2669 fprintf(stderr, "%s: failed for device %d:%d %s\n",
2670 __func__, d->major, d->minor, strerror(errno));
2671 *mpb = mpb_save;
2672 return 1;
2673 }
2674 if (doclose) {
2675 close(d->fd);
2676 d->fd = -1;
2677 }
2678 }
2679
2680 *mpb = mpb_save;
2681 return 0;
2682 }
2683
2684 static int write_super_imsm(struct intel_super *super, int doclose)
2685 {
2686 struct imsm_super *mpb = super->anchor;
2687 struct dl *d;
2688 __u32 generation;
2689 __u32 sum;
2690 int spares = 0;
2691 int i;
2692 __u32 mpb_size = sizeof(struct imsm_super) - sizeof(struct imsm_disk);
2693
2694 /* 'generation' is incremented every time the metadata is written */
2695 generation = __le32_to_cpu(mpb->generation_num);
2696 generation++;
2697 mpb->generation_num = __cpu_to_le32(generation);
2698
2699 /* fix up cases where previous mdadm releases failed to set
2700 * orig_family_num
2701 */
2702 if (mpb->orig_family_num == 0)
2703 mpb->orig_family_num = mpb->family_num;
2704
2705 mpb_size += sizeof(struct imsm_disk) * mpb->num_disks;
2706 for (d = super->disks; d; d = d->next) {
2707 if (d->index == -1)
2708 spares++;
2709 else
2710 mpb->disk[d->index] = d->disk;
2711 }
2712 for (d = super->missing; d; d = d->next)
2713 mpb->disk[d->index] = d->disk;
2714
2715 for (i = 0; i < mpb->num_raid_devs; i++) {
2716 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
2717
2718 imsm_copy_dev(dev, get_imsm_dev(super, i));
2719 mpb_size += sizeof_imsm_dev(dev, 0);
2720 }
2721 mpb_size += __le32_to_cpu(mpb->bbm_log_size);
2722 mpb->mpb_size = __cpu_to_le32(mpb_size);
2723
2724 /* recalculate checksum */
2725 sum = __gen_imsm_checksum(mpb);
2726 mpb->check_sum = __cpu_to_le32(sum);
2727
2728 /* write the mpb for disks that compose raid devices */
2729 for (d = super->disks; d ; d = d->next) {
2730 if (d->index < 0)
2731 continue;
2732 if (store_imsm_mpb(d->fd, super))
2733 fprintf(stderr, "%s: failed for device %d:%d %s\n",
2734 __func__, d->major, d->minor, strerror(errno));
2735 if (doclose) {
2736 close(d->fd);
2737 d->fd = -1;
2738 }
2739 }
2740
2741 if (spares)
2742 return write_super_imsm_spares(super, doclose);
2743
2744 return 0;
2745 }
2746 
2748 static int create_array(struct supertype *st, int dev_idx)
2749 {
2750 size_t len;
2751 struct imsm_update_create_array *u;
2752 struct intel_super *super = st->sb;
2753 struct imsm_dev *dev = get_imsm_dev(super, dev_idx);
2754 struct imsm_map *map = get_imsm_map(dev, 0);
2755 struct disk_info *inf;
2756 struct imsm_disk *disk;
2757 int i;
2758
2759 len = sizeof(*u) - sizeof(*dev) + sizeof_imsm_dev(dev, 0) +
2760 sizeof(*inf) * map->num_members;
2761 u = malloc(len);
2762 if (!u) {
2763 fprintf(stderr, "%s: failed to allocate update buffer\n",
2764 __func__);
2765 return 1;
2766 }
2767
2768 u->type = update_create_array;
2769 u->dev_idx = dev_idx;
2770 imsm_copy_dev(&u->dev, dev);
2771 inf = get_disk_info(u);
2772 for (i = 0; i < map->num_members; i++) {
2773 int idx = get_imsm_disk_idx(dev, i);
2774
2775 disk = get_imsm_disk(super, idx);
2776 serialcpy(inf[i].serial, disk->serial);
2777 }
2778 append_metadata_update(st, u, len);
2779
2780 return 0;
2781 }
2782
2783 static int _add_disk(struct supertype *st)
2784 {
2785 struct intel_super *super = st->sb;
2786 size_t len;
2787 struct imsm_update_add_disk *u;
2788
2789 if (!super->add)
2790 return 0;
2791
2792 len = sizeof(*u);
2793 u = malloc(len);
2794 if (!u) {
2795 fprintf(stderr, "%s: failed to allocate update buffer\n",
2796 __func__);
2797 return 1;
2798 }
2799
2800 u->type = update_add_disk;
2801 append_metadata_update(st, u, len);
2802
2803 return 0;
2804 }
2805
2806 static int write_init_super_imsm(struct supertype *st)
2807 {
2808 struct intel_super *super = st->sb;
2809 int current_vol = super->current_vol;
2810
2811 /* we are done with current_vol; reset it to point st at the container */
2812 super->current_vol = -1;
2813
2814 if (st->update_tail) {
2815 /* queue the recently created array / added disk
2816 * as a metadata update */
2817 struct dl *d;
2818 int rv;
2819
2820 /* determine if we are creating a volume or adding a disk */
2821 if (current_vol < 0) {
2822 /* in the add disk case we are running in mdmon
2823 * context, so don't close fd's
2824 */
2825 return _add_disk(st);
2826 } else
2827 rv = create_array(st, current_vol);
2828
2829 for (d = super->disks; d ; d = d->next) {
2830 close(d->fd);
2831 d->fd = -1;
2832 }
2833
2834 return rv;
2835 } else
2836 return write_super_imsm(st->sb, 1);
2837 }
2838 #endif
2839
2840 static int store_zero_imsm(struct supertype *st, int fd)
2841 {
2842 unsigned long long dsize;
2843 void *buf;
2844
2845 get_dev_size(fd, NULL, &dsize);
2846
2847 /* the anchor (first 512 bytes of the mpb) is stored in the second-to-last sector of the disk */
2848 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
2849 return 1;
2850
2851 if (posix_memalign(&buf, 512, 512) != 0)
2852 return 1;
2853
2854 memset(buf, 0, 512);
2855 if (write(fd, buf, 512) != 512) {
free(buf);
2856 return 1;
}
free(buf);
2857 return 0;
2858 }
2859
2860 static int imsm_bbm_log_size(struct imsm_super *mpb)
2861 {
2862 return __le32_to_cpu(mpb->bbm_log_size);
2863 }
2864
2865 #ifndef MDASSEMBLE
2866 static int validate_geometry_imsm_container(struct supertype *st, int level,
2867 int layout, int raiddisks, int chunk,
2868 unsigned long long size, char *dev,
2869 unsigned long long *freesize,
2870 int verbose)
2871 {
2872 int fd;
2873 unsigned long long ldsize;
2874 const struct imsm_orom *orom;
2875
2876 if (level != LEVEL_CONTAINER)
2877 return 0;
2878 if (!dev)
2879 return 1;
2880
2881 if (check_env("IMSM_NO_PLATFORM"))
2882 orom = NULL;
2883 else
2884 orom = find_imsm_orom();
2885 if (orom && raiddisks > orom->tds) {
2886 if (verbose)
2887 fprintf(stderr, Name ": %d exceeds maximum number of"
2888 " platform supported disks: %d\n",
2889 raiddisks, orom->tds);
2890 return 0;
2891 }
2892
2893 fd = open(dev, O_RDONLY|O_EXCL, 0);
2894 if (fd < 0) {
2895 if (verbose)
2896 fprintf(stderr, Name ": imsm: Cannot open %s: %s\n",
2897 dev, strerror(errno));
2898 return 0;
2899 }
2900 if (!get_dev_size(fd, dev, &ldsize)) {
2901 close(fd);
2902 return 0;
2903 }
2904 close(fd);
2905
2906 *freesize = avail_size_imsm(st, ldsize >> 9);
2907
2908 return 1;
2909 }
2910
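/* find_size - return the length of the run of overlapping extents that
 * begins at e[*idx] in a sorted extent array, advancing *idx past the
 * run; a zero-sized base extent ends the scan
 */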
2911 static unsigned long long find_size(struct extent *e, int *idx, int num_extents)
2912 {
2913 const unsigned long long base_start = e[*idx].start;
2914 unsigned long long end = base_start + e[*idx].size;
2915 int i;
2916
2917 if (base_start == end)
2918 return 0;
2919
2920 *idx = *idx + 1;
2921 for (i = *idx; i < num_extents; i++) {
2922 /* extend overlapping extents */
2923 if (e[i].start >= base_start &&
2924 e[i].start <= end) {
2925 if (e[i].size == 0)
2926 return 0;
2927 if (e[i].start + e[i].size > end)
2928 end = e[i].start + e[i].size;
2929 } else if (e[i].start > end) {
2930 *idx = i;
2931 break;
2932 }
2933 }
2934
2935 return end - base_start;
2936 }
2937
2938 static unsigned long long merge_extents(struct intel_super *super, int sum_extents)
2939 {
2940 /* build a composite disk with all known extents and generate a new
2941 * 'maxsize' given the "all disks in an array must share a common start
2942 * offset" constraint
2943 */
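/* e.g. with two volumes already allocated the composite treats an LBA
 * range as busy if it is busy on any disk, so the largest remaining
 * gap is free space that every candidate disk can supply at the same
 * offset
 */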
2944 struct extent *e = calloc(sum_extents, sizeof(*e));
2945 struct dl *dl;
2946 int i, j;
2947 int start_extent;
2948 unsigned long long pos;
2949 unsigned long long start = 0;
2950 unsigned long long maxsize;
2951 unsigned long reserve;
2952
2953 if (!e)
2954 return ~0ULL; /* error */
2955
2956 /* coalesce and sort all extents. also, check to see if we need to
2957 * reserve space between member arrays
2958 */
2959 j = 0;
2960 for (dl = super->disks; dl; dl = dl->next) {
2961 if (!dl->e)
2962 continue;
2963 for (i = 0; i < dl->extent_cnt; i++)
2964 e[j++] = dl->e[i];
2965 }
2966 qsort(e, sum_extents, sizeof(*e), cmp_extent);
2967
2968 /* merge extents */
2969 i = 0;
2970 j = 0;
2971 while (i < sum_extents) {
2972 e[j].start = e[i].start;
2973 e[j].size = find_size(e, &i, sum_extents);
2974 j++;
2975 if (e[j-1].size == 0)
2976 break;
2977 }
2978
2979 pos = 0;
2980 maxsize = 0;
2981 start_extent = 0;
2982 i = 0;
2983 do {
2984 unsigned long long esize;
2985
2986 esize = e[i].start - pos;
2987 if (esize >= maxsize) {
2988 maxsize = esize;
2989 start = pos;
2990 start_extent = i;
2991 }
2992 pos = e[i].start + e[i].size;
2993 i++;
2994 } while (e[i-1].size);
2995 free(e);
2996
2997 if (start_extent > 0)
2998 reserve = IMSM_RESERVED_SECTORS; /* gap between raid regions */
2999 else
3000 reserve = 0;
3001
3002 if (maxsize < reserve)
3003 return ~0ULL;
3004
3005 super->create_offset = ~((__u32) 0);
3006 if (start + reserve > super->create_offset)
3007 return ~0ULL; /* start overflows create_offset */
3008 super->create_offset = start + reserve;
3009
3010 return maxsize - reserve;
3011 }
3012
3013 static int is_raid_level_supported(const struct imsm_orom *orom, int level, int raiddisks)
3014 {
3015 if (level < 0 || level == 6 || level == 4)
3016 return 0;
3017
3018 /* if we have an orom prevent invalid raid levels */
3019 if (orom)
3020 switch (level) {
3021 case 0: return imsm_orom_has_raid0(orom);
3022 case 1:
3023 if (raiddisks > 2)
3024 return imsm_orom_has_raid1e(orom);
3025 return imsm_orom_has_raid1(orom) && raiddisks == 2;
3026 case 10: return imsm_orom_has_raid10(orom) && raiddisks == 4;
3027 case 5: return imsm_orom_has_raid5(orom) && raiddisks > 2;
3028 }
3029 else
3030 return 1; /* not on an Intel RAID platform so anything goes */
3031
3032 return 0;
3033 }
3034
3035 #define pr_vrb(fmt, arg...) (void) (verbose && fprintf(stderr, Name fmt, ##arg))
3036 /* validate_geometry_imsm_volume - lifted from validate_geometry_ddf_bvd
3037 * FIXME add ahci details
3038 */
3039 static int validate_geometry_imsm_volume(struct supertype *st, int level,
3040 int layout, int raiddisks, int chunk,
3041 unsigned long long size, char *dev,
3042 unsigned long long *freesize,
3043 int verbose)
3044 {
3045 struct stat stb;
3046 struct intel_super *super = st->sb;
3047 struct imsm_super *mpb = super->anchor;
3048 struct dl *dl;
3049 unsigned long long pos = 0;
3050 unsigned long long maxsize;
3051 struct extent *e;
3052 int i;
3053
3054 /* We must have the container info already read in. */
3055 if (!super)
3056 return 0;
3057
3058 if (!is_raid_level_supported(super->orom, level, raiddisks)) {
3059 pr_vrb(": platform does not support raid%d with %d disk%s\n",
3060 level, raiddisks, raiddisks > 1 ? "s" : "");
3061 return 0;
3062 }
3063 if (super->orom && level != 1 &&
3064 !imsm_orom_has_chunk(super->orom, chunk)) {
3065 pr_vrb(": platform does not support a chunk size of: %d\n", chunk);
3066 return 0;
3067 }
3068 if (layout != imsm_level_to_layout(level)) {
3069 if (level == 5)
3070 pr_vrb(": imsm raid 5 only supports the left-asymmetric layout\n");
3071 else if (level == 10)
3072 pr_vrb(": imsm raid 10 only supports the n2 layout\n");
3073 else
3074 pr_vrb(": imsm unknown layout %#x for this raid level %d\n",
3075 layout, level);
3076 return 0;
3077 }
3078
3079 if (!dev) {
3080 /* General test: make sure there is space for
3081 * 'raiddisks' device extents of size 'size' at a given
3082 * offset
3083 */
3084 unsigned long long minsize = size;
3085 unsigned long long start_offset = ~0ULL;
3086 int dcnt = 0;
3087 if (minsize == 0)
3088 minsize = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
3089 for (dl = super->disks; dl ; dl = dl->next) {
3090 int found = 0;
3091
3092 pos = 0;
3093 i = 0;
3094 e = get_extents(super, dl);
3095 if (!e) continue;
3096 do {
3097 unsigned long long esize;
3098 esize = e[i].start - pos;
3099 if (esize >= minsize)
3100 found = 1;
3101 if (found && start_offset == ~0ULL) {
3102 start_offset = pos;
3103 break;
3104 } else if (found && pos != start_offset) {
3105 found = 0;
3106 break;
3107 }
3108 pos = e[i].start + e[i].size;
3109 i++;
3110 } while (e[i-1].size);
3111 if (found)
3112 dcnt++;
3113 free(e);
3114 }
3115 if (dcnt < raiddisks) {
3116 if (verbose)
3117 fprintf(stderr, Name ": imsm: Not enough "
3118 "devices with space for this array "
3119 "(%d < %d)\n",
3120 dcnt, raiddisks);
3121 return 0;
3122 }
3123 return 1;
3124 }
3125
3126 /* This device must be a member of the set */
3127 if (stat(dev, &stb) < 0)
3128 return 0;
3129 if ((S_IFMT & stb.st_mode) != S_IFBLK)
3130 return 0;
3131 for (dl = super->disks ; dl ; dl = dl->next) {
3132 if (dl->major == major(stb.st_rdev) &&
3133 dl->minor == minor(stb.st_rdev))
3134 break;
3135 }
3136 if (!dl) {
3137 if (verbose)
3138 fprintf(stderr, Name ": %s is not in the "
3139 "same imsm set\n", dev);
3140 return 0;
3141 } else if (super->orom && dl->index < 0 && mpb->num_raid_devs) {
3142 /* If a volume is present then the current creation attempt
3143 * cannot incorporate new spares because the orom may not
3144 * understand this configuration (all member disks must be
3145 * members of each array in the container).
3146 */
3147 fprintf(stderr, Name ": %s is a spare and a volume"
3148 " is already defined for this container\n", dev);
3149 fprintf(stderr, Name ": The option-rom requires all member"
3150 " disks to be a member of all volumes\n");
3151 return 0;
3152 }
3153
3154 /* retrieve the largest free space block */
3155 e = get_extents(super, dl);
3156 maxsize = 0;
3157 i = 0;
3158 if (e) {
3159 do {
3160 unsigned long long esize;
3161
3162 esize = e[i].start - pos;
3163 if (esize >= maxsize)
3164 maxsize = esize;
3165 pos = e[i].start + e[i].size;
3166 i++;
3167 } while (e[i-1].size);
3168 dl->e = e;
3169 dl->extent_cnt = i;
3170 } else {
3171 if (verbose)
3172 fprintf(stderr, Name ": unable to determine free space for: %s\n",
3173 dev);
3174 return 0;
3175 }
3176 if (maxsize < size) {
3177 if (verbose)
3178 fprintf(stderr, Name ": %s not enough space (%llu < %llu)\n",
3179 dev, maxsize, size);
3180 return 0;
3181 }
3182
3183 /* count total number of extents for merge */
3184 i = 0;
3185 for (dl = super->disks; dl; dl = dl->next)
3186 if (dl->e)
3187 i += dl->extent_cnt;
3188
3189 maxsize = merge_extents(super, i);
3190 if (maxsize < size) {
3191 if (verbose)
3192 fprintf(stderr, Name ": not enough space after merge (%llu < %llu)\n",
3193 maxsize, size);
3194 return 0;
3195 } else if (maxsize == ~0ULL) {
3196 if (verbose)
3197 fprintf(stderr, Name ": failed to merge %d extents\n", i);
3198 return 0;
3199 }
3200
3201 *freesize = maxsize;
3202
3203 return 1;
3204 }
3205
3206 static int reserve_space(struct supertype *st, int raiddisks,
3207 unsigned long long size, int chunk,
3208 unsigned long long *freesize)
3209 {
3210 struct intel_super *super = st->sb;
3211 struct imsm_super *mpb = super->anchor;
3212 struct dl *dl;
3213 int i;
3214 int extent_cnt;
3215 struct extent *e;
3216 unsigned long long maxsize;
3217 unsigned long long minsize;
3218 int cnt;
3219 int used;
3220
3221 /* find the largest common start free region of the possible disks */
3222 used = 0;
3223 extent_cnt = 0;
3224 cnt = 0;
3225 for (dl = super->disks; dl; dl = dl->next) {
3226 dl->raiddisk = -1;
3227
3228 if (dl->index >= 0)
3229 used++;
3230
3231 /* don't activate new spares if we are orom constrained
3232 * and there is already a volume active in the container
3233 */
3234 if (super->orom && dl->index < 0 && mpb->num_raid_devs)
3235 continue;
3236
3237 e = get_extents(super, dl);
3238 if (!e)
3239 continue;
3240 for (i = 1; e[i-1].size; i++)
3241 ;
3242 dl->e = e;
3243 dl->extent_cnt = i;
3244 extent_cnt += i;
3245 cnt++;
3246 }
3247
3248 maxsize = merge_extents(super, extent_cnt);
3249 minsize = size;
3250 if (size == 0)
3251 minsize = chunk;
3252
3253 if (cnt < raiddisks ||
3254 (super->orom && used && used != raiddisks) ||
3255 maxsize < minsize) {
3256 fprintf(stderr, Name ": not enough devices with space to create array.\n");
3257 return 0; /* not enough disks with sufficient free space */
3258 }
3259
3260 if (size == 0) {
3261 size = maxsize;
3262 if (chunk) {
3263 size /= chunk;
3264 size *= chunk;
3265 }
3266 }
3267
3268 cnt = 0;
3269 for (dl = super->disks; dl; dl = dl->next)
3270 if (dl->e)
3271 dl->raiddisk = cnt++;
3272
3273 *freesize = size;
3274
3275 return 1;
3276 }
3277
3278 static int validate_geometry_imsm(struct supertype *st, int level, int layout,
3279 int raiddisks, int chunk, unsigned long long size,
3280 char *dev, unsigned long long *freesize,
3281 int verbose)
3282 {
3283 int fd, cfd;
3284 struct mdinfo *sra;
3285
3286 /* if given unused devices create a container
3287 * if given devices in a container create a member volume
3288 */
3289 if (level == LEVEL_CONTAINER) {
3290 /* Must be a fresh device to add to a container */
3291 return validate_geometry_imsm_container(st, level, layout,
3292 raiddisks, chunk, size,
3293 dev, freesize,
3294 verbose);
3295 }
3296
3297 if (!dev) {
3298 if (st->sb && freesize) {
3299 /* we are being asked to automatically lay out a
3300 * new volume based on the current contents of
3301 * the container. If the parameters can be
3302 * satisfied reserve_space will record the disks,
3303 * start offset, and size of the volume to be
3304 * created. add_to_super and getinfo_super
3305 * detect when autolayout is in progress.
3306 */
3307 return reserve_space(st, raiddisks, size, chunk, freesize);
3308 }
3309 return 1;
3310 }
3311 if (st->sb) {
3312 /* creating in a given container */
3313 return validate_geometry_imsm_volume(st, level, layout,
3314 raiddisks, chunk, size,
3315 dev, freesize, verbose);
3316 }
3317
3318 /* limit creation to the following levels */
3319 if (!dev)
3320 switch (level) {
3321 case 0:
3322 case 1:
3323 case 10:
3324 case 5:
3325 break;
3326 default:
3327 return 1;
3328 }
3329
3330 /* This device needs to be a device in an 'imsm' container */
3331 fd = open(dev, O_RDONLY|O_EXCL, 0);
3332 if (fd >= 0) {
3333 if (verbose)
3334 fprintf(stderr,
3335 Name ": Cannot create this array on device %s\n",
3336 dev);
3337 close(fd);
3338 return 0;
3339 }
3340 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3341 if (verbose)
3342 fprintf(stderr, Name ": Cannot open %s: %s\n",
3343 dev, strerror(errno));
3344 return 0;
3345 }
3346 /* Well, it is in use by someone, maybe an 'imsm' container. */
3347 cfd = open_container(fd);
3348 if (cfd < 0) {
3349 close(fd);
3350 if (verbose)
3351 fprintf(stderr, Name ": Cannot use %s: It is busy\n",
3352 dev);
3353 return 0;
3354 }
3355 sra = sysfs_read(cfd, 0, GET_VERSION);
3356 close(fd);
3357 if (sra && sra->array.major_version == -1 &&
3358 strcmp(sra->text_version, "imsm") == 0) {
3359 /* This is a member of an imsm container. Load the container
3360 * and try to create a volume
3361 */
3362 struct intel_super *super;
3363
3364 if (load_super_imsm_all(st, cfd, (void **) &super, NULL, 1) == 0) {
3365 st->sb = super;
3366 st->container_dev = fd2devnum(cfd);
3367 close(cfd);
3368 return validate_geometry_imsm_volume(st, level, layout,
3369 raiddisks, chunk,
3370 size, dev,
3371 freesize, verbose);
3372 }
3373 close(cfd);
3374 } else /* may belong to another container */
3375 return 0;
3376
3377 return 1;
3378 }
3379 #endif /* MDASSEMBLE */
3380
3381 static struct mdinfo *container_content_imsm(struct supertype *st)
3382 {
3383 /* Given a container loaded by load_super_imsm_all,
3384 * extract information about all the arrays into
3385 * an mdinfo tree.
3386 *
3387 * For each imsm_dev create an mdinfo, fill it in,
3388 * then look for matching devices in super->disks
3389 * and create appropriate device mdinfo.
3390 */
3391 struct intel_super *super = st->sb;
3392 struct imsm_super *mpb = super->anchor;
3393 struct mdinfo *rest = NULL;
3394 int i;
3395
3396 /* do not assemble arrays that might have bad blocks */
3397 if (imsm_bbm_log_size(super->anchor)) {
3398 fprintf(stderr, Name ": BBM log found in metadata. "
3399 "Cannot activate array(s).\n");
3400 return NULL;
3401 }
3402
3403 for (i = 0; i < mpb->num_raid_devs; i++) {
3404 struct imsm_dev *dev = get_imsm_dev(super, i);
3405 struct imsm_map *map = get_imsm_map(dev, 0);
3406 struct mdinfo *this;
3407 int slot;
3408
3409 /* do not publish arrays that are in the middle of an
3410 * unsupported migration
3411 */
3412 if (dev->vol.migr_state &&
3413 (migr_type(dev) == MIGR_GEN_MIGR ||
3414 migr_type(dev) == MIGR_STATE_CHANGE)) {
3415 fprintf(stderr, Name ": cannot assemble volume '%.16s':"
3416 " unsupported migration in progress\n",
3417 dev->volume);
3418 continue;
3419 }
3420
3421 this = malloc(sizeof(*this));
if (!this)
break;
3422 memset(this, 0, sizeof(*this));
3423 this->next = rest;
3424
3425 super->current_vol = i;
3426 getinfo_super_imsm_volume(st, this);
3427 for (slot = 0 ; slot < map->num_members; slot++) {
3428 struct mdinfo *info_d;
3429 struct dl *d;
3430 int idx;
3431 int skip;
3432 __u32 s;
3433 __u32 ord;
3434
3435 skip = 0;
3436 idx = get_imsm_disk_idx(dev, slot);
3437 ord = get_imsm_ord_tbl_ent(dev, slot);
3438 for (d = super->disks; d ; d = d->next)
3439 if (d->index == idx)
3440 break;
3441
3442 if (d == NULL)
3443 skip = 1;
3444
3445 s = d ? d->disk.status : 0;
3446 if (s & FAILED_DISK)
3447 skip = 1;
3448 if (!(s & USABLE_DISK))
3449 skip = 1;
3450 if (ord & IMSM_ORD_REBUILD)
3451 skip = 1;
3452
3453 /*
3454 * if we skip some disks the array will be assembled degraded;
3455 * reset resync start to avoid a dirty-degraded situation
3456 *
3457 * FIXME handle dirty degraded
3458 */
3459 if (skip && !dev->vol.dirty)
3460 this->resync_start = ~0ULL;
3461 if (skip)
3462 continue;
3463
3464 info_d = malloc(sizeof(*info_d));
3465 if (!info_d) {
3466 fprintf(stderr, Name ": failed to allocate disk"
3467 " for volume %.16s\n", dev->volume);
3468 free(this);
3469 this = rest;
3470 break;
3471 }
3472 memset(info_d, 0, sizeof(*info_d));
3473 info_d->next = this->devs;
3474 this->devs = info_d;
3475
3476 info_d->disk.number = d->index;
3477 info_d->disk.major = d->major;
3478 info_d->disk.minor = d->minor;
3479 info_d->disk.raid_disk = slot;
3480
3481 this->array.working_disks++;
3482
3483 info_d->events = __le32_to_cpu(mpb->generation_num);
3484 info_d->data_offset = __le32_to_cpu(map->pba_of_lba0);
3485 info_d->component_size = __le32_to_cpu(map->blocks_per_member);
3486 if (d->devname)
3487 strcpy(info_d->name, d->devname);
3488 }
3489 rest = this;
3490 }
3491
3492 return rest;
3493 }
3494 
3496 #ifndef MDASSEMBLE
3497 static int imsm_open_new(struct supertype *c, struct active_array *a,
3498 char *inst)
3499 {
3500 struct intel_super *super = c->sb;
3501 struct imsm_super *mpb = super->anchor;
3502
3503 if (atoi(inst) >= mpb->num_raid_devs) {
3504 fprintf(stderr, "%s: subarray index %d out of range\n",
3505 __func__, atoi(inst));
3506 return -ENODEV;
3507 }
3508
3509 dprintf("imsm: open_new %s\n", inst);
3510 a->info.container_member = atoi(inst);
3511 return 0;
3512 }
3513
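/* imsm_check_degraded - map a failed-disk count to the map state the
 * volume should advertise (normal, degraded, or failed), given the
 * redundancy of its raid level
 */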
3514 static __u8 imsm_check_degraded(struct intel_super *super, struct imsm_dev *dev, int failed)
3515 {
3516 struct imsm_map *map = get_imsm_map(dev, 0);
3517
3518 if (!failed)
3519 return map->map_state == IMSM_T_STATE_UNINITIALIZED ?
3520 IMSM_T_STATE_UNINITIALIZED : IMSM_T_STATE_NORMAL;
3521
3522 switch (get_imsm_raid_level(map)) {
3523 case 0:
3524 return IMSM_T_STATE_FAILED;
3525 break;
3526 case 1:
3527 if (failed < map->num_members)
3528 return IMSM_T_STATE_DEGRADED;
3529 else
3530 return IMSM_T_STATE_FAILED;
3531 break;
3532 case 10:
3533 {
3534 /**
3535 * check whether any mirror pair has failed completely,
3536 * otherwise we are merely degraded. Even-numbered slots
3537 * are mirrored on slot+1
3538 */
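/* e.g. in a 4-disk raid10 the mirror pairs are slots (0,1) and
 * (2,3): one failure per pair is merely degraded, but losing both
 * members of either pair fails the whole array
 */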
3539 int i;
3540 /* gcc -Os complains that this is unused */
3541 int insync = insync;
3542
3543 for (i = 0; i < map->num_members; i++) {
3544 __u32 ord = get_imsm_ord_tbl_ent(dev, i);
3545 int idx = ord_to_idx(ord);
3546 struct imsm_disk *disk;
3547
3548 /* reset the potential in-sync count on even-numbered
3549 * slots. num_copies is always 2 for imsm raid10
3550 */
3551 if ((i & 1) == 0)
3552 insync = 2;
3553
3554 disk = get_imsm_disk(super, idx);
3555 if (!disk || disk->status & FAILED_DISK ||
3556 ord & IMSM_ORD_REBUILD)
3557 insync--;
3558
3559 /* no in-sync disks left in this mirror, the
3560 * array has failed
3561 */
3562 if (insync == 0)
3563 return IMSM_T_STATE_FAILED;
3564 }
3565
3566 return IMSM_T_STATE_DEGRADED;
3567 }
3568 case 5:
3569 if (failed < 2)
3570 return IMSM_T_STATE_DEGRADED;
3571 else
3572 return IMSM_T_STATE_FAILED;
3573 break;
3574 default:
3575 break;
3576 }
3577
3578 return map->map_state;
3579 }
3580
3581 static int imsm_count_failed(struct intel_super *super, struct imsm_dev *dev)
3582 {
3583 int i;
3584 int failed = 0;
3585 struct imsm_disk *disk;
3586 struct imsm_map *map = get_imsm_map(dev, 0);
3587 struct imsm_map *prev = get_imsm_map(dev, dev->vol.migr_state);
3588 __u32 ord;
3589 int idx;
3590
3591 /* at the beginning of migration we set IMSM_ORD_REBUILD on
3592 * disks that are being rebuilt. New failures are recorded to
3593 * map[0]. So we look through all the disks we started with and
3594 * see if any failures are still present, or if any new ones
3595 * have arrived
3596 *
3597 * FIXME add support for online capacity expansion and
3598 * raid-level-migration
3599 */
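/* a disk counts as failed if it is absent from super->disks, flagged
 * FAILED_DISK, or still carries IMSM_ORD_REBUILD in either map
 */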
3600 for (i = 0; i < prev->num_members; i++) {
3601 ord = __le32_to_cpu(prev->disk_ord_tbl[i]);
3602 ord |= __le32_to_cpu(map->disk_ord_tbl[i]);
3603 idx = ord_to_idx(ord);
3604
3605 disk = get_imsm_disk(super, idx);
3606 if (!disk || disk->status & FAILED_DISK ||
3607 ord & IMSM_ORD_REBUILD)
3608 failed++;
3609 }
3610
3611 return failed;
3612 }
3613
3614 static int is_resyncing(struct imsm_dev *dev)
3615 {
3616 struct imsm_map *migr_map;
3617
3618 if (!dev->vol.migr_state)
3619 return 0;
3620
3621 if (migr_type(dev) == MIGR_INIT ||
3622 migr_type(dev) == MIGR_REPAIR)
3623 return 1;
3624
3625 migr_map = get_imsm_map(dev, 1);
3626
3627 if (migr_map->map_state == IMSM_T_STATE_NORMAL)
3628 return 1;
3629 else
3630 return 0;
3631 }
3632
3633 static int is_rebuilding(struct imsm_dev *dev)
3634 {
3635 struct imsm_map *migr_map;
3636
3637 if (!dev->vol.migr_state)
3638 return 0;
3639
3640 if (migr_type(dev) != MIGR_REBUILD)
3641 return 0;
3642
3643 migr_map = get_imsm_map(dev, 1);
3644
3645 if (migr_map->map_state == IMSM_T_STATE_DEGRADED)
3646 return 1;
3647 else
3648 return 0;
3649 }
3650
3651 /* return true if we recorded new information */
3652 static int mark_failure(struct imsm_dev *dev, struct imsm_disk *disk, int idx)
3653 {
3654 __u32 ord;
3655 int slot;
3656 struct imsm_map *map;
3657
3658 /* new failures are always set in map[0] */
3659 map = get_imsm_map(dev, 0);
3660
3661 slot = get_imsm_disk_slot(map, idx);
3662 if (slot < 0)
3663 return 0;
3664
3665 ord = __le32_to_cpu(map->disk_ord_tbl[slot]);
3666 if ((disk->status & FAILED_DISK) && (ord & IMSM_ORD_REBUILD))
3667 return 0;
3668
3669 disk->status |= FAILED_DISK;
3670 set_imsm_ord_tbl_ent(map, slot, idx | IMSM_ORD_REBUILD);
3671 if (~map->failed_disk_num == 0)
3672 map->failed_disk_num = slot;
3673 return 1;
3674 }
3675
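/* mark_missing - fail the disk and scrub its identity (scsi_id of ~0,
 * serial shifted down a byte) so the record no longer matches any
 * live disk
 */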
3676 static void mark_missing(struct imsm_dev *dev, struct imsm_disk *disk, int idx)
3677 {
3678 mark_failure(dev, disk, idx);
3679
3680 if (disk->scsi_id == __cpu_to_le32(~(__u32)0))
3681 return;
3682
3683 disk->scsi_id = __cpu_to_le32(~(__u32)0);
3684 memmove(&disk->serial[0], &disk->serial[1], MAX_RAID_SERIAL_LEN - 1);
3685 }
3686
3687 /* Handle dirty -> clean transitions and resync. Degraded and rebuild
3688 * states are handled in imsm_set_disk() with one exception, when a
3689 * resync is stopped due to a new failure this routine will set the
3690 * 'degraded' state for the array.
3691 */
3692 static int imsm_set_array_state(struct active_array *a, int consistent)
3693 {
3694 int inst = a->info.container_member;
3695 struct intel_super *super = a->container->sb;
3696 struct imsm_dev *dev = get_imsm_dev(super, inst);
3697 struct imsm_map *map = get_imsm_map(dev, 0);
3698 int failed = imsm_count_failed(super, dev);
3699 __u8 map_state = imsm_check_degraded(super, dev, failed);
3700
3701 /* before we activate this array handle any missing disks */
3702 if (consistent == 2 && super->missing) {
3703 struct dl *dl;
3704
3705 dprintf("imsm: mark missing\n");
3706 end_migration(dev, map_state);
3707 for (dl = super->missing; dl; dl = dl->next)
3708 mark_missing(dev, &dl->disk, dl->index);
3709 super->updates_pending++;
3710 }
3711
3712 if (consistent == 2 &&
3713 (!is_resync_complete(a) ||
3714 map_state != IMSM_T_STATE_NORMAL ||
3715 dev->vol.migr_state))
3716 consistent = 0;
3717
3718 if (is_resync_complete(a)) {
3719 /* complete initialization / resync,
3720 * recovery and interrupted recovery is completed in
3721 * ->set_disk
3722 */
3723 if (is_resyncing(dev)) {
3724 dprintf("imsm: mark resync done\n");
3725 end_migration(dev, map_state);
3726 super->updates_pending++;
3727 }
3728 } else if (!is_resyncing(dev) && !failed) {
3729 /* mark the start of the init process if nothing is failed */
3730 dprintf("imsm: mark resync start (%llu)\n", a->resync_start);
3731 if (map->map_state == IMSM_T_STATE_UNINITIALIZED)
3732 migrate(dev, IMSM_T_STATE_NORMAL, MIGR_INIT);
3733 else
3734 migrate(dev, IMSM_T_STATE_NORMAL, MIGR_REPAIR);
3735 super->updates_pending++;
3736 }
3737
3738 /* FIXME check if we can update curr_migr_unit from resync_start */
3739
3740 /* mark dirty / clean */
3741 if (dev->vol.dirty != !consistent) {
3742 dprintf("imsm: mark '%s' (%llu)\n",
3743 consistent ? "clean" : "dirty", a->resync_start);
3744 if (consistent)
3745 dev->vol.dirty = 0;
3746 else
3747 dev->vol.dirty = 1;
3748 super->updates_pending++;
3749 }
3750 return consistent;
3751 }
3752
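/* imsm_set_disk - fold a per-slot state change reported by mdmon into
 * the metadata: record fresh failures, promote a freshly rebuilt slot
 * into the migration map, and advance the volume's map state
 */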
3753 static void imsm_set_disk(struct active_array *a, int n, int state)
3754 {
3755 int inst = a->info.container_member;
3756 struct intel_super *super = a->container->sb;
3757 struct imsm_dev *dev = get_imsm_dev(super, inst);
3758 struct imsm_map *map = get_imsm_map(dev, 0);
3759 struct imsm_disk *disk;
3760 int failed;
3761 __u32 ord;
3762 __u8 map_state;
3763
3764 if (n >= map->num_members)
3765 fprintf(stderr, "imsm: set_disk %d out of range 0..%d\n",
3766 n, map->num_members - 1);
3767
3768 if (n < 0)
3769 return;
3770
3771 dprintf("imsm: set_disk %d:%x\n", n, state);
3772
3773 ord = get_imsm_ord_tbl_ent(dev, n);
3774 disk = get_imsm_disk(super, ord_to_idx(ord));
3775
3776 /* check for new failures */
3777 if (state & DS_FAULTY) {
3778 if (mark_failure(dev, disk, ord_to_idx(ord)))
3779 super->updates_pending++;
3780 }
3781
3782 /* check if in_sync */
3783 if (state & DS_INSYNC && ord & IMSM_ORD_REBUILD && is_rebuilding(dev)) {
3784 struct imsm_map *migr_map = get_imsm_map(dev, 1);
3785
3786 set_imsm_ord_tbl_ent(migr_map, n, ord_to_idx(ord));
3787 super->updates_pending++;
3788 }
3789
3790 failed = imsm_count_failed(super, dev);
3791 map_state = imsm_check_degraded(super, dev, failed);
3792
3793 /* check if recovery complete, newly degraded, or failed */
3794 if (map_state == IMSM_T_STATE_NORMAL && is_rebuilding(dev)) {
3795 end_migration(dev, map_state);
3796 map = get_imsm_map(dev, 0);
3797 map->failed_disk_num = ~0;
3798 super->updates_pending++;
3799 } else if (map_state == IMSM_T_STATE_DEGRADED &&
3800 map->map_state != map_state &&
3801 !dev->vol.migr_state) {
3802 dprintf("imsm: mark degraded\n");
3803 map->map_state = map_state;
3804 super->updates_pending++;
3805 } else if (map_state == IMSM_T_STATE_FAILED &&
3806 map->map_state != map_state) {
3807 dprintf("imsm: mark failed\n");
3808 end_migration(dev, map_state);
3809 super->updates_pending++;
3810 }
3811 }
3812
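/* store_imsm_mpb - write the anchor to the second-to-last sector of
 * the device, preceded by any extended mpb sectors (the layout
 * sketched above load_imsm_mpb())
 */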
3813 static int store_imsm_mpb(int fd, struct intel_super *super)
3814 {
3815 struct imsm_super *mpb = super->anchor;
3816 __u32 mpb_size = __le32_to_cpu(mpb->mpb_size);
3817 unsigned long long dsize;
3818 unsigned long long sectors;
3819
3820 get_dev_size(fd, NULL, &dsize);
3821
3822 if (mpb_size > 512) {
3823 /* -1 to account for anchor */
3824 sectors = mpb_sectors(mpb) - 1;
3825
3826 /* write the extended mpb to the sectors preceding the anchor */
3827 if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0)
3828 return 1;
3829
3830 if (write(fd, super->buf + 512, 512 * sectors) != 512 * sectors)
3831 return 1;
3832 }
3833
3834 /* the anchor (first 512 bytes of the mpb) is stored in the second-to-last sector of the disk */
3835 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
3836 return 1;
3837
3838 if (write(fd, super->buf, 512) != 512)
3839 return 1;
3840
3841 return 0;
3842 }
3843
3844 static void imsm_sync_metadata(struct supertype *container)
3845 {
3846 struct intel_super *super = container->sb;
3847
3848 if (!super->updates_pending)
3849 return;
3850
3851 write_super_imsm(super, 0);
3852
3853 super->updates_pending = 0;
3854 }
3855
3856 static struct dl *imsm_readd(struct intel_super *super, int idx, struct active_array *a)
3857 {
3858 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
3859 int i = get_imsm_disk_idx(dev, idx);
3860 struct dl *dl;
3861
3862 for (dl = super->disks; dl; dl = dl->next)
3863 if (dl->index == i)
3864 break;
3865
3866 if (dl && dl->disk.status & FAILED_DISK)
3867 dl = NULL;
3868
3869 if (dl)
3870 dprintf("%s: found %x:%x\n", __func__, dl->major, dl->minor);
3871
3872 return dl;
3873 }
3874
3875 static struct dl *imsm_add_spare(struct intel_super *super, int slot,
3876 struct active_array *a, int activate_new)
3877 {
3878 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
3879 int idx = get_imsm_disk_idx(dev, slot);
3880 struct imsm_super *mpb = super->anchor;
3881 struct imsm_map *map;
3882 unsigned long long pos;
3883 struct mdinfo *d;
3884 struct extent *ex;
3885 int i, j;
3886 int found;
3887 __u32 array_start;
3888 __u32 array_end;
3889 struct dl *dl;
3890
3891 for (dl = super->disks; dl; dl = dl->next) {
3892 /* If in this array, skip */
3893 for (d = a->info.devs ; d ; d = d->next)
3894 if (d->state_fd >= 0 &&
3895 d->disk.major == dl->major &&
3896 d->disk.minor == dl->minor) {
3897 dprintf("%x:%x already in array\n", dl->major, dl->minor);
3898 break;
3899 }
3900 if (d)
3901 continue;
3902
3903 /* skip in use or failed drives */
3904 if (dl->disk.status & FAILED_DISK || idx == dl->index ||
3905 dl->index == -2) {
3906 dprintf("%x:%x status (failed: %d index: %d)\n",
3907 dl->major, dl->minor,
3908 (dl->disk.status & FAILED_DISK) == FAILED_DISK, idx);
3909 continue;
3910 }
3911
3912 /* skip pure spares when we are looking for partially
3913 * assimilated drives
3914 */
3915 if (dl->index == -1 && !activate_new)
3916 continue;
3917
3918 /* Does this unused device have the requisite free space?
3919 * It needs to be able to cover all member volumes
3920 */
3921 ex = get_extents(super, dl);
3922 if (!ex) {
3923 dprintf("cannot get extents\n");
3924 continue;
3925 }
3926 for (i = 0; i < mpb->num_raid_devs; i++) {
3927 dev = get_imsm_dev(super, i);
3928 map = get_imsm_map(dev, 0);
3929
3930 /* check if this disk is already a member of
3931 * this array
3932 */
3933 if (get_imsm_disk_slot(map, dl->index) >= 0)
3934 continue;
3935
3936 found = 0;
3937 j = 0;
3938 pos = 0;
3939 array_start = __le32_to_cpu(map->pba_of_lba0);
3940 array_end = array_start +
3941 __le32_to_cpu(map->blocks_per_member) - 1;
3942
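/* worked example (hypothetical numbers): with used extents
 * {start=1000, size=2000} and terminator {start=9000, size=0}, the
 * free gaps are [0,999] and [3000,8999]; an array spanning
 * [3000,4999] is accepted on the second pass since
 * 3000 >= pos (3000) and 4999 < ex[1].start (9000)
 */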
3943 do {
3944 /* check that we can start at pba_of_lba0 with
3945 * blocks_per_member of space
3946 */
3947 if (array_start >= pos && array_end < ex[j].start) {
3948 found = 1;
3949 break;
3950 }
3951 pos = ex[j].start + ex[j].size;
3952 j++;
3953 } while (ex[j-1].size);
3954
3955 if (!found)
3956 break;
3957 }
3958
3959 free(ex);
3960 if (i < mpb->num_raid_devs) {
3961 dprintf("%x:%x does not have %u to %u available\n",
3962 dl->major, dl->minor, array_start, array_end);
3963 /* No room */
3964 continue;
3965 }
3966 return dl;
3967 }
3968
3969 return dl;
3970 }
3971
3972 static struct mdinfo *imsm_activate_spare(struct active_array *a,
3973 struct metadata_update **updates)
3974 {
3975 /**
3976 * Find a device with unused free space and use it to replace a
3977 * failed/vacant region in an array. We replace failed regions one a
3978 * array at a time. The result is that a new spare disk will be added
3979 * to the first failed array and after the monitor has finished
3980 * propagating failures the remainder will be consumed.
3981 *
3982 * FIXME add a capability for mdmon to request spares from another
3983 * container.
3984 */
3985
3986 struct intel_super *super = a->container->sb;
3987 int inst = a->info.container_member;
3988 struct imsm_dev *dev = get_imsm_dev(super, inst);
3989 struct imsm_map *map = get_imsm_map(dev, 0);
3990 int failed = a->info.array.raid_disks;
3991 struct mdinfo *rv = NULL;
3992 struct mdinfo *d;
3993 struct mdinfo *di;
3994 struct metadata_update *mu;
3995 struct dl *dl;
3996 struct imsm_update_activate_spare *u;
3997 int num_spares = 0;
3998 int i;
3999
4000 for (d = a->info.devs ; d ; d = d->next) {
4001 if ((d->curr_state & DS_FAULTY) &&
4002 d->state_fd >= 0)
4003 /* wait for Removal to happen */
4004 return NULL;
4005 if (d->state_fd >= 0)
4006 failed--;
4007 }
4008
4009 dprintf("imsm: activate spare: inst=%d failed=%d (%d) level=%d\n",
4010 inst, failed, a->info.array.raid_disks, a->info.array.level);
4011 if (imsm_check_degraded(super, dev, failed) != IMSM_T_STATE_DEGRADED)
4012 return NULL;
4013
4014 /* For each slot, if it is not working, find a spare */
4015 for (i = 0; i < a->info.array.raid_disks; i++) {
4016 for (d = a->info.devs ; d ; d = d->next)
4017 if (d->disk.raid_disk == i)
4018 break;
4019 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
4020 if (d && (d->state_fd >= 0))
4021 continue;
4022
4023 /*
4024 * OK, this device needs recovery. Try to re-add the
4025 * previous occupant of this slot; if that fails, see if
4026 * we can continue the assimilation of a spare that was
4027 * partially assimilated; finally, try to activate a new
4028 * spare.
4029 */
4030 dl = imsm_readd(super, i, a);
4031 if (!dl)
4032 dl = imsm_add_spare(super, i, a, 0);
4033 if (!dl)
4034 dl = imsm_add_spare(super, i, a, 1);
4035 if (!dl)
4036 continue;
4037
4038 /* found a usable disk with enough space */
4039 di = malloc(sizeof(*di));
4040 if (!di)
4041 continue;
4042 memset(di, 0, sizeof(*di));
4043
4044 /* dl->index will be -1 in the case that we are activating a
4045 * pristine spare. imsm_process_update() will create a
4046 * new index in this case. Once a disk is found to be
4047 * failed in all member arrays it is kicked from the
4048 * metadata
4049 */
4050 di->disk.number = dl->index;
4051
4052 /* (ab)use di->devs to store a pointer to the device
4053 * we chose
4054 */
4055 di->devs = (struct mdinfo *) dl;
4056
4057 di->disk.raid_disk = i;
4058 di->disk.major = dl->major;
4059 di->disk.minor = dl->minor;
4060 di->disk.state = 0;
4061 di->data_offset = __le32_to_cpu(map->pba_of_lba0);
4062 di->component_size = a->info.component_size;
4063 di->container_member = inst;
4064 super->random = random32();
4065 di->next = rv;
4066 rv = di;
4067 num_spares++;
4068 dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
4069 i, di->data_offset);
4070
4071 break;
4072 }
4073
4074 if (!rv)
4075 /* No spares found */
4076 return rv;
4077 /* Now 'rv' has a list of devices to return.
4078 * Create a metadata_update record to update the
4079 * disk_ord_tbl for the array
4080 */
4081 mu = malloc(sizeof(*mu));
4082 if (mu) {
4083 mu->buf = malloc(sizeof(struct imsm_update_activate_spare) * num_spares);
4084 if (mu->buf == NULL) {
4085 free(mu);
4086 mu = NULL;
4087 }
4088 }
4089 if (!mu) {
4090 while (rv) {
4091 struct mdinfo *n = rv->next;
4092
4093 free(rv);
4094 rv = n;
4095 }
4096 return NULL;
4097 }
4098
4099 mu->space = NULL;
4100 mu->len = sizeof(struct imsm_update_activate_spare) * num_spares;
4101 mu->next = *updates;
4102 u = (struct imsm_update_activate_spare *) mu->buf;
4103
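/* the update records are packed back-to-back in mu->buf and chained
 * via ->next so the monitor can walk them as a list; the final
 * record's ->next is terminated below
 */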
4104 for (di = rv ; di ; di = di->next) {
4105 u->type = update_activate_spare;
4106 u->dl = (struct dl *) di->devs;
4107 di->devs = NULL;
4108 u->slot = di->disk.raid_disk;
4109 u->array = inst;
4110 u->next = u + 1;
4111 u++;
4112 }
4113 (u-1)->next = NULL;
4114 *updates = mu;
4115
4116 return rv;
4117 }
4118
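/* return 1 if any member of existing array 'idx' also appears, by
 * serial number, in the proposed create-array update 'u'
 */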
4119 static int disks_overlap(struct intel_super *super, int idx, struct imsm_update_create_array *u)
4120 {
4121 struct imsm_dev *dev = get_imsm_dev(super, idx);
4122 struct imsm_map *map = get_imsm_map(dev, 0);
4123 struct imsm_map *new_map = get_imsm_map(&u->dev, 0);
4124 struct disk_info *inf = get_disk_info(u);
4125 struct imsm_disk *disk;
4126 int i;
4127 int j;
4128
4129 for (i = 0; i < map->num_members; i++) {
4130 disk = get_imsm_disk(super, get_imsm_disk_idx(dev, i));
4131 for (j = 0; j < new_map->num_members; j++)
4132 if (serialcmp(disk->serial, inf[j].serial) == 0)
4133 return 1;
4134 }
4135
4136 return 0;
4137 }
4138
4139 static void imsm_delete(struct intel_super *super, struct dl **dlp, int index);
4140
4141 static void imsm_process_update(struct supertype *st,
4142 struct metadata_update *update)
4143 {
4144 /**
4145 * crack open the metadata_update envelope to find the update record
4146 * update can be one of:
4147 * update_activate_spare - a spare device has replaced a failed
4148 * device in an array, update the disk_ord_tbl. If this disk is
4149 * present in all member arrays then also clear the SPARE_DISK
4150 * flag
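* update_create_array - a new raid-device is being added to the
* container, validate its placement and commit the new device
* record
* update_add_disk - newly arrived disks queued on super->add are
* folded into the container's disk list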
4151 */
4152 struct intel_super *super = st->sb;
4153 struct imsm_super *mpb;
4154 enum imsm_update_type type = *(enum imsm_update_type *) update->buf;
4155
4156 /* update requires a larger buf but the allocation failed */
4157 if (super->next_len && !super->next_buf) {
4158 super->next_len = 0;
4159 return;
4160 }
4161
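/* a larger buffer was staged by imsm_prepare_update() in the manager
 * thread; adopt it here, preserving the current metadata contents,
 * before applying the update
 */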
4162 if (super->next_buf) {
4163 memcpy(super->next_buf, super->buf, super->len);
4164 free(super->buf);
4165 super->len = super->next_len;
4166 super->buf = super->next_buf;
4167
4168 super->next_len = 0;
4169 super->next_buf = NULL;
4170 }
4171
4172 mpb = super->anchor;
4173
4174 switch (type) {
4175 case update_activate_spare: {
4176 struct imsm_update_activate_spare *u = (void *) update->buf;
4177 struct imsm_dev *dev = get_imsm_dev(super, u->array);
4178 struct imsm_map *map = get_imsm_map(dev, 0);
4179 struct imsm_map *migr_map;
4180 struct active_array *a;
4181 struct imsm_disk *disk;
4182 __u8 to_state;
4183 struct dl *dl;
4184 unsigned int found;
4185 int failed;
4186 int victim = get_imsm_disk_idx(dev, u->slot);
4187 int i;
4188
4189 for (dl = super->disks; dl; dl = dl->next)
4190 if (dl == u->dl)
4191 break;
4192
4193 if (!dl) {
4194 fprintf(stderr, "error: imsm_activate_spare passed "
4195 "an unknown disk (index: %d)\n",
4196 u->dl->index);
4197 return;
4198 }
4199
4200 super->updates_pending++;
4201
4202 /* count failures (excluding rebuilds and the victim)
4203 * to determine map[0] state
4204 */
4205 failed = 0;
4206 for (i = 0; i < map->num_members; i++) {
4207 if (i == u->slot)
4208 continue;
4209 disk = get_imsm_disk(super, get_imsm_disk_idx(dev, i));
4210 if (!disk || disk->status & FAILED_DISK)
4211 failed++;
4212 }
4213
4214 /* adding a pristine spare, assign a new index */
4215 if (dl->index < 0) {
4216 dl->index = super->anchor->num_disks;
4217 super->anchor->num_disks++;
4218 }
4219 disk = &dl->disk;
4220 disk->status |= CONFIGURED_DISK;
4221 disk->status &= ~SPARE_DISK;
4222
4223 /* mark rebuild */
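/* (migrate() opens a MIGR_REBUILD migration, materializing a second
 * map; map[0] then gets the bare index of the replacement disk while
 * the migration map carries the same index flagged with
 * IMSM_ORD_REBUILD)
 */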
4224 to_state = imsm_check_degraded(super, dev, failed);
4225 map->map_state = IMSM_T_STATE_DEGRADED;
4226 migrate(dev, to_state, MIGR_REBUILD);
4227 migr_map = get_imsm_map(dev, 1);
4228 set_imsm_ord_tbl_ent(map, u->slot, dl->index);
4229 set_imsm_ord_tbl_ent(migr_map, u->slot, dl->index | IMSM_ORD_REBUILD);
4230
4231 /* update the family_num to mark a new container
4232 * generation, being careful to record the existing
4233 * family_num in orig_family_num to clean up after
4234 * earlier mdadm versions that neglected to set it.
4235 */
4236 if (mpb->orig_family_num == 0)
4237 mpb->orig_family_num = mpb->family_num;
4238 mpb->family_num += super->random;
4239
4240 /* count arrays using the victim in the metadata */
4241 found = 0;
4242 for (a = st->arrays; a ; a = a->next) {
4243 dev = get_imsm_dev(super, a->info.container_member);
4244 map = get_imsm_map(dev, 0);
4245
4246 if (get_imsm_disk_slot(map, victim) >= 0)
4247 found++;
4248 }
4249
4250 /* delete the victim if it is no longer being
4251 * utilized anywhere
4252 */
4253 if (!found) {
4254 struct dl **dlp;
4255
4256 /* We know that 'manager' isn't touching anything,
4257 * so it is safe to delete
4258 */
4259 for (dlp = &super->disks; *dlp; dlp = &(*dlp)->next)
4260 if ((*dlp)->index == victim)
4261 break;
4262
4263 /* victim may be on the missing list */
4264 if (!*dlp)
4265 for (dlp = &super->missing; *dlp; dlp = &(*dlp)->next)
4266 if ((*dlp)->index == victim)
4267 break;
4268 imsm_delete(super, dlp, victim);
4269 }
4270 break;
4271 }
4272 case update_create_array: {
4273 /* someone wants to create a new array; we need to be aware of
4274 * a few races/collisions:
4275 * 1/ 'Create' called by two separate instances of mdadm
4276 * 2/ 'Create' versus 'activate_spare': mdadm has chosen
4277 * devices that have since been assimilated via
4278 * activate_spare.
4279 * In the event this update cannot be carried out, mdadm will
4280 * (FIX ME) notice that its update did not take hold.
4281 */
4282 struct imsm_update_create_array *u = (void *) update->buf;
4283 struct intel_dev *dv;
4284 struct imsm_dev *dev;
4285 struct imsm_map *map, *new_map;
4286 unsigned long long start, end;
4287 unsigned long long new_start, new_end;
4288 int i;
4289 struct disk_info *inf;
4290 struct dl *dl;
4291
4292 /* handle racing creates: first come first serve */
4293 if (u->dev_idx < mpb->num_raid_devs) {
4294 dprintf("%s: subarray %d already defined\n",
4295 __func__, u->dev_idx);
4296 goto create_error;
4297 }
4298
4299 /* check update is next in sequence */
4300 if (u->dev_idx != mpb->num_raid_devs) {
4301 dprintf("%s: cannot create array %d, expected index %d\n",
4302 __func__, u->dev_idx, mpb->num_raid_devs);
4303 goto create_error;
4304 }
4305
4306 new_map = get_imsm_map(&u->dev, 0);
4307 new_start = __le32_to_cpu(new_map->pba_of_lba0);
4308 new_end = new_start + __le32_to_cpu(new_map->blocks_per_member);
4309 inf = get_disk_info(u);
4310
4311 /* handle activate_spare versus create race:
4312 * check to make sure that overlapping arrays do not include
4313 * overlapping disks
4314 */
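/* two block ranges intersect iff one range's start lies within the
 * other; that is the test applied against each existing map below
 */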
4315 for (i = 0; i < mpb->num_raid_devs; i++) {
4316 dev = get_imsm_dev(super, i);
4317 map = get_imsm_map(dev, 0);
4318 start = __le32_to_cpu(map->pba_of_lba0);
4319 end = start + __le32_to_cpu(map->blocks_per_member);
4320 if ((new_start >= start && new_start <= end) ||
4321 (start >= new_start && start <= new_end))
4322 /* overlap */;
4323 else
4324 continue;
4325
4326 if (disks_overlap(super, i, u)) {
4327 dprintf("%s: arrays overlap\n", __func__);
4328 goto create_error;
4329 }
4330 }
4331
4332 /* check that prepare update was successful */
4333 if (!update->space) {
4334 dprintf("%s: prepare update failed\n", __func__);
4335 goto create_error;
4336 }
4337
4338 /* check that all disks are still active before committing
4339 * changes. FIXME: could we instead handle this by creating a
4340 * degraded array? That's probably not what the user expects,
4341 * so better to drop this update on the floor.
4342 */
4343 for (i = 0; i < new_map->num_members; i++) {
4344 dl = serial_to_dl(inf[i].serial, super);
4345 if (!dl) {
4346 dprintf("%s: disk disappeared\n", __func__);
4347 goto create_error;
4348 }
4349 }
4350
4351 super->updates_pending++;
4352
4353 /* convert spares to members and fixup ord_tbl */
4354 for (i = 0; i < new_map->num_members; i++) {
4355 dl = serial_to_dl(inf[i].serial, super);
4356 if (dl->index == -1) {
4357 dl->index = mpb->num_disks;
4358 mpb->num_disks++;
4359 dl->disk.status |= CONFIGURED_DISK;
4360 dl->disk.status &= ~SPARE_DISK;
4361 }
4362 set_imsm_ord_tbl_ent(new_map, i, dl->index);
4363 }
4364
4365 dv = update->space;
4366 dev = dv->dev;
4367 update->space = NULL;
4368 imsm_copy_dev(dev, &u->dev);
4369 dv->index = u->dev_idx;
4370 dv->next = super->devlist;
4371 super->devlist = dv;
4372 mpb->num_raid_devs++;
4373
4374 imsm_update_version_info(super);
4375 break;
4376 create_error:
4377 /* mdmon knows how to release update->space, but not
4378 * ((struct intel_dev *) update->space)->dev
4379 */
4380 if (update->space) {
4381 dv = update->space;
4382 free(dv->dev);
4383 }
4384 break;
4385 }
4386 case update_add_disk:
4387
4388 /* we may be able to repair some arrays if disks are
4389 * being added */
4390 if (super->add) {
4391 struct active_array *a;
4392
4393 super->updates_pending++;
4394 for (a = st->arrays; a; a = a->next)
4395 a->check_degraded = 1;
4396 }
4397 /* add some spares to the metadata */
4398 while (super->add) {
4399 struct dl *al;
4400
4401 al = super->add;
4402 super->add = al->next;
4403 al->next = super->disks;
4404 super->disks = al;
4405 dprintf("%s: added %x:%x\n",
4406 __func__, al->major, al->minor);
4407 }
4408
4409 break;
4410 }
4411 }
4412
4413 static void imsm_prepare_update(struct supertype *st,
4414 struct metadata_update *update)
4415 {
4416 /**
4417 * Allocate space to hold new disk entries, raid-device entries or a new
4418 * mpb if necessary. The manager synchronously waits for updates to
4419 * complete in the monitor, so new mpb buffers allocated here can be
4420 * integrated by the monitor thread without worrying about live pointers
4421 * in the manager thread.
4422 */
4423 enum imsm_update_type type = *(enum imsm_update_type *) update->buf;
4424 struct intel_super *super = st->sb;
4425 struct imsm_super *mpb = super->anchor;
4426 size_t buf_len;
4427 size_t len = 0;
4428
4429 switch (type) {
4430 case update_create_array: {
4431 struct imsm_update_create_array *u = (void *) update->buf;
4432 struct intel_dev *dv;
4433 struct imsm_dev *dev = &u->dev;
4434 struct imsm_map *map = get_imsm_map(dev, 0);
4435 struct dl *dl;
4436 struct disk_info *inf;
4437 int i;
4438 int activate = 0;
4439
4440 inf = get_disk_info(u);
4441 len = sizeof_imsm_dev(dev, 1);
4442 /* allocate a new super->devlist entry */
4443 dv = malloc(sizeof(*dv));
4444 if (dv) {
4445 dv->dev = malloc(len);
4446 if (dv->dev)
4447 update->space = dv;
4448 else {
4449 free(dv);
4450 update->space = NULL;
4451 }
4452 }
4453
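/* the mpb must grow by one imsm_disk record for each pristine spare
 * this create will promote, so the count below feeds the buffer-size
 * estimate
 */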
4454 /* count how many spares will be converted to members */
4455 for (i = 0; i < map->num_members; i++) {
4456 dl = serial_to_dl(inf[i].serial, super);
4457 if (!dl) {
4458 /* hmm, maybe it failed? nothing we can do about
4459 * it here
4460 */
4461 continue;
4462 }
4463 if (count_memberships(dl, super) == 0)
4464 activate++;
4465 }
4466 len += activate * sizeof(struct imsm_disk);
4467 break;
4468 }
4469 default:
4470 break;
4471 }
4472
4473 /* check if we need a larger metadata buffer */
4474 if (super->next_buf)
4475 buf_len = super->next_len;
4476 else
4477 buf_len = super->len;
4478
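/* e.g. (hypothetical sizes) mpb_size 800 + len 256 = 1056 rounds up
 * to a 1536-byte, 3-sector allocation via ROUND_UP(..., 512)
 */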
4479 if (__le32_to_cpu(mpb->mpb_size) + len > buf_len) {
4480 /* ok, we need a larger buf than what is currently allocated.
4481 * If this allocation fails, process_update() will notice that
4482 * ->next_len is set and ->next_buf is NULL
4483 */
4484 buf_len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) + len, 512);
4485 if (super->next_buf)
4486 free(super->next_buf);
4487
4488 super->next_len = buf_len;
4489 if (posix_memalign(&super->next_buf, 512, buf_len) == 0)
4490 memset(super->next_buf, 0, buf_len);
4491 else
4492 super->next_buf = NULL;
4493 }
4494 }
4495
4496 /* must be called while manager is quiesced */
4497 static void imsm_delete(struct intel_super *super, struct dl **dlp, int index)
4498 {
4499 struct imsm_super *mpb = super->anchor;
4500 struct dl *iter;
4501 struct imsm_dev *dev;
4502 struct imsm_map *map;
4503 int i, j, num_members;
4504 __u32 ord;
4505
4506 dprintf("%s: deleting device[%d] from imsm_super\n",
4507 __func__, index);
4508
4509 /* shift all indexes down one */
4510 for (iter = super->disks; iter; iter = iter->next)
4511 if (iter->index > index)
4512 iter->index--;
4513 for (iter = super->missing; iter; iter = iter->next)
4514 if (iter->index > index)
4515 iter->index--;
4516
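/* every ord-table entry referencing an index above the victim must
 * also drop by one; ord flags are stripped for map[0] via
 * ord_to_idx() but preserved for the migration map
 */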
4517 for (i = 0; i < mpb->num_raid_devs; i++) {
4518 dev = get_imsm_dev(super, i);
4519 map = get_imsm_map(dev, 0);
4520 num_members = map->num_members;
4521 for (j = 0; j < num_members; j++) {
4522 /* update ord entries being careful not to propagate
4523 * ord-flags to the first map
4524 */
4525 ord = get_imsm_ord_tbl_ent(dev, j);
4526
4527 if (ord_to_idx(ord) <= index)
4528 continue;
4529
4530 map = get_imsm_map(dev, 0);
4531 set_imsm_ord_tbl_ent(map, j, ord_to_idx(ord - 1));
4532 map = get_imsm_map(dev, 1);
4533 if (map)
4534 set_imsm_ord_tbl_ent(map, j, ord - 1);
4535 }
4536 }
4537
4538 mpb->num_disks--;
4539 super->updates_pending++;
4540 if (*dlp) {
4541 struct dl *dl = *dlp;
4542
4543 *dlp = (*dlp)->next;
4544 __free_imsm_disk(dl);
4545 }
4546 }
4547 #endif /* MDASSEMBLE */
4548
4549 struct superswitch super_imsm = {
4550 #ifndef MDASSEMBLE
4551 .examine_super = examine_super_imsm,
4552 .brief_examine_super = brief_examine_super_imsm,
4553 .export_examine_super = export_examine_super_imsm,
4554 .detail_super = detail_super_imsm,
4555 .brief_detail_super = brief_detail_super_imsm,
4556 .write_init_super = write_init_super_imsm,
4557 .validate_geometry = validate_geometry_imsm,
4558 .add_to_super = add_to_super_imsm,
4559 .detail_platform = detail_platform_imsm,
4560 #endif
4561 .match_home = match_home_imsm,
4562 .uuid_from_super= uuid_from_super_imsm,
4563 .getinfo_super = getinfo_super_imsm,
4564 .update_super = update_super_imsm,
4565
4566 .avail_size = avail_size_imsm,
4567
4568 .compare_super = compare_super_imsm,
4569
4570 .load_super = load_super_imsm,
4571 .init_super = init_super_imsm,
4572 .store_super = store_zero_imsm,
4573 .free_super = free_super_imsm,
4574 .match_metadata_desc = match_metadata_desc_imsm,
4575 .container_content = container_content_imsm,
4576 .default_layout = imsm_level_to_layout,
4577
4578 .external = 1,
4579 .name = "imsm",
4580
4581 #ifndef MDASSEMBLE
4582 /* for mdmon */
4583 .open_new = imsm_open_new,
4585 .set_array_state= imsm_set_array_state,
4586 .set_disk = imsm_set_disk,
4587 .sync_metadata = imsm_sync_metadata,
4588 .activate_spare = imsm_activate_spare,
4589 .process_update = imsm_process_update,
4590 .prepare_update = imsm_prepare_update,
4591 #endif /* MDASSEMBLE */
4592 };