2 * mdadm - Intel(R) Matrix Storage Manager Support
4 * Copyright (C) 2002-2008 Intel Corporation
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 #define HAVE_STDINT_H 1
24 #include "platform-intel.h"
30 /* MPB == Metadata Parameter Block */
31 #define MPB_SIGNATURE "Intel Raid ISM Cfg Sig. "
32 #define MPB_SIG_LEN (strlen(MPB_SIGNATURE))
33 #define MPB_VERSION_RAID0 "1.0.00"
34 #define MPB_VERSION_RAID1 "1.1.00"
35 #define MPB_VERSION_MANY_VOLUMES_PER_ARRAY "1.2.00"
36 #define MPB_VERSION_3OR4_DISK_ARRAY "1.2.01"
37 #define MPB_VERSION_RAID5 "1.2.02"
38 #define MPB_VERSION_5OR6_DISK_ARRAY "1.2.04"
39 #define MPB_VERSION_CNG "1.2.06"
40 #define MPB_VERSION_ATTRIBS "1.3.00"
41 #define MAX_SIGNATURE_LENGTH 32
42 #define MAX_RAID_SERIAL_LEN 16
44 #define MPB_ATTRIB_CHECKSUM_VERIFY __cpu_to_le32(0x80000000)
45 #define MPB_ATTRIB_PM __cpu_to_le32(0x40000000)
46 #define MPB_ATTRIB_2TB __cpu_to_le32(0x20000000)
47 #define MPB_ATTRIB_RAID0 __cpu_to_le32(0x00000001)
48 #define MPB_ATTRIB_RAID1 __cpu_to_le32(0x00000002)
49 #define MPB_ATTRIB_RAID10 __cpu_to_le32(0x00000004)
50 #define MPB_ATTRIB_RAID1E __cpu_to_le32(0x00000008)
51 #define MPB_ATTRIB_RAID5 __cpu_to_le32(0x00000010)
52 #define MPB_ATTRIB_RAIDCNG __cpu_to_le32(0x00000020)
54 #define MPB_SECTOR_CNT 418
55 #define IMSM_RESERVED_SECTORS 4096
56 #define SECT_PER_MB_SHIFT 11
58 /* Disk configuration info. */
59 #define IMSM_MAX_DEVICES 255
61 __u8 serial
[MAX_RAID_SERIAL_LEN
];/* 0xD8 - 0xE7 ascii serial number */
62 __u32 total_blocks
; /* 0xE8 - 0xEB total blocks */
63 __u32 scsi_id
; /* 0xEC - 0xEF scsi ID */
64 #define SPARE_DISK __cpu_to_le32(0x01) /* Spare */
65 #define CONFIGURED_DISK __cpu_to_le32(0x02) /* Member of some RaidDev */
66 #define FAILED_DISK __cpu_to_le32(0x04) /* Permanent failure */
67 __u32 status
; /* 0xF0 - 0xF3 */
68 __u32 owner_cfg_num
; /* which config 0,1,2... owns this disk */
69 #define IMSM_DISK_FILLERS 4
70 __u32 filler
[IMSM_DISK_FILLERS
]; /* 0xF4 - 0x107 MPB_DISK_FILLERS for future expansion */
73 /* RAID map configuration infos. */
75 __u32 pba_of_lba0
; /* start address of partition */
76 __u32 blocks_per_member
;/* blocks per member */
77 __u32 num_data_stripes
; /* number of data stripes */
78 __u16 blocks_per_strip
;
79 __u8 map_state
; /* Normal, Uninitialized, Degraded, Failed */
80 #define IMSM_T_STATE_NORMAL 0
81 #define IMSM_T_STATE_UNINITIALIZED 1
82 #define IMSM_T_STATE_DEGRADED 2
83 #define IMSM_T_STATE_FAILED 3
85 #define IMSM_T_RAID0 0
86 #define IMSM_T_RAID1 1
87 #define IMSM_T_RAID5 5 /* since metadata version 1.2.02 ? */
88 __u8 num_members
; /* number of member disks */
89 __u8 num_domains
; /* number of parity domains */
90 __u8 failed_disk_num
; /* valid only when state is degraded */
92 __u32 filler
[7]; /* expansion area */
93 #define IMSM_ORD_REBUILD (1 << 24)
94 __u32 disk_ord_tbl
[1]; /* disk_ord_tbl[num_members],
95 * top byte contains some flags
97 } __attribute__ ((packed
));
100 __u32 curr_migr_unit
;
101 __u32 checkpoint_id
; /* id to access curr_migr_unit */
102 __u8 migr_state
; /* Normal or Migrating */
104 #define MIGR_REBUILD 1
105 #define MIGR_VERIFY 2 /* analagous to echo check > sync_action */
106 #define MIGR_GEN_MIGR 3
107 #define MIGR_STATE_CHANGE 4
108 #define MIGR_REPAIR 5
109 __u8 migr_type
; /* Initializing, Rebuilding, ... */
111 __u8 fs_state
; /* fast-sync state for CnG (0xff == disabled) */
112 __u16 verify_errors
; /* number of mismatches */
113 __u16 bad_blocks
; /* number of bad blocks during verify */
115 struct imsm_map map
[1];
116 /* here comes another one if migr_state */
117 } __attribute__ ((packed
));
120 __u8 volume
[MAX_RAID_SERIAL_LEN
];
123 #define DEV_BOOTABLE __cpu_to_le32(0x01)
124 #define DEV_BOOT_DEVICE __cpu_to_le32(0x02)
125 #define DEV_READ_COALESCING __cpu_to_le32(0x04)
126 #define DEV_WRITE_COALESCING __cpu_to_le32(0x08)
127 #define DEV_LAST_SHUTDOWN_DIRTY __cpu_to_le32(0x10)
128 #define DEV_HIDDEN_AT_BOOT __cpu_to_le32(0x20)
129 #define DEV_CURRENTLY_HIDDEN __cpu_to_le32(0x40)
130 #define DEV_VERIFY_AND_FIX __cpu_to_le32(0x80)
131 #define DEV_MAP_STATE_UNINIT __cpu_to_le32(0x100)
132 #define DEV_NO_AUTO_RECOVERY __cpu_to_le32(0x200)
133 #define DEV_CLONE_N_GO __cpu_to_le32(0x400)
134 #define DEV_CLONE_MAN_SYNC __cpu_to_le32(0x800)
135 #define DEV_CNG_MASTER_DISK_NUM __cpu_to_le32(0x1000)
136 __u32 status
; /* Persistent RaidDev status */
137 __u32 reserved_blocks
; /* Reserved blocks at beginning of volume */
141 __u8 cng_master_disk
;
145 #define IMSM_DEV_FILLERS 10
146 __u32 filler
[IMSM_DEV_FILLERS
];
148 } __attribute__ ((packed
));
151 __u8 sig
[MAX_SIGNATURE_LENGTH
]; /* 0x00 - 0x1F */
152 __u32 check_sum
; /* 0x20 - 0x23 MPB Checksum */
153 __u32 mpb_size
; /* 0x24 - 0x27 Size of MPB */
154 __u32 family_num
; /* 0x28 - 0x2B Checksum from first time this config was written */
155 __u32 generation_num
; /* 0x2C - 0x2F Incremented each time this array's MPB is written */
156 __u32 error_log_size
; /* 0x30 - 0x33 in bytes */
157 __u32 attributes
; /* 0x34 - 0x37 */
158 __u8 num_disks
; /* 0x38 Number of configured disks */
159 __u8 num_raid_devs
; /* 0x39 Number of configured volumes */
160 __u8 error_log_pos
; /* 0x3A */
161 __u8 fill
[1]; /* 0x3B */
162 __u32 cache_size
; /* 0x3c - 0x40 in mb */
163 __u32 orig_family_num
; /* 0x40 - 0x43 original family num */
164 __u32 pwr_cycle_count
; /* 0x44 - 0x47 simulated power cycle count for array */
165 __u32 bbm_log_size
; /* 0x48 - 0x4B - size of bad Block Mgmt Log in bytes */
166 #define IMSM_FILLERS 35
167 __u32 filler
[IMSM_FILLERS
]; /* 0x4C - 0xD7 RAID_MPB_FILLERS */
168 struct imsm_disk disk
[1]; /* 0xD8 diskTbl[numDisks] */
169 /* here comes imsm_dev[num_raid_devs] */
170 /* here comes BBM logs */
171 } __attribute__ ((packed
));
173 #define BBM_LOG_MAX_ENTRIES 254
175 struct bbm_log_entry
{
176 __u64 defective_block_start
;
177 #define UNREADABLE 0xFFFFFFFF
178 __u32 spare_block_offset
;
179 __u16 remapped_marked_count
;
181 } __attribute__ ((__packed__
));
184 __u32 signature
; /* 0xABADB10C */
186 __u32 reserved_spare_block_count
; /* 0 */
187 __u32 reserved
; /* 0xFFFF */
188 __u64 first_spare_lba
;
189 struct bbm_log_entry mapped_block_entries
[BBM_LOG_MAX_ENTRIES
];
190 } __attribute__ ((__packed__
));
194 static char *map_state_str
[] = { "normal", "uninitialized", "degraded", "failed" };
197 static __u8
migr_type(struct imsm_dev
*dev
)
199 if (dev
->vol
.migr_type
== MIGR_VERIFY
&&
200 dev
->status
& DEV_VERIFY_AND_FIX
)
203 return dev
->vol
.migr_type
;
206 static void set_migr_type(struct imsm_dev
*dev
, __u8 migr_type
)
208 /* for compatibility with older oroms convert MIGR_REPAIR, into
209 * MIGR_VERIFY w/ DEV_VERIFY_AND_FIX status
211 if (migr_type
== MIGR_REPAIR
) {
212 dev
->vol
.migr_type
= MIGR_VERIFY
;
213 dev
->status
|= DEV_VERIFY_AND_FIX
;
215 dev
->vol
.migr_type
= migr_type
;
216 dev
->status
&= ~DEV_VERIFY_AND_FIX
;
220 static unsigned int sector_count(__u32 bytes
)
222 return ((bytes
+ (512-1)) & (~(512-1))) / 512;
225 static unsigned int mpb_sectors(struct imsm_super
*mpb
)
227 return sector_count(__le32_to_cpu(mpb
->mpb_size
));
231 struct imsm_dev
*dev
;
232 struct intel_dev
*next
;
236 /* internal representation of IMSM metadata */
239 void *buf
; /* O_DIRECT buffer for reading/writing metadata */
240 struct imsm_super
*anchor
; /* immovable parameters */
242 size_t len
; /* size of the 'buf' allocation */
243 void *next_buf
; /* for realloc'ing buf from the manager */
245 int updates_pending
; /* count of pending updates for mdmon */
246 int creating_imsm
; /* flag to indicate container creation */
247 int current_vol
; /* index of raid device undergoing creation */
248 __u32 create_offset
; /* common start for 'current_vol' */
249 __u32 random
; /* random data for seeding new family numbers */
250 struct intel_dev
*devlist
;
254 __u8 serial
[MAX_RAID_SERIAL_LEN
];
257 struct imsm_disk disk
;
260 struct extent
*e
; /* for determining freespace @ create */
261 int raiddisk
; /* slot to fill in autolayout */
263 struct dl
*add
; /* list of disks to add while mdmon active */
264 struct dl
*missing
; /* disks removed while we weren't looking */
265 struct bbm_log
*bbm_log
;
266 const char *hba
; /* device path of the raid controller for this metadata */
267 const struct imsm_orom
*orom
; /* platform firmware support */
268 struct intel_super
*next
; /* (temp) list for disambiguating family_num */
272 struct imsm_disk disk
;
273 #define IMSM_UNKNOWN_OWNER (-1)
275 struct intel_disk
*next
;
279 unsigned long long start
, size
;
282 /* definition of messages passed to imsm_process_update */
283 enum imsm_update_type
{
284 update_activate_spare
,
289 struct imsm_update_activate_spare
{
290 enum imsm_update_type type
;
294 struct imsm_update_activate_spare
*next
;
298 __u8 serial
[MAX_RAID_SERIAL_LEN
];
301 struct imsm_update_create_array
{
302 enum imsm_update_type type
;
307 struct imsm_update_add_disk
{
308 enum imsm_update_type type
;
311 static struct supertype
*match_metadata_desc_imsm(char *arg
)
313 struct supertype
*st
;
315 if (strcmp(arg
, "imsm") != 0 &&
316 strcmp(arg
, "default") != 0
320 st
= malloc(sizeof(*st
));
321 memset(st
, 0, sizeof(*st
));
322 st
->ss
= &super_imsm
;
323 st
->max_devs
= IMSM_MAX_DEVICES
;
324 st
->minor_version
= 0;
330 static __u8
*get_imsm_version(struct imsm_super
*mpb
)
332 return &mpb
->sig
[MPB_SIG_LEN
];
336 /* retrieve a disk directly from the anchor when the anchor is known to be
337 * up-to-date, currently only at load time
339 static struct imsm_disk
*__get_imsm_disk(struct imsm_super
*mpb
, __u8 index
)
341 if (index
>= mpb
->num_disks
)
343 return &mpb
->disk
[index
];
347 /* retrieve a disk from the parsed metadata */
348 static struct imsm_disk
*get_imsm_disk(struct intel_super
*super
, __u8 index
)
352 for (d
= super
->disks
; d
; d
= d
->next
)
353 if (d
->index
== index
)
360 /* generate a checksum directly from the anchor when the anchor is known to be
361 * up-to-date, currently only at load or write_super after coalescing
363 static __u32
__gen_imsm_checksum(struct imsm_super
*mpb
)
365 __u32 end
= mpb
->mpb_size
/ sizeof(end
);
366 __u32
*p
= (__u32
*) mpb
;
370 sum
+= __le32_to_cpu(*p
);
374 return sum
- __le32_to_cpu(mpb
->check_sum
);
377 static size_t sizeof_imsm_map(struct imsm_map
*map
)
379 return sizeof(struct imsm_map
) + sizeof(__u32
) * (map
->num_members
- 1);
382 struct imsm_map
*get_imsm_map(struct imsm_dev
*dev
, int second_map
)
384 struct imsm_map
*map
= &dev
->vol
.map
[0];
386 if (second_map
&& !dev
->vol
.migr_state
)
388 else if (second_map
) {
391 return ptr
+ sizeof_imsm_map(map
);
397 /* return the size of the device.
398 * migr_state increases the returned size if map[0] were to be duplicated
400 static size_t sizeof_imsm_dev(struct imsm_dev
*dev
, int migr_state
)
402 size_t size
= sizeof(*dev
) - sizeof(struct imsm_map
) +
403 sizeof_imsm_map(get_imsm_map(dev
, 0));
405 /* migrating means an additional map */
406 if (dev
->vol
.migr_state
)
407 size
+= sizeof_imsm_map(get_imsm_map(dev
, 1));
409 size
+= sizeof_imsm_map(get_imsm_map(dev
, 0));
415 /* retrieve disk serial number list from a metadata update */
416 static struct disk_info
*get_disk_info(struct imsm_update_create_array
*update
)
419 struct disk_info
*inf
;
421 inf
= u
+ sizeof(*update
) - sizeof(struct imsm_dev
) +
422 sizeof_imsm_dev(&update
->dev
, 0);
428 static struct imsm_dev
*__get_imsm_dev(struct imsm_super
*mpb
, __u8 index
)
434 if (index
>= mpb
->num_raid_devs
)
437 /* devices start after all disks */
438 offset
= ((void *) &mpb
->disk
[mpb
->num_disks
]) - _mpb
;
440 for (i
= 0; i
<= index
; i
++)
442 return _mpb
+ offset
;
444 offset
+= sizeof_imsm_dev(_mpb
+ offset
, 0);
449 static struct imsm_dev
*get_imsm_dev(struct intel_super
*super
, __u8 index
)
451 struct intel_dev
*dv
;
453 if (index
>= super
->anchor
->num_raid_devs
)
455 for (dv
= super
->devlist
; dv
; dv
= dv
->next
)
456 if (dv
->index
== index
)
461 static __u32
get_imsm_ord_tbl_ent(struct imsm_dev
*dev
, int slot
)
463 struct imsm_map
*map
;
465 if (dev
->vol
.migr_state
)
466 map
= get_imsm_map(dev
, 1);
468 map
= get_imsm_map(dev
, 0);
470 /* top byte identifies disk under rebuild */
471 return __le32_to_cpu(map
->disk_ord_tbl
[slot
]);
474 #define ord_to_idx(ord) (((ord) << 8) >> 8)
475 static __u32
get_imsm_disk_idx(struct imsm_dev
*dev
, int slot
)
477 __u32 ord
= get_imsm_ord_tbl_ent(dev
, slot
);
479 return ord_to_idx(ord
);
482 static void set_imsm_ord_tbl_ent(struct imsm_map
*map
, int slot
, __u32 ord
)
484 map
->disk_ord_tbl
[slot
] = __cpu_to_le32(ord
);
487 static int get_imsm_disk_slot(struct imsm_map
*map
, int idx
)
492 for (slot
= 0; slot
< map
->num_members
; slot
++) {
493 ord
= __le32_to_cpu(map
->disk_ord_tbl
[slot
]);
494 if (ord_to_idx(ord
) == idx
)
501 static int get_imsm_raid_level(struct imsm_map
*map
)
503 if (map
->raid_level
== 1) {
504 if (map
->num_members
== 2)
510 return map
->raid_level
;
513 static int cmp_extent(const void *av
, const void *bv
)
515 const struct extent
*a
= av
;
516 const struct extent
*b
= bv
;
517 if (a
->start
< b
->start
)
519 if (a
->start
> b
->start
)
524 static int count_memberships(struct dl
*dl
, struct intel_super
*super
)
529 for (i
= 0; i
< super
->anchor
->num_raid_devs
; i
++) {
530 struct imsm_dev
*dev
= get_imsm_dev(super
, i
);
531 struct imsm_map
*map
= get_imsm_map(dev
, 0);
533 if (get_imsm_disk_slot(map
, dl
->index
) >= 0)
540 static struct extent
*get_extents(struct intel_super
*super
, struct dl
*dl
)
542 /* find a list of used extents on the given physical device */
543 struct extent
*rv
, *e
;
545 int memberships
= count_memberships(dl
, super
);
546 __u32 reservation
= MPB_SECTOR_CNT
+ IMSM_RESERVED_SECTORS
;
548 rv
= malloc(sizeof(struct extent
) * (memberships
+ 1));
553 for (i
= 0; i
< super
->anchor
->num_raid_devs
; i
++) {
554 struct imsm_dev
*dev
= get_imsm_dev(super
, i
);
555 struct imsm_map
*map
= get_imsm_map(dev
, 0);
557 if (get_imsm_disk_slot(map
, dl
->index
) >= 0) {
558 e
->start
= __le32_to_cpu(map
->pba_of_lba0
);
559 e
->size
= __le32_to_cpu(map
->blocks_per_member
);
563 qsort(rv
, memberships
, sizeof(*rv
), cmp_extent
);
565 /* determine the start of the metadata
566 * when no raid devices are defined use the default
567 * ...otherwise allow the metadata to truncate the value
568 * as is the case with older versions of imsm
571 struct extent
*last
= &rv
[memberships
- 1];
574 remainder
= __le32_to_cpu(dl
->disk
.total_blocks
) -
575 (last
->start
+ last
->size
);
576 /* round down to 1k block to satisfy precision of the kernel
580 /* make sure remainder is still sane */
581 if (remainder
< ROUND_UP(super
->len
, 512) >> 9)
582 remainder
= ROUND_UP(super
->len
, 512) >> 9;
583 if (reservation
> remainder
)
584 reservation
= remainder
;
586 e
->start
= __le32_to_cpu(dl
->disk
.total_blocks
) - reservation
;
591 /* try to determine how much space is reserved for metadata from
592 * the last get_extents() entry, otherwise fallback to the
595 static __u32
imsm_reserved_sectors(struct intel_super
*super
, struct dl
*dl
)
601 /* for spares just return a minimal reservation which will grow
602 * once the spare is picked up by an array
605 return MPB_SECTOR_CNT
;
607 e
= get_extents(super
, dl
);
609 return MPB_SECTOR_CNT
+ IMSM_RESERVED_SECTORS
;
611 /* scroll to last entry */
612 for (i
= 0; e
[i
].size
; i
++)
615 rv
= __le32_to_cpu(dl
->disk
.total_blocks
) - e
[i
].start
;
622 static int is_spare(struct imsm_disk
*disk
)
624 return (disk
->status
& SPARE_DISK
) == SPARE_DISK
;
627 static int is_configured(struct imsm_disk
*disk
)
629 return (disk
->status
& CONFIGURED_DISK
) == CONFIGURED_DISK
;
632 static int is_failed(struct imsm_disk
*disk
)
634 return (disk
->status
& FAILED_DISK
) == FAILED_DISK
;
638 static void print_imsm_dev(struct imsm_dev
*dev
, char *uuid
, int disk_idx
)
642 struct imsm_map
*map
= get_imsm_map(dev
, 0);
646 printf("[%.16s]:\n", dev
->volume
);
647 printf(" UUID : %s\n", uuid
);
648 printf(" RAID Level : %d\n", get_imsm_raid_level(map
));
649 printf(" Members : %d\n", map
->num_members
);
650 slot
= get_imsm_disk_slot(map
, disk_idx
);
652 ord
= get_imsm_ord_tbl_ent(dev
, slot
);
653 printf(" This Slot : %d%s\n", slot
,
654 ord
& IMSM_ORD_REBUILD
? " (out-of-sync)" : "");
656 printf(" This Slot : ?\n");
657 sz
= __le32_to_cpu(dev
->size_high
);
659 sz
+= __le32_to_cpu(dev
->size_low
);
660 printf(" Array Size : %llu%s\n", (unsigned long long)sz
,
661 human_size(sz
* 512));
662 sz
= __le32_to_cpu(map
->blocks_per_member
);
663 printf(" Per Dev Size : %llu%s\n", (unsigned long long)sz
,
664 human_size(sz
* 512));
665 printf(" Sector Offset : %u\n",
666 __le32_to_cpu(map
->pba_of_lba0
));
667 printf(" Num Stripes : %u\n",
668 __le32_to_cpu(map
->num_data_stripes
));
669 printf(" Chunk Size : %u KiB\n",
670 __le16_to_cpu(map
->blocks_per_strip
) / 2);
671 printf(" Reserved : %d\n", __le32_to_cpu(dev
->reserved_blocks
));
672 printf(" Migrate State : %s", dev
->vol
.migr_state
? "migrating" : "idle\n");
673 if (dev
->vol
.migr_state
) {
674 if (migr_type(dev
) == MIGR_INIT
)
675 printf(": initializing\n");
676 else if (migr_type(dev
) == MIGR_REBUILD
)
677 printf(": rebuilding\n");
678 else if (migr_type(dev
) == MIGR_VERIFY
)
680 else if (migr_type(dev
) == MIGR_GEN_MIGR
)
681 printf(": general migration\n");
682 else if (migr_type(dev
) == MIGR_STATE_CHANGE
)
683 printf(": state change\n");
684 else if (migr_type(dev
) == MIGR_REPAIR
)
685 printf(": repair\n");
687 printf(": <unknown:%d>\n", migr_type(dev
));
689 printf(" Map State : %s", map_state_str
[map
->map_state
]);
690 if (dev
->vol
.migr_state
) {
691 struct imsm_map
*map
= get_imsm_map(dev
, 1);
692 printf(" <-- %s", map_state_str
[map
->map_state
]);
695 printf(" Dirty State : %s\n", dev
->vol
.dirty
? "dirty" : "clean");
698 static void print_imsm_disk(struct imsm_super
*mpb
, int index
, __u32 reserved
)
700 struct imsm_disk
*disk
= __get_imsm_disk(mpb
, index
);
701 char str
[MAX_RAID_SERIAL_LEN
+ 1];
708 snprintf(str
, MAX_RAID_SERIAL_LEN
+ 1, "%s", disk
->serial
);
709 printf(" Disk%02d Serial : %s\n", index
, str
);
710 printf(" State :%s%s%s\n", is_spare(disk
) ? " spare" : "",
711 is_configured(disk
) ? " active" : "",
712 is_failed(disk
) ? " failed" : "");
713 printf(" Id : %08x\n", __le32_to_cpu(disk
->scsi_id
));
714 sz
= __le32_to_cpu(disk
->total_blocks
) - reserved
;
715 printf(" Usable Size : %llu%s\n", (unsigned long long)sz
,
716 human_size(sz
* 512));
719 static void getinfo_super_imsm(struct supertype
*st
, struct mdinfo
*info
);
721 static void examine_super_imsm(struct supertype
*st
, char *homehost
)
723 struct intel_super
*super
= st
->sb
;
724 struct imsm_super
*mpb
= super
->anchor
;
725 char str
[MAX_SIGNATURE_LENGTH
];
730 __u32 reserved
= imsm_reserved_sectors(super
, super
->disks
);
733 snprintf(str
, MPB_SIG_LEN
, "%s", mpb
->sig
);
734 printf(" Magic : %s\n", str
);
735 snprintf(str
, strlen(MPB_VERSION_RAID0
), "%s", get_imsm_version(mpb
));
736 printf(" Version : %s\n", get_imsm_version(mpb
));
737 printf(" Orig Family : %08x\n", __le32_to_cpu(mpb
->orig_family_num
));
738 printf(" Family : %08x\n", __le32_to_cpu(mpb
->family_num
));
739 printf(" Generation : %08x\n", __le32_to_cpu(mpb
->generation_num
));
740 getinfo_super_imsm(st
, &info
);
741 fname_from_uuid(st
, &info
, nbuf
, ':');
742 printf(" UUID : %s\n", nbuf
+ 5);
743 sum
= __le32_to_cpu(mpb
->check_sum
);
744 printf(" Checksum : %08x %s\n", sum
,
745 __gen_imsm_checksum(mpb
) == sum
? "correct" : "incorrect");
746 printf(" MPB Sectors : %d\n", mpb_sectors(mpb
));
747 printf(" Disks : %d\n", mpb
->num_disks
);
748 printf(" RAID Devices : %d\n", mpb
->num_raid_devs
);
749 print_imsm_disk(mpb
, super
->disks
->index
, reserved
);
750 if (super
->bbm_log
) {
751 struct bbm_log
*log
= super
->bbm_log
;
754 printf("Bad Block Management Log:\n");
755 printf(" Log Size : %d\n", __le32_to_cpu(mpb
->bbm_log_size
));
756 printf(" Signature : %x\n", __le32_to_cpu(log
->signature
));
757 printf(" Entry Count : %d\n", __le32_to_cpu(log
->entry_count
));
758 printf(" Spare Blocks : %d\n", __le32_to_cpu(log
->reserved_spare_block_count
));
759 printf(" First Spare : %llx\n",
760 (unsigned long long) __le64_to_cpu(log
->first_spare_lba
));
762 for (i
= 0; i
< mpb
->num_raid_devs
; i
++) {
764 struct imsm_dev
*dev
= __get_imsm_dev(mpb
, i
);
766 super
->current_vol
= i
;
767 getinfo_super_imsm(st
, &info
);
768 fname_from_uuid(st
, &info
, nbuf
, ':');
769 print_imsm_dev(dev
, nbuf
+ 5, super
->disks
->index
);
771 for (i
= 0; i
< mpb
->num_disks
; i
++) {
772 if (i
== super
->disks
->index
)
774 print_imsm_disk(mpb
, i
, reserved
);
778 static void brief_examine_super_imsm(struct supertype
*st
, int verbose
)
780 /* We just write a generic IMSM ARRAY entry */
783 struct intel_super
*super
= st
->sb
;
785 if (!super
->anchor
->num_raid_devs
) {
786 printf("ARRAY metadata=imsm\n");
790 getinfo_super_imsm(st
, &info
);
791 fname_from_uuid(st
, &info
, nbuf
, ':');
792 printf("ARRAY metadata=imsm UUID=%s\n", nbuf
+ 5);
795 static void brief_examine_subarrays_imsm(struct supertype
*st
, int verbose
)
797 /* We just write a generic IMSM ARRAY entry */
801 struct intel_super
*super
= st
->sb
;
804 if (!super
->anchor
->num_raid_devs
)
807 getinfo_super_imsm(st
, &info
);
808 fname_from_uuid(st
, &info
, nbuf
, ':');
809 for (i
= 0; i
< super
->anchor
->num_raid_devs
; i
++) {
810 struct imsm_dev
*dev
= get_imsm_dev(super
, i
);
812 super
->current_vol
= i
;
813 getinfo_super_imsm(st
, &info
);
814 fname_from_uuid(st
, &info
, nbuf1
, ':');
815 printf("ARRAY /dev/md/%.16s container=%s member=%d UUID=%s\n",
816 dev
->volume
, nbuf
+ 5, i
, nbuf1
+ 5);
820 static void export_examine_super_imsm(struct supertype
*st
)
822 struct intel_super
*super
= st
->sb
;
823 struct imsm_super
*mpb
= super
->anchor
;
827 getinfo_super_imsm(st
, &info
);
828 fname_from_uuid(st
, &info
, nbuf
, ':');
829 printf("MD_METADATA=imsm\n");
830 printf("MD_LEVEL=container\n");
831 printf("MD_UUID=%s\n", nbuf
+5);
832 printf("MD_DEVICES=%u\n", mpb
->num_disks
);
835 static void detail_super_imsm(struct supertype
*st
, char *homehost
)
840 getinfo_super_imsm(st
, &info
);
841 fname_from_uuid(st
, &info
, nbuf
, ':');
842 printf("\n UUID : %s\n", nbuf
+ 5);
845 static void brief_detail_super_imsm(struct supertype
*st
)
849 getinfo_super_imsm(st
, &info
);
850 fname_from_uuid(st
, &info
, nbuf
, ':');
851 printf(" UUID=%s", nbuf
+ 5);
854 static int imsm_read_serial(int fd
, char *devname
, __u8
*serial
);
855 static void fd2devname(int fd
, char *name
);
857 static int imsm_enumerate_ports(const char *hba_path
, int port_count
, int host_base
, int verbose
)
859 /* dump an unsorted list of devices attached to ahci, as well as
860 * non-connected ports
862 int hba_len
= strlen(hba_path
) + 1;
867 unsigned long port_mask
= (1 << port_count
) - 1;
869 if (port_count
> sizeof(port_mask
) * 8) {
871 fprintf(stderr
, Name
": port_count %d out of range\n", port_count
);
875 /* scroll through /sys/dev/block looking for devices attached to
878 dir
= opendir("/sys/dev/block");
879 for (ent
= dir
? readdir(dir
) : NULL
; ent
; ent
= readdir(dir
)) {
890 if (sscanf(ent
->d_name
, "%d:%d", &major
, &minor
) != 2)
892 path
= devt_to_devpath(makedev(major
, minor
));
895 if (!path_attached_to_hba(path
, hba_path
)) {
901 /* retrieve the scsi device type */
902 if (asprintf(&device
, "/sys/dev/block/%d:%d/device/xxxxxxx", major
, minor
) < 0) {
904 fprintf(stderr
, Name
": failed to allocate 'device'\n");
908 sprintf(device
, "/sys/dev/block/%d:%d/device/type", major
, minor
);
909 if (load_sys(device
, buf
) != 0) {
911 fprintf(stderr
, Name
": failed to read device type for %s\n",
917 type
= strtoul(buf
, NULL
, 10);
919 /* if it's not a disk print the vendor and model */
920 if (!(type
== 0 || type
== 7 || type
== 14)) {
923 sprintf(device
, "/sys/dev/block/%d:%d/device/vendor", major
, minor
);
924 if (load_sys(device
, buf
) == 0) {
925 strncpy(vendor
, buf
, sizeof(vendor
));
926 vendor
[sizeof(vendor
) - 1] = '\0';
927 c
= (char *) &vendor
[sizeof(vendor
) - 1];
928 while (isspace(*c
) || *c
== '\0')
932 sprintf(device
, "/sys/dev/block/%d:%d/device/model", major
, minor
);
933 if (load_sys(device
, buf
) == 0) {
934 strncpy(model
, buf
, sizeof(model
));
935 model
[sizeof(model
) - 1] = '\0';
936 c
= (char *) &model
[sizeof(model
) - 1];
937 while (isspace(*c
) || *c
== '\0')
941 if (vendor
[0] && model
[0])
942 sprintf(buf
, "%.64s %.64s", vendor
, model
);
944 switch (type
) { /* numbers from hald/linux/device.c */
945 case 1: sprintf(buf
, "tape"); break;
946 case 2: sprintf(buf
, "printer"); break;
947 case 3: sprintf(buf
, "processor"); break;
949 case 5: sprintf(buf
, "cdrom"); break;
950 case 6: sprintf(buf
, "scanner"); break;
951 case 8: sprintf(buf
, "media_changer"); break;
952 case 9: sprintf(buf
, "comm"); break;
953 case 12: sprintf(buf
, "raid"); break;
954 default: sprintf(buf
, "unknown");
960 /* chop device path to 'host%d' and calculate the port number */
961 c
= strchr(&path
[hba_len
], '/');
963 if (sscanf(&path
[hba_len
], "host%d", &port
) == 1)
967 *c
= '/'; /* repair the full string */
968 fprintf(stderr
, Name
": failed to determine port number for %s\n",
975 /* mark this port as used */
976 port_mask
&= ~(1 << port
);
978 /* print out the device information */
980 printf(" Port%d : - non-disk device (%s) -\n", port
, buf
);
984 fd
= dev_open(ent
->d_name
, O_RDONLY
);
986 printf(" Port%d : - disk info unavailable -\n", port
);
989 printf(" Port%d : %s", port
, buf
);
990 if (imsm_read_serial(fd
, NULL
, (__u8
*) buf
) == 0)
991 printf(" (%s)\n", buf
);
1006 for (i
= 0; i
< port_count
; i
++)
1007 if (port_mask
& (1 << i
))
1008 printf(" Port%d : - no device attached -\n", i
);
1014 static int detail_platform_imsm(int verbose
, int enumerate_only
)
1016 /* There are two components to imsm platform support, the ahci SATA
1017 * controller and the option-rom. To find the SATA controller we
1018 * simply look in /sys/bus/pci/drivers/ahci to see if an ahci
1019 * controller with the Intel vendor id is present. This approach
1020 * allows mdadm to leverage the kernel's ahci detection logic, with the
1021 * caveat that if ahci.ko is not loaded mdadm will not be able to
1022 * detect platform raid capabilities. The option-rom resides in a
1023 * platform "Adapter ROM". We scan for its signature to retrieve the
1024 * platform capabilities. If raid support is disabled in the BIOS the
1025 * option-rom capability structure will not be available.
1027 const struct imsm_orom
*orom
;
1028 struct sys_dev
*list
, *hba
;
1031 const char *hba_path
;
1035 if (enumerate_only
) {
1036 if (check_env("IMSM_NO_PLATFORM") || find_imsm_orom())
1041 list
= find_driver_devices("pci", "ahci");
1042 for (hba
= list
; hba
; hba
= hba
->next
)
1043 if (devpath_to_vendor(hba
->path
) == 0x8086)
1048 fprintf(stderr
, Name
": unable to find active ahci controller\n");
1049 free_sys_dev(&list
);
1052 fprintf(stderr
, Name
": found Intel SATA AHCI Controller\n");
1053 hba_path
= hba
->path
;
1055 free_sys_dev(&list
);
1057 orom
= find_imsm_orom();
1060 fprintf(stderr
, Name
": imsm option-rom not found\n");
1064 printf(" Platform : Intel(R) Matrix Storage Manager\n");
1065 printf(" Version : %d.%d.%d.%d\n", orom
->major_ver
, orom
->minor_ver
,
1066 orom
->hotfix_ver
, orom
->build
);
1067 printf(" RAID Levels :%s%s%s%s%s\n",
1068 imsm_orom_has_raid0(orom
) ? " raid0" : "",
1069 imsm_orom_has_raid1(orom
) ? " raid1" : "",
1070 imsm_orom_has_raid1e(orom
) ? " raid1e" : "",
1071 imsm_orom_has_raid10(orom
) ? " raid10" : "",
1072 imsm_orom_has_raid5(orom
) ? " raid5" : "");
1073 printf(" Chunk Sizes :%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
1074 imsm_orom_has_chunk(orom
, 2) ? " 2k" : "",
1075 imsm_orom_has_chunk(orom
, 4) ? " 4k" : "",
1076 imsm_orom_has_chunk(orom
, 8) ? " 8k" : "",
1077 imsm_orom_has_chunk(orom
, 16) ? " 16k" : "",
1078 imsm_orom_has_chunk(orom
, 32) ? " 32k" : "",
1079 imsm_orom_has_chunk(orom
, 64) ? " 64k" : "",
1080 imsm_orom_has_chunk(orom
, 128) ? " 128k" : "",
1081 imsm_orom_has_chunk(orom
, 256) ? " 256k" : "",
1082 imsm_orom_has_chunk(orom
, 512) ? " 512k" : "",
1083 imsm_orom_has_chunk(orom
, 1024*1) ? " 1M" : "",
1084 imsm_orom_has_chunk(orom
, 1024*2) ? " 2M" : "",
1085 imsm_orom_has_chunk(orom
, 1024*4) ? " 4M" : "",
1086 imsm_orom_has_chunk(orom
, 1024*8) ? " 8M" : "",
1087 imsm_orom_has_chunk(orom
, 1024*16) ? " 16M" : "",
1088 imsm_orom_has_chunk(orom
, 1024*32) ? " 32M" : "",
1089 imsm_orom_has_chunk(orom
, 1024*64) ? " 64M" : "");
1090 printf(" Max Disks : %d\n", orom
->tds
);
1091 printf(" Max Volumes : %d\n", orom
->vpa
);
1092 printf(" I/O Controller : %s\n", hba_path
);
1094 /* find the smallest scsi host number to determine a port number base */
1095 dir
= opendir(hba_path
);
1096 for (ent
= dir
? readdir(dir
) : NULL
; ent
; ent
= readdir(dir
)) {
1099 if (sscanf(ent
->d_name
, "host%d", &host
) != 1)
1101 if (port_count
== 0)
1103 else if (host
< host_base
)
1106 if (host
+ 1 > port_count
+ host_base
)
1107 port_count
= host
+ 1 - host_base
;
1113 if (!port_count
|| imsm_enumerate_ports(hba_path
, port_count
,
1114 host_base
, verbose
) != 0) {
1116 fprintf(stderr
, Name
": failed to enumerate ports\n");
1124 static int match_home_imsm(struct supertype
*st
, char *homehost
)
1126 /* the imsm metadata format does not specify any host
1127 * identification information. We return -1 since we can never
1128 * confirm nor deny whether a given array is "meant" for this
1129 * host. We rely on compare_super and the 'family_num' fields to
1130 * exclude member disks that do not belong, and we rely on
1131 * mdadm.conf to specify the arrays that should be assembled.
1132 * Auto-assembly may still pick up "foreign" arrays.
1138 static void uuid_from_super_imsm(struct supertype
*st
, int uuid
[4])
1140 /* The uuid returned here is used for:
1141 * uuid to put into bitmap file (Create, Grow)
1142 * uuid for backup header when saving critical section (Grow)
1143 * comparing uuids when re-adding a device into an array
1144 * In these cases the uuid required is that of the data-array,
1145 * not the device-set.
1146 * uuid to recognise same set when adding a missing device back
1147 * to an array. This is a uuid for the device-set.
1149 * For each of these we can make do with a truncated
1150 * or hashed uuid rather than the original, as long as
1152 * In each case the uuid required is that of the data-array,
1153 * not the device-set.
1155 /* imsm does not track uuid's so we synthesis one using sha1 on
1156 * - The signature (Which is constant for all imsm array, but no matter)
1157 * - the orig_family_num of the container
1158 * - the index number of the volume
1159 * - the 'serial' number of the volume.
1160 * Hopefully these are all constant.
1162 struct intel_super
*super
= st
->sb
;
1165 struct sha1_ctx ctx
;
1166 struct imsm_dev
*dev
= NULL
;
1169 /* some mdadm versions failed to set ->orig_family_num, in which
1170 * case fall back to ->family_num. orig_family_num will be
1171 * fixed up with the first metadata update.
1173 family_num
= super
->anchor
->orig_family_num
;
1174 if (family_num
== 0)
1175 family_num
= super
->anchor
->family_num
;
1176 sha1_init_ctx(&ctx
);
1177 sha1_process_bytes(super
->anchor
->sig
, MPB_SIG_LEN
, &ctx
);
1178 sha1_process_bytes(&family_num
, sizeof(__u32
), &ctx
);
1179 if (super
->current_vol
>= 0)
1180 dev
= get_imsm_dev(super
, super
->current_vol
);
1182 __u32 vol
= super
->current_vol
;
1183 sha1_process_bytes(&vol
, sizeof(vol
), &ctx
);
1184 sha1_process_bytes(dev
->volume
, MAX_RAID_SERIAL_LEN
, &ctx
);
1186 sha1_finish_ctx(&ctx
, buf
);
1187 memcpy(uuid
, buf
, 4*4);
1192 get_imsm_numerical_version(struct imsm_super
*mpb
, int *m
, int *p
)
1194 __u8
*v
= get_imsm_version(mpb
);
1195 __u8
*end
= mpb
->sig
+ MAX_SIGNATURE_LENGTH
;
1196 char major
[] = { 0, 0, 0 };
1197 char minor
[] = { 0 ,0, 0 };
1198 char patch
[] = { 0, 0, 0 };
1199 char *ver_parse
[] = { major
, minor
, patch
};
1203 while (*v
!= '\0' && v
< end
) {
1204 if (*v
!= '.' && j
< 2)
1205 ver_parse
[i
][j
++] = *v
;
1213 *m
= strtol(minor
, NULL
, 0);
1214 *p
= strtol(patch
, NULL
, 0);
1218 static int imsm_level_to_layout(int level
)
1226 return ALGORITHM_LEFT_ASYMMETRIC
;
1233 static void getinfo_super_imsm_volume(struct supertype
*st
, struct mdinfo
*info
)
1235 struct intel_super
*super
= st
->sb
;
1236 struct imsm_dev
*dev
= get_imsm_dev(super
, super
->current_vol
);
1237 struct imsm_map
*map
= get_imsm_map(dev
, 0);
1240 for (dl
= super
->disks
; dl
; dl
= dl
->next
)
1241 if (dl
->raiddisk
== info
->disk
.raid_disk
)
1243 info
->container_member
= super
->current_vol
;
1244 info
->array
.raid_disks
= map
->num_members
;
1245 info
->array
.level
= get_imsm_raid_level(map
);
1246 info
->array
.layout
= imsm_level_to_layout(info
->array
.level
);
1247 info
->array
.md_minor
= -1;
1248 info
->array
.ctime
= 0;
1249 info
->array
.utime
= 0;
1250 info
->array
.chunk_size
= __le16_to_cpu(map
->blocks_per_strip
) << 9;
1251 info
->array
.state
= !dev
->vol
.dirty
;
1252 info
->custom_array_size
= __le32_to_cpu(dev
->size_high
);
1253 info
->custom_array_size
<<= 32;
1254 info
->custom_array_size
|= __le32_to_cpu(dev
->size_low
);
1256 info
->disk
.major
= 0;
1257 info
->disk
.minor
= 0;
1259 info
->disk
.major
= dl
->major
;
1260 info
->disk
.minor
= dl
->minor
;
1263 info
->data_offset
= __le32_to_cpu(map
->pba_of_lba0
);
1264 info
->component_size
= __le32_to_cpu(map
->blocks_per_member
);
1265 memset(info
->uuid
, 0, sizeof(info
->uuid
));
1267 if (map
->map_state
== IMSM_T_STATE_UNINITIALIZED
|| dev
->vol
.dirty
)
1268 info
->resync_start
= 0;
1269 else if (dev
->vol
.migr_state
)
1270 /* FIXME add curr_migr_unit to resync_start conversion */
1271 info
->resync_start
= 0;
1273 info
->resync_start
= ~0ULL;
1275 strncpy(info
->name
, (char *) dev
->volume
, MAX_RAID_SERIAL_LEN
);
1276 info
->name
[MAX_RAID_SERIAL_LEN
] = 0;
1278 info
->array
.major_version
= -1;
1279 info
->array
.minor_version
= -2;
1280 sprintf(info
->text_version
, "/%s/%d",
1281 devnum2devname(st
->container_dev
),
1282 info
->container_member
);
1283 info
->safe_mode_delay
= 4000; /* 4 secs like the Matrix driver */
1284 uuid_from_super_imsm(st
, info
->uuid
);
1287 /* check the config file to see if we can return a real uuid for this spare */
1288 static void fixup_container_spare_uuid(struct mdinfo
*inf
)
1290 struct mddev_ident_s
*array_list
;
1292 if (inf
->array
.level
!= LEVEL_CONTAINER
||
1293 memcmp(inf
->uuid
, uuid_match_any
, sizeof(int[4])) != 0)
1296 array_list
= conf_get_ident(NULL
);
1298 for (; array_list
; array_list
= array_list
->next
) {
1299 if (array_list
->uuid_set
) {
1300 struct supertype
*_sst
; /* spare supertype */
1301 struct supertype
*_cst
; /* container supertype */
1303 _cst
= array_list
->st
;
1305 _sst
= _cst
->ss
->match_metadata_desc(inf
->text_version
);
1310 memcpy(inf
->uuid
, array_list
->uuid
, sizeof(int[4]));
1318 static void getinfo_super_imsm(struct supertype
*st
, struct mdinfo
*info
)
1320 struct intel_super
*super
= st
->sb
;
1321 struct imsm_disk
*disk
;
1323 if (super
->current_vol
>= 0) {
1324 getinfo_super_imsm_volume(st
, info
);
1328 /* Set raid_disks to zero so that Assemble will always pull in valid
1331 info
->array
.raid_disks
= 0;
1332 info
->array
.level
= LEVEL_CONTAINER
;
1333 info
->array
.layout
= 0;
1334 info
->array
.md_minor
= -1;
1335 info
->array
.ctime
= 0; /* N/A for imsm */
1336 info
->array
.utime
= 0;
1337 info
->array
.chunk_size
= 0;
1339 info
->disk
.major
= 0;
1340 info
->disk
.minor
= 0;
1341 info
->disk
.raid_disk
= -1;
1342 info
->reshape_active
= 0;
1343 info
->array
.major_version
= -1;
1344 info
->array
.minor_version
= -2;
1345 strcpy(info
->text_version
, "imsm");
1346 info
->safe_mode_delay
= 0;
1347 info
->disk
.number
= -1;
1348 info
->disk
.state
= 0;
1352 __u32 reserved
= imsm_reserved_sectors(super
, super
->disks
);
1354 disk
= &super
->disks
->disk
;
1355 info
->data_offset
= __le32_to_cpu(disk
->total_blocks
) - reserved
;
1356 info
->component_size
= reserved
;
1357 info
->disk
.state
= is_configured(disk
) ? (1 << MD_DISK_ACTIVE
) : 0;
1358 /* we don't change info->disk.raid_disk here because
1359 * this state will be finalized in mdmon after we have
1360 * found the 'most fresh' version of the metadata
1362 info
->disk
.state
|= is_failed(disk
) ? (1 << MD_DISK_FAULTY
) : 0;
1363 info
->disk
.state
|= is_spare(disk
) ? 0 : (1 << MD_DISK_SYNC
);
1366 /* only call uuid_from_super_imsm when this disk is part of a populated container,
1367 * ->compare_super may have updated the 'num_raid_devs' field for spares
1369 if (info
->disk
.state
& (1 << MD_DISK_SYNC
) || super
->anchor
->num_raid_devs
)
1370 uuid_from_super_imsm(st
, info
->uuid
);
1372 memcpy(info
->uuid
, uuid_match_any
, sizeof(int[4]));
1373 fixup_container_spare_uuid(info
);
1377 static int update_super_imsm(struct supertype
*st
, struct mdinfo
*info
,
1378 char *update
, char *devname
, int verbose
,
1379 int uuid_set
, char *homehost
)
1381 /* For 'assemble' and 'force' we need to return non-zero if any
1382 * change was made. For others, the return value is ignored.
1383 * Update options are:
1384 * force-one : This device looks a bit old but needs to be included,
1385 * update age info appropriately.
1386 * assemble: clear any 'faulty' flag to allow this device to
1388 * force-array: Array is degraded but being forced, mark it clean
1389 * if that will be needed to assemble it.
1391 * newdev: not used ????
1392 * grow: Array has gained a new device - this is currently for
1394 * resync: mark as dirty so a resync will happen.
1395 * name: update the name - preserving the homehost
1396 * uuid: Change the uuid of the array to match watch is given
1398 * Following are not relevant for this imsm:
1399 * sparc2.2 : update from old dodgey metadata
1400 * super-minor: change the preferred_minor number
1401 * summaries: update redundant counters.
1402 * homehost: update the recorded homehost
1403 * _reshape_progress: record new reshape_progress position.
1406 struct intel_super
*super
= st
->sb
;
1407 struct imsm_super
*mpb
;
1409 /* we can only update container info */
1410 if (!super
|| super
->current_vol
>= 0 || !super
->anchor
)
1413 mpb
= super
->anchor
;
1415 if (strcmp(update
, "uuid") == 0 && uuid_set
&& !info
->update_private
)
1417 Name
": '--uuid' not supported for imsm metadata\n");
1418 else if (strcmp(update
, "uuid") == 0 && uuid_set
&& info
->update_private
) {
1419 mpb
->orig_family_num
= *((__u32
*) info
->update_private
);
1421 } else if (strcmp(update
, "uuid") == 0) {
1422 __u32
*new_family
= malloc(sizeof(*new_family
));
1424 /* update orig_family_number with the incoming random
1425 * data, report the new effective uuid, and store the
1426 * new orig_family_num for future updates.
1429 memcpy(&mpb
->orig_family_num
, info
->uuid
, sizeof(__u32
));
1430 uuid_from_super_imsm(st
, info
->uuid
);
1431 *new_family
= mpb
->orig_family_num
;
1432 info
->update_private
= new_family
;
1435 } else if (strcmp(update
, "assemble") == 0)
1439 Name
": '--update=%s' not supported for imsm metadata\n",
1442 /* successful update? recompute checksum */
1444 mpb
->check_sum
= __le32_to_cpu(__gen_imsm_checksum(mpb
));
1449 static size_t disks_to_mpb_size(int disks
)
1453 size
= sizeof(struct imsm_super
);
1454 size
+= (disks
- 1) * sizeof(struct imsm_disk
);
1455 size
+= 2 * sizeof(struct imsm_dev
);
1456 /* up to 2 maps per raid device (-2 for imsm_maps in imsm_dev */
1457 size
+= (4 - 2) * sizeof(struct imsm_map
);
1458 /* 4 possible disk_ord_tbl's */
1459 size
+= 4 * (disks
- 1) * sizeof(__u32
);
1464 static __u64
avail_size_imsm(struct supertype
*st
, __u64 devsize
)
1466 if (devsize
< (MPB_SECTOR_CNT
+ IMSM_RESERVED_SECTORS
))
1469 return devsize
- (MPB_SECTOR_CNT
+ IMSM_RESERVED_SECTORS
);
1472 static void free_devlist(struct intel_super
*super
)
1474 struct intel_dev
*dv
;
1476 while (super
->devlist
) {
1477 dv
= super
->devlist
->next
;
1478 free(super
->devlist
->dev
);
1479 free(super
->devlist
);
1480 super
->devlist
= dv
;
1484 static void imsm_copy_dev(struct imsm_dev
*dest
, struct imsm_dev
*src
)
1486 memcpy(dest
, src
, sizeof_imsm_dev(src
, 0));
1489 static int compare_super_imsm(struct supertype
*st
, struct supertype
*tst
)
1493 * 0 same, or first was empty, and second was copied
1494 * 1 second had wrong number
1496 * 3 wrong other info
1498 struct intel_super
*first
= st
->sb
;
1499 struct intel_super
*sec
= tst
->sb
;
1507 /* if an anchor does not have num_raid_devs set then it is a free
1510 if (first
->anchor
->num_raid_devs
> 0 &&
1511 sec
->anchor
->num_raid_devs
> 0) {
1512 /* Determine if these disks might ever have been
1513 * related. Further disambiguation can only take place
1514 * in load_super_imsm_all
1516 __u32 first_family
= first
->anchor
->orig_family_num
;
1517 __u32 sec_family
= sec
->anchor
->orig_family_num
;
1519 if (memcmp(first
->anchor
->sig
, sec
->anchor
->sig
,
1520 MAX_SIGNATURE_LENGTH
) != 0)
1523 if (first_family
== 0)
1524 first_family
= first
->anchor
->family_num
;
1525 if (sec_family
== 0)
1526 sec_family
= sec
->anchor
->family_num
;
1528 if (first_family
!= sec_family
)
1534 /* if 'first' is a spare promote it to a populated mpb with sec's
1537 if (first
->anchor
->num_raid_devs
== 0 &&
1538 sec
->anchor
->num_raid_devs
> 0) {
1540 struct intel_dev
*dv
;
1541 struct imsm_dev
*dev
;
1543 /* we need to copy raid device info from sec if an allocation
1544 * fails here we don't associate the spare
1546 for (i
= 0; i
< sec
->anchor
->num_raid_devs
; i
++) {
1547 dv
= malloc(sizeof(*dv
));
1550 dev
= malloc(sizeof_imsm_dev(get_imsm_dev(sec
, i
), 1));
1557 dv
->next
= first
->devlist
;
1558 first
->devlist
= dv
;
1560 if (i
< sec
->anchor
->num_raid_devs
) {
1561 /* allocation failure */
1562 free_devlist(first
);
1563 fprintf(stderr
, "imsm: failed to associate spare\n");
1566 first
->anchor
->num_raid_devs
= sec
->anchor
->num_raid_devs
;
1567 first
->anchor
->orig_family_num
= sec
->anchor
->orig_family_num
;
1568 first
->anchor
->family_num
= sec
->anchor
->family_num
;
1569 memcpy(first
->anchor
->sig
, sec
->anchor
->sig
, MAX_SIGNATURE_LENGTH
);
1570 for (i
= 0; i
< sec
->anchor
->num_raid_devs
; i
++)
1571 imsm_copy_dev(get_imsm_dev(first
, i
), get_imsm_dev(sec
, i
));
1577 static void fd2devname(int fd
, char *name
)
1586 if (fstat(fd
, &st
) != 0)
1588 sprintf(path
, "/sys/dev/block/%d:%d",
1589 major(st
.st_rdev
), minor(st
.st_rdev
));
1591 rv
= readlink(path
, dname
, sizeof(dname
));
1596 nm
= strrchr(dname
, '/');
1598 snprintf(name
, MAX_RAID_SERIAL_LEN
, "/dev/%s", nm
);
1601 extern int scsi_get_serial(int fd
, void *buf
, size_t buf_len
);
1603 static int imsm_read_serial(int fd
, char *devname
,
1604 __u8 serial
[MAX_RAID_SERIAL_LEN
])
1606 unsigned char scsi_serial
[255];
1615 memset(scsi_serial
, 0, sizeof(scsi_serial
));
1617 rv
= scsi_get_serial(fd
, scsi_serial
, sizeof(scsi_serial
));
1619 if (rv
&& check_env("IMSM_DEVNAME_AS_SERIAL")) {
1620 memset(serial
, 0, MAX_RAID_SERIAL_LEN
);
1621 fd2devname(fd
, (char *) serial
);
1628 Name
": Failed to retrieve serial for %s\n",
1633 rsp_len
= scsi_serial
[3];
1637 Name
": Failed to retrieve serial for %s\n",
1641 rsp_buf
= (char *) &scsi_serial
[4];
1643 /* trim all whitespace and non-printable characters and convert
1646 for (i
= 0, dest
= rsp_buf
; i
< rsp_len
; i
++) {
1649 /* ':' is reserved for use in placeholder serial
1650 * numbers for missing disks
1658 len
= dest
- rsp_buf
;
1661 /* truncate leading characters */
1662 if (len
> MAX_RAID_SERIAL_LEN
) {
1663 dest
+= len
- MAX_RAID_SERIAL_LEN
;
1664 len
= MAX_RAID_SERIAL_LEN
;
1667 memset(serial
, 0, MAX_RAID_SERIAL_LEN
);
1668 memcpy(serial
, dest
, len
);
1673 static int serialcmp(__u8
*s1
, __u8
*s2
)
1675 return strncmp((char *) s1
, (char *) s2
, MAX_RAID_SERIAL_LEN
);
1678 static void serialcpy(__u8
*dest
, __u8
*src
)
1680 strncpy((char *) dest
, (char *) src
, MAX_RAID_SERIAL_LEN
);
1684 static struct dl
*serial_to_dl(__u8
*serial
, struct intel_super
*super
)
1688 for (dl
= super
->disks
; dl
; dl
= dl
->next
)
1689 if (serialcmp(dl
->serial
, serial
) == 0)
1696 static struct imsm_disk
*
1697 __serial_to_disk(__u8
*serial
, struct imsm_super
*mpb
, int *idx
)
1701 for (i
= 0; i
< mpb
->num_disks
; i
++) {
1702 struct imsm_disk
*disk
= __get_imsm_disk(mpb
, i
);
1704 if (serialcmp(disk
->serial
, serial
) == 0) {
1715 load_imsm_disk(int fd
, struct intel_super
*super
, char *devname
, int keep_fd
)
1717 struct imsm_disk
*disk
;
1722 __u8 serial
[MAX_RAID_SERIAL_LEN
];
1724 rv
= imsm_read_serial(fd
, devname
, serial
);
1729 dl
= calloc(1, sizeof(*dl
));
1733 Name
": failed to allocate disk buffer for %s\n",
1739 dl
->major
= major(stb
.st_rdev
);
1740 dl
->minor
= minor(stb
.st_rdev
);
1741 dl
->next
= super
->disks
;
1742 dl
->fd
= keep_fd
? fd
: -1;
1743 assert(super
->disks
== NULL
);
1745 serialcpy(dl
->serial
, serial
);
1748 fd2devname(fd
, name
);
1750 dl
->devname
= strdup(devname
);
1752 dl
->devname
= strdup(name
);
1754 /* look up this disk's index in the current anchor */
1755 disk
= __serial_to_disk(dl
->serial
, super
->anchor
, &dl
->index
);
1758 /* only set index on disks that are a member of a
1759 * populated contianer, i.e. one with raid_devs
1761 if (is_failed(&dl
->disk
))
1763 else if (is_spare(&dl
->disk
))
1771 /* When migrating map0 contains the 'destination' state while map1
1772 * contains the current state. When not migrating map0 contains the
1773 * current state. This routine assumes that map[0].map_state is set to
1774 * the current array state before being called.
1776 * Migration is indicated by one of the following states
1777 * 1/ Idle (migr_state=0 map0state=normal||unitialized||degraded||failed)
1778 * 2/ Initialize (migr_state=1 migr_type=MIGR_INIT map0state=normal
1779 * map1state=unitialized)
1780 * 3/ Repair (Resync) (migr_state=1 migr_type=MIGR_REPAIR map0state=normal
1782 * 4/ Rebuild (migr_state=1 migr_type=MIGR_REBUILD map0state=normal
1783 * map1state=degraded)
1785 static void migrate(struct imsm_dev
*dev
, __u8 to_state
, int migr_type
)
1787 struct imsm_map
*dest
;
1788 struct imsm_map
*src
= get_imsm_map(dev
, 0);
1790 dev
->vol
.migr_state
= 1;
1791 set_migr_type(dev
, migr_type
);
1792 dev
->vol
.curr_migr_unit
= 0;
1793 dest
= get_imsm_map(dev
, 1);
1795 /* duplicate and then set the target end state in map[0] */
1796 memcpy(dest
, src
, sizeof_imsm_map(src
));
1797 if (migr_type
== MIGR_REBUILD
) {
1801 for (i
= 0; i
< src
->num_members
; i
++) {
1802 ord
= __le32_to_cpu(src
->disk_ord_tbl
[i
]);
1803 set_imsm_ord_tbl_ent(src
, i
, ord_to_idx(ord
));
1807 src
->map_state
= to_state
;
1810 static void end_migration(struct imsm_dev
*dev
, __u8 map_state
)
1812 struct imsm_map
*map
= get_imsm_map(dev
, 0);
1813 struct imsm_map
*prev
= get_imsm_map(dev
, dev
->vol
.migr_state
);
1816 /* merge any IMSM_ORD_REBUILD bits that were not successfully
1817 * completed in the last migration.
1819 * FIXME add support for online capacity expansion and
1820 * raid-level-migration
1822 for (i
= 0; i
< prev
->num_members
; i
++)
1823 map
->disk_ord_tbl
[i
] |= prev
->disk_ord_tbl
[i
];
1825 dev
->vol
.migr_state
= 0;
1826 dev
->vol
.curr_migr_unit
= 0;
1827 map
->map_state
= map_state
;
1831 static int parse_raid_devices(struct intel_super
*super
)
1834 struct imsm_dev
*dev_new
;
1835 size_t len
, len_migr
;
1836 size_t space_needed
= 0;
1837 struct imsm_super
*mpb
= super
->anchor
;
1839 for (i
= 0; i
< super
->anchor
->num_raid_devs
; i
++) {
1840 struct imsm_dev
*dev_iter
= __get_imsm_dev(super
->anchor
, i
);
1841 struct intel_dev
*dv
;
1843 len
= sizeof_imsm_dev(dev_iter
, 0);
1844 len_migr
= sizeof_imsm_dev(dev_iter
, 1);
1846 space_needed
+= len_migr
- len
;
1848 dv
= malloc(sizeof(*dv
));
1851 dev_new
= malloc(len_migr
);
1856 imsm_copy_dev(dev_new
, dev_iter
);
1859 dv
->next
= super
->devlist
;
1860 super
->devlist
= dv
;
1863 /* ensure that super->buf is large enough when all raid devices
1866 if (__le32_to_cpu(mpb
->mpb_size
) + space_needed
> super
->len
) {
1869 len
= ROUND_UP(__le32_to_cpu(mpb
->mpb_size
) + space_needed
, 512);
1870 if (posix_memalign(&buf
, 512, len
) != 0)
1873 memcpy(buf
, super
->buf
, super
->len
);
1874 memset(buf
+ super
->len
, 0, len
- super
->len
);
1883 /* retrieve a pointer to the bbm log which starts after all raid devices */
1884 struct bbm_log
*__get_imsm_bbm_log(struct imsm_super
*mpb
)
1888 if (__le32_to_cpu(mpb
->bbm_log_size
)) {
1890 ptr
+= mpb
->mpb_size
- __le32_to_cpu(mpb
->bbm_log_size
);
1896 static void __free_imsm(struct intel_super
*super
, int free_disks
);
1898 /* load_imsm_mpb - read matrix metadata
1899 * allocates super->mpb to be freed by free_super
1901 static int load_imsm_mpb(int fd
, struct intel_super
*super
, char *devname
)
1903 unsigned long long dsize
;
1904 unsigned long long sectors
;
1906 struct imsm_super
*anchor
;
1909 get_dev_size(fd
, NULL
, &dsize
);
1911 if (lseek64(fd
, dsize
- (512 * 2), SEEK_SET
) < 0) {
1914 Name
": Cannot seek to anchor block on %s: %s\n",
1915 devname
, strerror(errno
));
1919 if (posix_memalign((void**)&anchor
, 512, 512) != 0) {
1922 Name
": Failed to allocate imsm anchor buffer"
1923 " on %s\n", devname
);
1926 if (read(fd
, anchor
, 512) != 512) {
1929 Name
": Cannot read anchor block on %s: %s\n",
1930 devname
, strerror(errno
));
1935 if (strncmp((char *) anchor
->sig
, MPB_SIGNATURE
, MPB_SIG_LEN
) != 0) {
1938 Name
": no IMSM anchor on %s\n", devname
);
1943 __free_imsm(super
, 0);
1944 super
->len
= ROUND_UP(anchor
->mpb_size
, 512);
1945 if (posix_memalign(&super
->buf
, 512, super
->len
) != 0) {
1948 Name
": unable to allocate %zu byte mpb buffer\n",
1953 memcpy(super
->buf
, anchor
, 512);
1955 sectors
= mpb_sectors(anchor
) - 1;
1958 check_sum
= __gen_imsm_checksum(super
->anchor
);
1959 if (check_sum
!= __le32_to_cpu(super
->anchor
->check_sum
)) {
1962 Name
": IMSM checksum %x != %x on %s\n",
1964 __le32_to_cpu(super
->anchor
->check_sum
),
1972 /* read the extended mpb */
1973 if (lseek64(fd
, dsize
- (512 * (2 + sectors
)), SEEK_SET
) < 0) {
1976 Name
": Cannot seek to extended mpb on %s: %s\n",
1977 devname
, strerror(errno
));
1981 if (read(fd
, super
->buf
+ 512, super
->len
- 512) != super
->len
- 512) {
1984 Name
": Cannot read extended mpb on %s: %s\n",
1985 devname
, strerror(errno
));
1989 check_sum
= __gen_imsm_checksum(super
->anchor
);
1990 if (check_sum
!= __le32_to_cpu(super
->anchor
->check_sum
)) {
1993 Name
": IMSM checksum %x != %x on %s\n",
1994 check_sum
, __le32_to_cpu(super
->anchor
->check_sum
),
1999 /* FIXME the BBM log is disk specific so we cannot use this global
2000 * buffer for all disks. Ok for now since we only look at the global
2001 * bbm_log_size parameter to gate assembly
2003 super
->bbm_log
= __get_imsm_bbm_log(super
->anchor
);
2009 load_and_parse_mpb(int fd
, struct intel_super
*super
, char *devname
, int keep_fd
)
2013 err
= load_imsm_mpb(fd
, super
, devname
);
2016 err
= load_imsm_disk(fd
, super
, devname
, keep_fd
);
2019 err
= parse_raid_devices(super
);
2024 static void __free_imsm_disk(struct dl
*d
)
2035 static void free_imsm_disks(struct intel_super
*super
)
2039 while (super
->disks
) {
2041 super
->disks
= d
->next
;
2042 __free_imsm_disk(d
);
2044 while (super
->missing
) {
2046 super
->missing
= d
->next
;
2047 __free_imsm_disk(d
);
2052 /* free all the pieces hanging off of a super pointer */
2053 static void __free_imsm(struct intel_super
*super
, int free_disks
)
2060 free_imsm_disks(super
);
2061 free_devlist(super
);
2063 free((void *) super
->hba
);
2068 static void free_imsm(struct intel_super
*super
)
2070 __free_imsm(super
, 1);
2074 static void free_super_imsm(struct supertype
*st
)
2076 struct intel_super
*super
= st
->sb
;
2085 static struct intel_super
*alloc_super(int creating_imsm
)
2087 struct intel_super
*super
= malloc(sizeof(*super
));
2090 memset(super
, 0, sizeof(*super
));
2091 super
->creating_imsm
= creating_imsm
;
2092 super
->current_vol
= -1;
2093 super
->create_offset
= ~((__u32
) 0);
2094 if (!check_env("IMSM_NO_PLATFORM"))
2095 super
->orom
= find_imsm_orom();
2096 if (super
->orom
&& !check_env("IMSM_TEST_OROM")) {
2097 struct sys_dev
*list
, *ent
;
2099 /* find the first intel ahci controller */
2100 list
= find_driver_devices("pci", "ahci");
2101 for (ent
= list
; ent
; ent
= ent
->next
)
2102 if (devpath_to_vendor(ent
->path
) == 0x8086)
2105 super
->hba
= ent
->path
;
2108 free_sys_dev(&list
);
2116 /* find_missing - helper routine for load_super_imsm_all that identifies
2117 * disks that have disappeared from the system. This routine relies on
2118 * the mpb being uptodate, which it is at load time.
2120 static int find_missing(struct intel_super
*super
)
2123 struct imsm_super
*mpb
= super
->anchor
;
2125 struct imsm_disk
*disk
;
2127 for (i
= 0; i
< mpb
->num_disks
; i
++) {
2128 disk
= __get_imsm_disk(mpb
, i
);
2129 dl
= serial_to_dl(disk
->serial
, super
);
2133 dl
= malloc(sizeof(*dl
));
2139 dl
->devname
= strdup("missing");
2141 serialcpy(dl
->serial
, disk
->serial
);
2144 dl
->next
= super
->missing
;
2145 super
->missing
= dl
;
2151 static struct intel_disk
*disk_list_get(__u8
*serial
, struct intel_disk
*disk_list
)
2153 struct intel_disk
*idisk
= disk_list
;
2156 if (serialcmp(idisk
->disk
.serial
, serial
) == 0)
2158 idisk
= idisk
->next
;
2164 static int __prep_thunderdome(struct intel_super
**table
, int tbl_size
,
2165 struct intel_super
*super
,
2166 struct intel_disk
**disk_list
)
2168 struct imsm_disk
*d
= &super
->disks
->disk
;
2169 struct imsm_super
*mpb
= super
->anchor
;
2172 for (i
= 0; i
< tbl_size
; i
++) {
2173 struct imsm_super
*tbl_mpb
= table
[i
]->anchor
;
2174 struct imsm_disk
*tbl_d
= &table
[i
]->disks
->disk
;
2176 if (tbl_mpb
->family_num
== mpb
->family_num
) {
2177 if (tbl_mpb
->check_sum
== mpb
->check_sum
) {
2178 dprintf("%s: mpb from %d:%d matches %d:%d\n",
2179 __func__
, super
->disks
->major
,
2180 super
->disks
->minor
,
2181 table
[i
]->disks
->major
,
2182 table
[i
]->disks
->minor
);
2186 if (((is_configured(d
) && !is_configured(tbl_d
)) ||
2187 is_configured(d
) == is_configured(tbl_d
)) &&
2188 tbl_mpb
->generation_num
< mpb
->generation_num
) {
2189 /* current version of the mpb is a
2190 * better candidate than the one in
2191 * super_table, but copy over "cross
2192 * generational" status
2194 struct intel_disk
*idisk
;
2196 dprintf("%s: mpb from %d:%d replaces %d:%d\n",
2197 __func__
, super
->disks
->major
,
2198 super
->disks
->minor
,
2199 table
[i
]->disks
->major
,
2200 table
[i
]->disks
->minor
);
2202 idisk
= disk_list_get(tbl_d
->serial
, *disk_list
);
2203 if (idisk
&& is_failed(&idisk
->disk
))
2204 tbl_d
->status
|= FAILED_DISK
;
2207 struct intel_disk
*idisk
;
2208 struct imsm_disk
*disk
;
2210 /* tbl_mpb is more up to date, but copy
2211 * over cross generational status before
2214 disk
= __serial_to_disk(d
->serial
, mpb
, NULL
);
2215 if (disk
&& is_failed(disk
))
2216 d
->status
|= FAILED_DISK
;
2218 idisk
= disk_list_get(d
->serial
, *disk_list
);
2221 if (disk
&& is_configured(disk
))
2222 idisk
->disk
.status
|= CONFIGURED_DISK
;
2225 dprintf("%s: mpb from %d:%d prefer %d:%d\n",
2226 __func__
, super
->disks
->major
,
2227 super
->disks
->minor
,
2228 table
[i
]->disks
->major
,
2229 table
[i
]->disks
->minor
);
2237 table
[tbl_size
++] = super
;
2241 /* update/extend the merged list of imsm_disk records */
2242 for (j
= 0; j
< mpb
->num_disks
; j
++) {
2243 struct imsm_disk
*disk
= __get_imsm_disk(mpb
, j
);
2244 struct intel_disk
*idisk
;
2246 idisk
= disk_list_get(disk
->serial
, *disk_list
);
2248 idisk
->disk
.status
|= disk
->status
;
2249 if (is_configured(&idisk
->disk
) ||
2250 is_failed(&idisk
->disk
))
2251 idisk
->disk
.status
&= ~(SPARE_DISK
);
2253 idisk
= calloc(1, sizeof(*idisk
));
2256 idisk
->owner
= IMSM_UNKNOWN_OWNER
;
2257 idisk
->disk
= *disk
;
2258 idisk
->next
= *disk_list
;
2262 if (serialcmp(idisk
->disk
.serial
, d
->serial
) == 0)
2269 static struct intel_super
*
2270 validate_members(struct intel_super
*super
, struct intel_disk
*disk_list
,
2273 struct imsm_super
*mpb
= super
->anchor
;
2277 for (i
= 0; i
< mpb
->num_disks
; i
++) {
2278 struct imsm_disk
*disk
= __get_imsm_disk(mpb
, i
);
2279 struct intel_disk
*idisk
;
2281 idisk
= disk_list_get(disk
->serial
, disk_list
);
2283 if (idisk
->owner
== owner
||
2284 idisk
->owner
== IMSM_UNKNOWN_OWNER
)
2287 dprintf("%s: '%.16s' owner %d != %d\n",
2288 __func__
, disk
->serial
, idisk
->owner
,
2291 dprintf("%s: unknown disk %x [%d]: %.16s\n",
2292 __func__
, __le32_to_cpu(mpb
->family_num
), i
,
2298 if (ok_count
== mpb
->num_disks
)
static void show_conflicts(__u32 family_num, struct intel_super *super_list)
{
	struct intel_super *s;

	for (s = super_list; s; s = s->next) {
		if (family_num != s->anchor->family_num)
			continue;
		fprintf(stderr, "Conflict, offlining family %#x on '%s'\n",
			__le32_to_cpu(family_num), s->disks->devname);
	}
}
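/* imsm_thunderdome(): reduce a list of per-disk supers to the single best
 * ("champion") copy of the metadata, offlining conflicting families and
 * collecting all member disks onto the champion.
 */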
2315 static struct intel_super
*
2316 imsm_thunderdome(struct intel_super
**super_list
, int len
)
2318 struct intel_super
*super_table
[len
];
2319 struct intel_disk
*disk_list
= NULL
;
2320 struct intel_super
*champion
, *spare
;
2321 struct intel_super
*s
, **del
;
2326 memset(super_table
, 0, sizeof(super_table
));
2327 for (s
= *super_list
; s
; s
= s
->next
)
2328 tbl_size
= __prep_thunderdome(super_table
, tbl_size
, s
, &disk_list
);
2330 for (i
= 0; i
< tbl_size
; i
++) {
2331 struct imsm_disk
*d
;
2332 struct intel_disk
*idisk
;
2333 struct imsm_super
*mpb
= super_table
[i
]->anchor
;
2336 d
= &s
->disks
->disk
;
2338 /* 'd' must appear in merged disk list for its
2339 * configuration to be valid
2341 idisk
= disk_list_get(d
->serial
, disk_list
);
2342 if (idisk
&& idisk
->owner
== i
)
2343 s
= validate_members(s
, disk_list
, i
);
2348 dprintf("%s: marking family: %#x from %d:%d offline\n",
2349 __func__
, mpb
->family_num
,
2350 super_table
[i
]->disks
->major
,
2351 super_table
[i
]->disks
->minor
);
2355 /* This is where the mdadm implementation differs from the Windows
2356 * driver which has no strict concept of a container. We can only
2357 * assemble one family from a container, so when returning a prodigal
2358 * array member to this system the code will not be able to disambiguate
2359 * the container contents that should be assembled ("foreign" versus
2360 * "local"). It requires user intervention to set the orig_family_num
2361 * to a new value to establish a new container. The Windows driver in
2362 * this situation fixes up the volume name in place and manages the
2363 * foreign array as an independent entity.
2368 for (i
= 0; i
< tbl_size
; i
++) {
2369 struct intel_super
*tbl_ent
= super_table
[i
];
2375 if (tbl_ent
->anchor
->num_raid_devs
== 0) {
2380 if (s
&& !is_spare
) {
2381 show_conflicts(tbl_ent
->anchor
->family_num
, *super_list
);
2383 } else if (!s
&& !is_spare
)
2396 fprintf(stderr
, "Chose family %#x on '%s', "
2397 "assemble conflicts to new container with '--update=uuid'\n",
2398 __le32_to_cpu(s
->anchor
->family_num
), s
->disks
->devname
);
2400 /* collect all dl's onto 'champion', and update them to
2401 * champion's version of the status
2403 for (s
= *super_list
; s
; s
= s
->next
) {
2404 struct imsm_super
*mpb
= champion
->anchor
;
2405 struct dl
*dl
= s
->disks
;
2410 for (i
= 0; i
< mpb
->num_disks
; i
++) {
2411 struct imsm_disk
*disk
;
2413 disk
= __serial_to_disk(dl
->serial
, mpb
, &dl
->index
);
2416 /* only set index on disks that are a member of
2417 * a populated contianer, i.e. one with
2420 if (is_failed(&dl
->disk
))
2422 else if (is_spare(&dl
->disk
))
2428 if (i
>= mpb
->num_disks
) {
2429 struct intel_disk
*idisk
;
2431 idisk
= disk_list_get(dl
->serial
, disk_list
);
2432 if (is_spare(&idisk
->disk
) &&
2433 !is_failed(&idisk
->disk
) && !is_configured(&idisk
->disk
))
2441 dl
->next
= champion
->disks
;
2442 champion
->disks
= dl
;
2446 /* delete 'champion' from super_list */
2447 for (del
= super_list
; *del
; ) {
2448 if (*del
== champion
) {
2449 *del
= (*del
)->next
;
2452 del
= &(*del
)->next
;
2454 champion
->next
= NULL
;
2458 struct intel_disk
*idisk
= disk_list
;
2460 disk_list
= disk_list
->next
;
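/* load_super_imsm_all(): 'fd' must refer to an opened imsm container; read
 * the anchor from every member disk, retrying if we race with mdmon, and
 * let imsm_thunderdome() pick the authoritative copy.
 */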
2467 static int load_super_imsm_all(struct supertype
*st
, int fd
, void **sbp
,
2468 char *devname
, int keep_fd
)
2471 struct intel_super
*super_list
= NULL
;
2472 struct intel_super
*super
= NULL
;
2473 int devnum
= fd2devnum(fd
);
2478 enum sysfs_read_flags flags
;
2480 flags
= GET_LEVEL
|GET_VERSION
|GET_DEVS
|GET_STATE
;
2481 if (mdmon_running(devnum
))
2482 flags
|= SKIP_GONE_DEVS
;
2484 /* check if 'fd' an opened container */
2485 sra
= sysfs_read(fd
, 0, flags
);
2489 if (sra
->array
.major_version
!= -1 ||
2490 sra
->array
.minor_version
!= -2 ||
2491 strcmp(sra
->text_version
, "imsm") != 0)
2495 for (sd
= sra
->devs
, i
= 0; sd
; sd
= sd
->next
, i
++) {
2496 struct intel_super
*s
= alloc_super(0);
2503 s
->next
= super_list
;
2507 sprintf(nm
, "%d:%d", sd
->disk
.major
, sd
->disk
.minor
);
2508 dfd
= dev_open(nm
, keep_fd
? O_RDWR
: O_RDONLY
);
2512 err
= load_and_parse_mpb(dfd
, s
, NULL
, keep_fd
);
2514 /* retry the load if we might have raced against mdmon */
2515 if (err
== 3 && mdmon_running(devnum
))
2516 for (retry
= 0; retry
< 3; retry
++) {
2518 err
= load_and_parse_mpb(dfd
, s
, NULL
, keep_fd
);
2528 /* all mpbs enter, maybe one leaves */
2529 super
= imsm_thunderdome(&super_list
, i
);
2535 if (find_missing(super
) != 0) {
2541 if (st
->subarray
[0]) {
2542 if (atoi(st
->subarray
) <= super
->anchor
->num_raid_devs
)
2543 super
->current_vol
= atoi(st
->subarray
);
2553 while (super_list
) {
2554 struct intel_super
*s
= super_list
;
2556 super_list
= super_list
->next
;
2564 st
->container_dev
= devnum
;
2565 if (err
== 0 && st
->ss
== NULL
) {
2566 st
->ss
= &super_imsm
;
2567 st
->minor_version
= 0;
2568 st
->max_devs
= IMSM_MAX_DEVICES
;
2570 st
->loaded_container
= 1;
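/* load_super_imsm(): load metadata from a single device, first trying to
 * treat it as a container member (load_super_imsm_all) and falling back to
 * a bare-disk load.
 */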
static int load_super_imsm(struct supertype *st, int fd, char *devname)
{
	struct intel_super *super;
	int rv;

	if (load_super_imsm_all(st, fd, &st->sb, devname, 1) == 0)
		return 0;

	free_super_imsm(st);

	super = alloc_super(0);
	if (!super) {
		fprintf(stderr,
			Name ": malloc of %zu failed.\n", sizeof(*super));
		return 1;
	}

	rv = load_and_parse_mpb(fd, super, devname, 0);
	if (rv) {
		fprintf(stderr,
			Name ": Failed to load all information "
			"sections on %s\n", devname);
		free_imsm(super);
		return rv;
	}

	if (st->subarray[0]) {
		if (atoi(st->subarray) <= super->anchor->num_raid_devs)
			super->current_vol = atoi(st->subarray);
	}

	st->sb = super;
	if (st->ss == NULL) {
		st->ss = &super_imsm;
		st->minor_version = 0;
		st->max_devs = IMSM_MAX_DEVICES;
	}
	st->loaded_container = 0;

	return 0;
}
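/* Helpers translating the mdu_array_info_t from the Create request into
 * imsm map units: 'size' is given in 1K blocks while the metadata counts
 * 512-byte sectors, hence the "* 2" conversions below.
 */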
static __u16 info_to_blocks_per_strip(mdu_array_info_t *info)
{
	if (info->level == 1)
		return 128;
	return info->chunk_size >> 9;
}

static __u32 info_to_num_data_stripes(mdu_array_info_t *info, int num_domains)
{
	__u32 num_stripes;

	num_stripes = (info->size * 2) / info_to_blocks_per_strip(info);
	num_stripes /= num_domains;

	return num_stripes;
}

static __u32 info_to_blocks_per_member(mdu_array_info_t *info)
{
	if (info->level == 1)
		return info->size * 2;
	else
		return (info->size * 2) & ~(info_to_blocks_per_strip(info) - 1);
}
static void imsm_update_version_info(struct intel_super *super)
{
	/* update the version and attributes */
	struct imsm_super *mpb = super->anchor;
	char *version;
	struct imsm_dev *dev;
	struct imsm_map *map;
	int i;

	for (i = 0; i < mpb->num_raid_devs; i++) {
		dev = get_imsm_dev(super, i);
		map = get_imsm_map(dev, 0);
		if (__le32_to_cpu(dev->size_high) > 0)
			mpb->attributes |= MPB_ATTRIB_2TB;

		/* FIXME detect when an array spans a port multiplier */
		#if 0
		mpb->attributes |= MPB_ATTRIB_PM;
		#endif

		if (mpb->num_raid_devs > 1 ||
		    mpb->attributes != MPB_ATTRIB_CHECKSUM_VERIFY) {
			version = MPB_VERSION_ATTRIBS;
			switch (get_imsm_raid_level(map)) {
			case 0: mpb->attributes |= MPB_ATTRIB_RAID0; break;
			case 1: mpb->attributes |= MPB_ATTRIB_RAID1; break;
			case 10: mpb->attributes |= MPB_ATTRIB_RAID10; break;
			case 5: mpb->attributes |= MPB_ATTRIB_RAID5; break;
			}
		} else {
			if (map->num_members >= 5)
				version = MPB_VERSION_5OR6_DISK_ARRAY;
			else if (dev->status == DEV_CLONE_N_GO)
				version = MPB_VERSION_CNG;
			else if (get_imsm_raid_level(map) == 5)
				version = MPB_VERSION_RAID5;
			else if (map->num_members >= 3)
				version = MPB_VERSION_3OR4_DISK_ARRAY;
			else if (get_imsm_raid_level(map) == 1)
				version = MPB_VERSION_RAID1;
			else
				version = MPB_VERSION_RAID0;
		}
		strcpy(((char *) mpb->sig) + strlen(MPB_SIGNATURE), version);
	}
}
2699 static int init_super_imsm_volume(struct supertype
*st
, mdu_array_info_t
*info
,
2700 unsigned long long size
, char *name
,
2701 char *homehost
, int *uuid
)
2703 /* We are creating a volume inside a pre-existing container.
2704 * so st->sb is already set.
2706 struct intel_super
*super
= st
->sb
;
2707 struct imsm_super
*mpb
= super
->anchor
;
2708 struct intel_dev
*dv
;
2709 struct imsm_dev
*dev
;
2710 struct imsm_vol
*vol
;
2711 struct imsm_map
*map
;
2712 int idx
= mpb
->num_raid_devs
;
2714 unsigned long long array_blocks
;
2715 size_t size_old
, size_new
;
2716 __u32 num_data_stripes
;
2718 if (super
->orom
&& mpb
->num_raid_devs
>= super
->orom
->vpa
) {
2719 fprintf(stderr
, Name
": This imsm-container already has the "
2720 "maximum of %d volumes\n", super
->orom
->vpa
);
2724 /* ensure the mpb is large enough for the new data */
2725 size_old
= __le32_to_cpu(mpb
->mpb_size
);
2726 size_new
= disks_to_mpb_size(info
->nr_disks
);
2727 if (size_new
> size_old
) {
2729 size_t size_round
= ROUND_UP(size_new
, 512);
2731 if (posix_memalign(&mpb_new
, 512, size_round
) != 0) {
2732 fprintf(stderr
, Name
": could not allocate new mpb\n");
2735 memcpy(mpb_new
, mpb
, size_old
);
2738 super
->anchor
= mpb_new
;
2739 mpb
->mpb_size
= __cpu_to_le32(size_new
);
2740 memset(mpb_new
+ size_old
, 0, size_round
- size_old
);
2742 super
->current_vol
= idx
;
2743 /* when creating the first raid device in this container set num_disks
2744 * to zero, i.e. delete this spare and add raid member devices in
2745 * add_to_super_imsm_volume()
2747 if (super
->current_vol
== 0)
2750 for (i
= 0; i
< super
->current_vol
; i
++) {
2751 dev
= get_imsm_dev(super
, i
);
2752 if (strncmp((char *) dev
->volume
, name
,
2753 MAX_RAID_SERIAL_LEN
) == 0) {
2754 fprintf(stderr
, Name
": '%s' is already defined for this container\n",
2760 sprintf(st
->subarray
, "%d", idx
);
2761 dv
= malloc(sizeof(*dv
));
2763 fprintf(stderr
, Name
": failed to allocate device list entry\n");
2766 dev
= malloc(sizeof(*dev
) + sizeof(__u32
) * (info
->raid_disks
- 1));
2769 fprintf(stderr
, Name
": could not allocate raid device\n");
2772 strncpy((char *) dev
->volume
, name
, MAX_RAID_SERIAL_LEN
);
2773 if (info
->level
== 1)
2774 array_blocks
= info_to_blocks_per_member(info
);
2776 array_blocks
= calc_array_size(info
->level
, info
->raid_disks
,
2777 info
->layout
, info
->chunk_size
,
2779 /* round array size down to closest MB */
2780 array_blocks
= (array_blocks
>> SECT_PER_MB_SHIFT
) << SECT_PER_MB_SHIFT
;
2782 dev
->size_low
= __cpu_to_le32((__u32
) array_blocks
);
2783 dev
->size_high
= __cpu_to_le32((__u32
) (array_blocks
>> 32));
2784 dev
->status
= __cpu_to_le32(0);
2785 dev
->reserved_blocks
= __cpu_to_le32(0);
2787 vol
->migr_state
= 0;
2788 set_migr_type(dev
, MIGR_INIT
);
2790 vol
->curr_migr_unit
= 0;
2791 map
= get_imsm_map(dev
, 0);
2792 map
->pba_of_lba0
= __cpu_to_le32(super
->create_offset
);
2793 map
->blocks_per_member
= __cpu_to_le32(info_to_blocks_per_member(info
));
2794 map
->blocks_per_strip
= __cpu_to_le16(info_to_blocks_per_strip(info
));
2795 map
->failed_disk_num
= ~0;
2796 map
->map_state
= info
->level
? IMSM_T_STATE_UNINITIALIZED
:
2797 IMSM_T_STATE_NORMAL
;
2800 if (info
->level
== 1 && info
->raid_disks
> 2) {
2801 fprintf(stderr
, Name
": imsm does not support more than 2 disks"
2802 "in a raid1 volume\n");
2806 map
->raid_level
= info
->level
;
2807 if (info
->level
== 10) {
2808 map
->raid_level
= 1;
2809 map
->num_domains
= info
->raid_disks
/ 2;
2810 } else if (info
->level
== 1)
2811 map
->num_domains
= info
->raid_disks
;
2813 map
->num_domains
= 1;
2815 num_data_stripes
= info_to_num_data_stripes(info
, map
->num_domains
);
2816 map
->num_data_stripes
= __cpu_to_le32(num_data_stripes
);
2818 map
->num_members
= info
->raid_disks
;
2819 for (i
= 0; i
< map
->num_members
; i
++) {
2820 /* initialized in add_to_super */
2821 set_imsm_ord_tbl_ent(map
, i
, 0);
2823 mpb
->num_raid_devs
++;
2826 dv
->index
= super
->current_vol
;
2827 dv
->next
= super
->devlist
;
2828 super
->devlist
= dv
;
2830 imsm_update_version_info(super
);
2835 static int init_super_imsm(struct supertype
*st
, mdu_array_info_t
*info
,
2836 unsigned long long size
, char *name
,
2837 char *homehost
, int *uuid
)
2839 /* This is primarily called by Create when creating a new array.
2840 * We will then get add_to_super called for each component, and then
2841 * write_init_super called to write it out to each device.
2842 * For IMSM, Create can create on fresh devices or on a pre-existing
2844 * To create on a pre-existing array a different method will be called.
2845 * This one is just for fresh drives.
2847 struct intel_super
*super
;
2848 struct imsm_super
*mpb
;
2853 return init_super_imsm_volume(st
, info
, size
, name
, homehost
, uuid
);
2856 mpb_size
= disks_to_mpb_size(info
->nr_disks
);
2860 super
= alloc_super(1);
2861 if (super
&& posix_memalign(&super
->buf
, 512, mpb_size
) != 0) {
2866 fprintf(stderr
, Name
2867 ": %s could not allocate superblock\n", __func__
);
2870 memset(super
->buf
, 0, mpb_size
);
2872 mpb
->mpb_size
= __cpu_to_le32(mpb_size
);
2876 /* zeroing superblock */
2880 mpb
->attributes
= MPB_ATTRIB_CHECKSUM_VERIFY
;
2882 version
= (char *) mpb
->sig
;
2883 strcpy(version
, MPB_SIGNATURE
);
2884 version
+= strlen(MPB_SIGNATURE
);
2885 strcpy(version
, MPB_VERSION_RAID0
);
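/* add_to_super_imsm_volume(): place disk 'dk' into the volume currently
 * being created (super->current_vol); only in-sync members are accepted,
 * and a pristine spare gets a new index in the anchor.
 */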
2891 static int add_to_super_imsm_volume(struct supertype
*st
, mdu_disk_info_t
*dk
,
2892 int fd
, char *devname
)
2894 struct intel_super
*super
= st
->sb
;
2895 struct imsm_super
*mpb
= super
->anchor
;
2897 struct imsm_dev
*dev
;
2898 struct imsm_map
*map
;
2900 dev
= get_imsm_dev(super
, super
->current_vol
);
2901 map
= get_imsm_map(dev
, 0);
2903 if (! (dk
->state
& (1<<MD_DISK_SYNC
))) {
2904 fprintf(stderr
, Name
": %s: Cannot add spare devices to IMSM volume\n",
2910 /* we're doing autolayout so grab the pre-marked (in
2911 * validate_geometry) raid_disk
2913 for (dl
= super
->disks
; dl
; dl
= dl
->next
)
2914 if (dl
->raiddisk
== dk
->raid_disk
)
2917 for (dl
= super
->disks
; dl
; dl
= dl
->next
)
2918 if (dl
->major
== dk
->major
&&
2919 dl
->minor
== dk
->minor
)
2924 fprintf(stderr
, Name
": %s is not a member of the same container\n", devname
);
2928 /* add a pristine spare to the metadata */
2929 if (dl
->index
< 0) {
2930 dl
->index
= super
->anchor
->num_disks
;
2931 super
->anchor
->num_disks
++;
2933 set_imsm_ord_tbl_ent(map
, dk
->number
, dl
->index
);
2934 dl
->disk
.status
= CONFIGURED_DISK
;
2936 /* if we are creating the first raid device update the family number */
2937 if (super
->current_vol
== 0) {
2939 struct imsm_dev
*_dev
= __get_imsm_dev(mpb
, 0);
2940 struct imsm_disk
*_disk
= __get_imsm_disk(mpb
, dl
->index
);
2945 sum
+= __gen_imsm_checksum(mpb
);
2946 mpb
->family_num
= __cpu_to_le32(sum
);
2947 mpb
->orig_family_num
= mpb
->family_num
;
2953 static int add_to_super_imsm(struct supertype
*st
, mdu_disk_info_t
*dk
,
2954 int fd
, char *devname
)
2956 struct intel_super
*super
= st
->sb
;
2958 unsigned long long size
;
2963 /* if we are on an RAID enabled platform check that the disk is
2964 * attached to the raid controller
2966 if (super
->hba
&& !disk_attached_to_hba(fd
, super
->hba
)) {
2968 Name
": %s is not attached to the raid controller: %s\n",
2969 devname
? : "disk", super
->hba
);
2973 if (super
->current_vol
>= 0)
2974 return add_to_super_imsm_volume(st
, dk
, fd
, devname
);
2977 dd
= malloc(sizeof(*dd
));
2980 Name
": malloc failed %s:%d.\n", __func__
, __LINE__
);
2983 memset(dd
, 0, sizeof(*dd
));
2984 dd
->major
= major(stb
.st_rdev
);
2985 dd
->minor
= minor(stb
.st_rdev
);
2987 dd
->devname
= devname
? strdup(devname
) : NULL
;
2990 rv
= imsm_read_serial(fd
, devname
, dd
->serial
);
2993 Name
": failed to retrieve scsi serial, aborting\n");
2998 get_dev_size(fd
, NULL
, &size
);
3000 serialcpy(dd
->disk
.serial
, dd
->serial
);
3001 dd
->disk
.total_blocks
= __cpu_to_le32(size
);
3002 dd
->disk
.status
= SPARE_DISK
;
3003 if (sysfs_disk_to_scsi_id(fd
, &id
) == 0)
3004 dd
->disk
.scsi_id
= __cpu_to_le32(id
);
3006 dd
->disk
.scsi_id
= __cpu_to_le32(0);
3008 if (st
->update_tail
) {
3009 dd
->next
= super
->add
;
3012 dd
->next
= super
->disks
;
static int store_imsm_mpb(int fd, struct imsm_super *mpb);

static union {
	char buf[512];
	struct imsm_super anchor;
} spare_record __attribute__ ((aligned(512)));

/* spare records have their own family number and do not have any defined raid
 * devices
 */
static int write_super_imsm_spares(struct intel_super *super, int doclose)
{
	struct imsm_super *mpb = super->anchor;
	struct imsm_super *spare = &spare_record.anchor;
	__u32 sum;
	struct dl *d;

	spare->mpb_size = __cpu_to_le32(sizeof(struct imsm_super)),
	spare->generation_num = __cpu_to_le32(1UL),
	spare->attributes = MPB_ATTRIB_CHECKSUM_VERIFY;
	spare->num_disks = 1,
	spare->num_raid_devs = 0,
	spare->cache_size = mpb->cache_size,
	spare->pwr_cycle_count = __cpu_to_le32(1),

	snprintf((char *) spare->sig, MAX_SIGNATURE_LENGTH,
		 MPB_SIGNATURE MPB_VERSION_RAID0);

	for (d = super->disks; d; d = d->next) {
		if (d->index != -1)
			continue;

		spare->disk[0] = d->disk;
		sum = __gen_imsm_checksum(spare);
		spare->family_num = __cpu_to_le32(sum);
		spare->orig_family_num = 0;
		sum = __gen_imsm_checksum(spare);
		spare->check_sum = __cpu_to_le32(sum);

		if (store_imsm_mpb(d->fd, spare)) {
			fprintf(stderr, "%s: failed for device %d:%d %s\n",
				__func__, d->major, d->minor, strerror(errno));
			return 1;
		}
		if (doclose) {
			close(d->fd);
			d->fd = -1;
		}
	}

	return 0;
}
3072 static int write_super_imsm(struct intel_super
*super
, int doclose
)
3074 struct imsm_super
*mpb
= super
->anchor
;
3080 __u32 mpb_size
= sizeof(struct imsm_super
) - sizeof(struct imsm_disk
);
3082 /* 'generation' is incremented everytime the metadata is written */
3083 generation
= __le32_to_cpu(mpb
->generation_num
);
3085 mpb
->generation_num
= __cpu_to_le32(generation
);
3087 /* fix up cases where previous mdadm releases failed to set
3090 if (mpb
->orig_family_num
== 0)
3091 mpb
->orig_family_num
= mpb
->family_num
;
3093 mpb_size
+= sizeof(struct imsm_disk
) * mpb
->num_disks
;
3094 for (d
= super
->disks
; d
; d
= d
->next
) {
3098 mpb
->disk
[d
->index
] = d
->disk
;
3100 for (d
= super
->missing
; d
; d
= d
->next
)
3101 mpb
->disk
[d
->index
] = d
->disk
;
3103 for (i
= 0; i
< mpb
->num_raid_devs
; i
++) {
3104 struct imsm_dev
*dev
= __get_imsm_dev(mpb
, i
);
3106 imsm_copy_dev(dev
, get_imsm_dev(super
, i
));
3107 mpb_size
+= sizeof_imsm_dev(dev
, 0);
3109 mpb_size
+= __le32_to_cpu(mpb
->bbm_log_size
);
3110 mpb
->mpb_size
= __cpu_to_le32(mpb_size
);
3112 /* recalculate checksum */
3113 sum
= __gen_imsm_checksum(mpb
);
3114 mpb
->check_sum
= __cpu_to_le32(sum
);
3116 /* write the mpb for disks that compose raid devices */
3117 for (d
= super
->disks
; d
; d
= d
->next
) {
3120 if (store_imsm_mpb(d
->fd
, mpb
))
3121 fprintf(stderr
, "%s: failed for device %d:%d %s\n",
3122 __func__
, d
->major
, d
->minor
, strerror(errno
));
3130 return write_super_imsm_spares(super
, doclose
);
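/* create_array(): package the just-created raid device (and the serial
 * numbers of its members) into an update_create_array metadata update for
 * mdmon to apply.
 */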
static int create_array(struct supertype *st, int dev_idx)
{
	size_t len;
	struct imsm_update_create_array *u;
	struct intel_super *super = st->sb;
	struct imsm_dev *dev = get_imsm_dev(super, dev_idx);
	struct imsm_map *map = get_imsm_map(dev, 0);
	struct disk_info *inf;
	struct imsm_disk *disk;
	int i;

	len = sizeof(*u) - sizeof(*dev) + sizeof_imsm_dev(dev, 0) +
	      sizeof(*inf) * map->num_members;
	u = malloc(len);
	if (!u) {
		fprintf(stderr, "%s: failed to allocate update buffer\n",
			__func__);
		return 1;
	}

	u->type = update_create_array;
	u->dev_idx = dev_idx;
	imsm_copy_dev(&u->dev, dev);
	inf = get_disk_info(u);
	for (i = 0; i < map->num_members; i++) {
		int idx = get_imsm_disk_idx(dev, i);

		disk = get_imsm_disk(super, idx);
		serialcpy(inf[i].serial, disk->serial);
	}
	append_metadata_update(st, u, len);

	return 0;
}
static int _add_disk(struct supertype *st)
{
	struct intel_super *super = st->sb;
	size_t len;
	struct imsm_update_add_disk *u;

	if (!super->add)
		return 0;

	len = sizeof(*u);
	u = malloc(len);
	if (!u) {
		fprintf(stderr, "%s: failed to allocate update buffer\n",
			__func__);
		return 1;
	}

	u->type = update_add_disk;
	append_metadata_update(st, u, len);

	return 0;
}
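/* write_init_super_imsm(): when running under mdmon (update_tail set) queue
 * the new array / new disk as a metadata update, otherwise write the mpb to
 * all member disks directly.
 */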
static int write_init_super_imsm(struct supertype *st)
{
	struct intel_super *super = st->sb;
	int current_vol = super->current_vol;

	/* we are done with current_vol reset it to point st at the container */
	super->current_vol = -1;

	if (st->update_tail) {
		/* queue the recently created array / added disk
		 * as a metadata update */
		struct dl *d;
		int rv;

		/* determine if we are creating a volume or adding a disk */
		if (current_vol < 0) {
			/* in the add disk case we are running in mdmon
			 * context, so don't close fd's
			 */
			return _add_disk(st);
		} else
			rv = create_array(st, current_vol);

		for (d = super->disks; d; d = d->next) {
			close(d->fd);
			d->fd = -1;
		}

		return rv;
	} else
		return write_super_imsm(st->sb, 1);
}
static int store_super_imsm(struct supertype *st, int fd)
{
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super ? super->anchor : NULL;

	if (!mpb)
		return 1;

	return store_imsm_mpb(fd, mpb);
}

static int imsm_bbm_log_size(struct imsm_super *mpb)
{
	return __le32_to_cpu(mpb->bbm_log_size);
}
3249 static int validate_geometry_imsm_container(struct supertype
*st
, int level
,
3250 int layout
, int raiddisks
, int chunk
,
3251 unsigned long long size
, char *dev
,
3252 unsigned long long *freesize
,
3256 unsigned long long ldsize
;
3257 const struct imsm_orom
*orom
;
3259 if (level
!= LEVEL_CONTAINER
)
3264 if (check_env("IMSM_NO_PLATFORM"))
3267 orom
= find_imsm_orom();
3268 if (orom
&& raiddisks
> orom
->tds
) {
3270 fprintf(stderr
, Name
": %d exceeds maximum number of"
3271 " platform supported disks: %d\n",
3272 raiddisks
, orom
->tds
);
3276 fd
= open(dev
, O_RDONLY
|O_EXCL
, 0);
3279 fprintf(stderr
, Name
": imsm: Cannot open %s: %s\n",
3280 dev
, strerror(errno
));
3283 if (!get_dev_size(fd
, dev
, &ldsize
)) {
3289 *freesize
= avail_size_imsm(st
, ldsize
>> 9);
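/* find_size(): starting at extent *idx, coalesce overlapping extents and
 * return the size of the combined region.
 */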
static unsigned long long find_size(struct extent *e, int *idx, int num_extents)
{
	const unsigned long long base_start = e[*idx].start;
	unsigned long long end = base_start + e[*idx].size;
	int i;

	if (base_start == end)
		return 0;

	for (i = *idx; i < num_extents; i++) {
		/* extend overlapping extents */
		if (e[i].start >= base_start &&
		    e[i].start <= end) {
			if (e[i].start + e[i].size > end)
				end = e[i].start + e[i].size;
		} else if (e[i].start > end) {
			break;
		}
	}

	return end - base_start;
}
3321 static unsigned long long merge_extents(struct intel_super
*super
, int sum_extents
)
3323 /* build a composite disk with all known extents and generate a new
3324 * 'maxsize' given the "all disks in an array must share a common start
3325 * offset" constraint
3327 struct extent
*e
= calloc(sum_extents
, sizeof(*e
));
3331 unsigned long long pos
;
3332 unsigned long long start
= 0;
3333 unsigned long long maxsize
;
3334 unsigned long reserve
;
3339 /* coalesce and sort all extents. also, check to see if we need to
3340 * reserve space between member arrays
3343 for (dl
= super
->disks
; dl
; dl
= dl
->next
) {
3346 for (i
= 0; i
< dl
->extent_cnt
; i
++)
3349 qsort(e
, sum_extents
, sizeof(*e
), cmp_extent
);
3354 while (i
< sum_extents
) {
3355 e
[j
].start
= e
[i
].start
;
3356 e
[j
].size
= find_size(e
, &i
, sum_extents
);
3358 if (e
[j
-1].size
== 0)
3367 unsigned long long esize
;
3369 esize
= e
[i
].start
- pos
;
3370 if (esize
>= maxsize
) {
3375 pos
= e
[i
].start
+ e
[i
].size
;
3377 } while (e
[i
-1].size
);
3383 /* FIXME assumes volume at offset 0 is the first volume in a
3386 if (start_extent
> 0)
3387 reserve
= IMSM_RESERVED_SECTORS
; /* gap between raid regions */
3391 if (maxsize
< reserve
)
3394 super
->create_offset
= ~((__u32
) 0);
3395 if (start
+ reserve
> super
->create_offset
)
3396 return 0; /* start overflows create_offset */
3397 super
->create_offset
= start
+ reserve
;
3399 return maxsize
- reserve
;
static int is_raid_level_supported(const struct imsm_orom *orom, int level, int raiddisks)
{
	if (level < 0 || level == 6 || level == 4)
		return 0;

	/* if we have an orom prevent invalid raid levels */
	if (orom)
		switch (level) {
		case 0: return imsm_orom_has_raid0(orom);
		case 1:
			if (raiddisks > 2)
				return imsm_orom_has_raid1e(orom);
			return imsm_orom_has_raid1(orom) && raiddisks == 2;
		case 10: return imsm_orom_has_raid10(orom) && raiddisks == 4;
		case 5: return imsm_orom_has_raid5(orom) && raiddisks > 2;
		}
	else
		return 1; /* not on an Intel RAID platform so anything goes */

	return 0;
}
#define pr_vrb(fmt, arg...) (void) (verbose && fprintf(stderr, Name fmt, ##arg))

static int
validate_geometry_imsm_orom(struct intel_super *super, int level, int layout,
			    int raiddisks, int chunk, int verbose)
{
	if (!is_raid_level_supported(super->orom, level, raiddisks)) {
		pr_vrb(": platform does not support raid%d with %d disk%s\n",
			level, raiddisks, raiddisks > 1 ? "s" : "");
		return 0;
	}
	if (super->orom && level != 1 &&
	    !imsm_orom_has_chunk(super->orom, chunk)) {
		pr_vrb(": platform does not support a chunk size of: %d\n", chunk);
		return 0;
	}
	if (layout != imsm_level_to_layout(level)) {
		if (level == 5)
			pr_vrb(": imsm raid 5 only supports the left-asymmetric layout\n");
		else if (level == 10)
			pr_vrb(": imsm raid 10 only supports the n2 layout\n");
		else
			pr_vrb(": imsm unknown layout %#x for this raid level %d\n",
				layout, level);
		return 0;
	}

	return 1;
}
3453 /* validate_geometry_imsm_volume - lifted from validate_geometry_ddf_bvd
3454 * FIX ME add ahci details
3456 static int validate_geometry_imsm_volume(struct supertype
*st
, int level
,
3457 int layout
, int raiddisks
, int chunk
,
3458 unsigned long long size
, char *dev
,
3459 unsigned long long *freesize
,
3463 struct intel_super
*super
= st
->sb
;
3464 struct imsm_super
*mpb
= super
->anchor
;
3466 unsigned long long pos
= 0;
3467 unsigned long long maxsize
;
3471 /* We must have the container info already read in. */
3475 if (!validate_geometry_imsm_orom(super
, level
, layout
, raiddisks
, chunk
, verbose
))
3479 /* General test: make sure there is space for
3480 * 'raiddisks' device extents of size 'size' at a given
3483 unsigned long long minsize
= size
;
3484 unsigned long long start_offset
= ~0ULL;
3487 minsize
= MPB_SECTOR_CNT
+ IMSM_RESERVED_SECTORS
;
3488 for (dl
= super
->disks
; dl
; dl
= dl
->next
) {
3493 e
= get_extents(super
, dl
);
3496 unsigned long long esize
;
3497 esize
= e
[i
].start
- pos
;
3498 if (esize
>= minsize
)
3500 if (found
&& start_offset
== ~0ULL) {
3503 } else if (found
&& pos
!= start_offset
) {
3507 pos
= e
[i
].start
+ e
[i
].size
;
3509 } while (e
[i
-1].size
);
3514 if (dcnt
< raiddisks
) {
3516 fprintf(stderr
, Name
": imsm: Not enough "
3517 "devices with space for this array "
3525 /* This device must be a member of the set */
3526 if (stat(dev
, &stb
) < 0)
3528 if ((S_IFMT
& stb
.st_mode
) != S_IFBLK
)
3530 for (dl
= super
->disks
; dl
; dl
= dl
->next
) {
3531 if (dl
->major
== major(stb
.st_rdev
) &&
3532 dl
->minor
== minor(stb
.st_rdev
))
3537 fprintf(stderr
, Name
": %s is not in the "
3538 "same imsm set\n", dev
);
3540 } else if (super
->orom
&& dl
->index
< 0 && mpb
->num_raid_devs
) {
3541 /* If a volume is present then the current creation attempt
3542 * cannot incorporate new spares because the orom may not
3543 * understand this configuration (all member disks must be
3544 * members of each array in the container).
3546 fprintf(stderr
, Name
": %s is a spare and a volume"
3547 " is already defined for this container\n", dev
);
3548 fprintf(stderr
, Name
": The option-rom requires all member"
3549 " disks to be a member of all volumes\n");
3553 /* retrieve the largest free space block */
3554 e
= get_extents(super
, dl
);
3559 unsigned long long esize
;
3561 esize
= e
[i
].start
- pos
;
3562 if (esize
>= maxsize
)
3564 pos
= e
[i
].start
+ e
[i
].size
;
3566 } while (e
[i
-1].size
);
3571 fprintf(stderr
, Name
": unable to determine free space for: %s\n",
3575 if (maxsize
< size
) {
3577 fprintf(stderr
, Name
": %s not enough space (%llu < %llu)\n",
3578 dev
, maxsize
, size
);
3582 /* count total number of extents for merge */
3584 for (dl
= super
->disks
; dl
; dl
= dl
->next
)
3586 i
+= dl
->extent_cnt
;
3588 maxsize
= merge_extents(super
, i
);
3589 if (maxsize
< size
|| maxsize
== 0) {
3591 fprintf(stderr
, Name
": not enough space after merge (%llu < %llu)\n",
3596 *freesize
= maxsize
;
3601 static int reserve_space(struct supertype
*st
, int raiddisks
,
3602 unsigned long long size
, int chunk
,
3603 unsigned long long *freesize
)
3605 struct intel_super
*super
= st
->sb
;
3606 struct imsm_super
*mpb
= super
->anchor
;
3611 unsigned long long maxsize
;
3612 unsigned long long minsize
;
3616 /* find the largest common start free region of the possible disks */
3620 for (dl
= super
->disks
; dl
; dl
= dl
->next
) {
3626 /* don't activate new spares if we are orom constrained
3627 * and there is already a volume active in the container
3629 if (super
->orom
&& dl
->index
< 0 && mpb
->num_raid_devs
)
3632 e
= get_extents(super
, dl
);
3635 for (i
= 1; e
[i
-1].size
; i
++)
3643 maxsize
= merge_extents(super
, extent_cnt
);
3648 if (cnt
< raiddisks
||
3649 (super
->orom
&& used
&& used
!= raiddisks
) ||
3650 maxsize
< minsize
||
3652 fprintf(stderr
, Name
": not enough devices with space to create array.\n");
3653 return 0; /* No enough free spaces large enough */
3665 for (dl
= super
->disks
; dl
; dl
= dl
->next
)
3667 dl
->raiddisk
= cnt
++;
3674 static int validate_geometry_imsm(struct supertype
*st
, int level
, int layout
,
3675 int raiddisks
, int chunk
, unsigned long long size
,
3676 char *dev
, unsigned long long *freesize
,
3682 /* if given unused devices create a container
3683 * if given given devices in a container create a member volume
3685 if (level
== LEVEL_CONTAINER
) {
3686 /* Must be a fresh device to add to a container */
3687 return validate_geometry_imsm_container(st
, level
, layout
,
3688 raiddisks
, chunk
, size
,
3694 if (st
->sb
&& freesize
) {
3695 /* we are being asked to automatically layout a
3696 * new volume based on the current contents of
3697 * the container. If the the parameters can be
3698 * satisfied reserve_space will record the disks,
3699 * start offset, and size of the volume to be
3700 * created. add_to_super and getinfo_super
3701 * detect when autolayout is in progress.
3703 if (!validate_geometry_imsm_orom(st
->sb
, level
, layout
,
3707 return reserve_space(st
, raiddisks
, size
, chunk
, freesize
);
3712 /* creating in a given container */
3713 return validate_geometry_imsm_volume(st
, level
, layout
,
3714 raiddisks
, chunk
, size
,
3715 dev
, freesize
, verbose
);
3718 /* limit creation to the following levels */
3728 fprintf(stderr
, Name
3729 ": IMSM only supports levels 0,1,5,10\n");
3733 /* This device needs to be a device in an 'imsm' container */
3734 fd
= open(dev
, O_RDONLY
|O_EXCL
, 0);
3738 Name
": Cannot create this array on device %s\n",
3743 if (errno
!= EBUSY
|| (fd
= open(dev
, O_RDONLY
, 0)) < 0) {
3745 fprintf(stderr
, Name
": Cannot open %s: %s\n",
3746 dev
, strerror(errno
));
3749 /* Well, it is in use by someone, maybe an 'imsm' container. */
3750 cfd
= open_container(fd
);
3754 fprintf(stderr
, Name
": Cannot use %s: It is busy\n",
3758 sra
= sysfs_read(cfd
, 0, GET_VERSION
);
3760 if (sra
&& sra
->array
.major_version
== -1 &&
3761 strcmp(sra
->text_version
, "imsm") == 0) {
3762 /* This is a member of a imsm container. Load the container
3763 * and try to create a volume
3765 struct intel_super
*super
;
3767 if (load_super_imsm_all(st
, cfd
, (void **) &super
, NULL
, 1) == 0) {
3769 st
->container_dev
= fd2devnum(cfd
);
3771 return validate_geometry_imsm_volume(st
, level
, layout
,
3777 } else /* may belong to another container */
3782 #endif /* MDASSEMBLE */
3784 static struct mdinfo
*container_content_imsm(struct supertype
*st
)
3786 /* Given a container loaded by load_super_imsm_all,
3787 * extract information about all the arrays into
3790 * For each imsm_dev create an mdinfo, fill it in,
3791 * then look for matching devices in super->disks
3792 * and create appropriate device mdinfo.
3794 struct intel_super
*super
= st
->sb
;
3795 struct imsm_super
*mpb
= super
->anchor
;
3796 struct mdinfo
*rest
= NULL
;
3799 /* do not assemble arrays that might have bad blocks */
3800 if (imsm_bbm_log_size(super
->anchor
)) {
3801 fprintf(stderr
, Name
": BBM log found in metadata. "
3802 "Cannot activate array(s).\n");
3806 for (i
= 0; i
< mpb
->num_raid_devs
; i
++) {
3807 struct imsm_dev
*dev
= get_imsm_dev(super
, i
);
3808 struct imsm_map
*map
= get_imsm_map(dev
, 0);
3809 struct mdinfo
*this;
3812 /* do not publish arrays that are in the middle of an
3813 * unsupported migration
3815 if (dev
->vol
.migr_state
&&
3816 (migr_type(dev
) == MIGR_GEN_MIGR
||
3817 migr_type(dev
) == MIGR_STATE_CHANGE
)) {
3818 fprintf(stderr
, Name
": cannot assemble volume '%.16s':"
3819 " unsupported migration in progress\n",
3824 this = malloc(sizeof(*this));
3825 memset(this, 0, sizeof(*this));
3828 super
->current_vol
= i
;
3829 getinfo_super_imsm_volume(st
, this);
3830 for (slot
= 0 ; slot
< map
->num_members
; slot
++) {
3831 struct mdinfo
*info_d
;
3838 idx
= get_imsm_disk_idx(dev
, slot
);
3839 ord
= get_imsm_ord_tbl_ent(dev
, slot
);
3840 for (d
= super
->disks
; d
; d
= d
->next
)
3841 if (d
->index
== idx
)
3846 if (d
&& is_failed(&d
->disk
))
3848 if (ord
& IMSM_ORD_REBUILD
)
3852 * if we skip some disks the array will be assmebled degraded;
3853 * reset resync start to avoid a dirty-degraded situation
3855 * FIXME handle dirty degraded
3857 if (skip
&& !dev
->vol
.dirty
)
3858 this->resync_start
= ~0ULL;
3862 info_d
= malloc(sizeof(*info_d
));
3864 fprintf(stderr
, Name
": failed to allocate disk"
3865 " for volume %.16s\n", dev
->volume
);
3870 memset(info_d
, 0, sizeof(*info_d
));
3871 info_d
->next
= this->devs
;
3872 this->devs
= info_d
;
3874 info_d
->disk
.number
= d
->index
;
3875 info_d
->disk
.major
= d
->major
;
3876 info_d
->disk
.minor
= d
->minor
;
3877 info_d
->disk
.raid_disk
= slot
;
3879 this->array
.working_disks
++;
3881 info_d
->events
= __le32_to_cpu(mpb
->generation_num
);
3882 info_d
->data_offset
= __le32_to_cpu(map
->pba_of_lba0
);
3883 info_d
->component_size
= __le32_to_cpu(map
->blocks_per_member
);
3885 strcpy(info_d
->name
, d
->devname
);
static int imsm_open_new(struct supertype *c, struct active_array *a,
			 char *inst)
{
	struct intel_super *super = c->sb;
	struct imsm_super *mpb = super->anchor;

	if (atoi(inst) >= mpb->num_raid_devs) {
		fprintf(stderr, "%s: subarray index %d, out of range\n",
			__func__, atoi(inst));
		return -ENODEV;
	}

	dprintf("imsm: open_new %s\n", inst);
	a->info.container_member = atoi(inst);
	return 0;
}
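/* imsm_check_degraded(): map a failed-disk count onto the imsm map state
 * (normal, degraded or failed) for the given raid level.
 */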
3912 static __u8
imsm_check_degraded(struct intel_super
*super
, struct imsm_dev
*dev
, int failed
)
3914 struct imsm_map
*map
= get_imsm_map(dev
, 0);
3917 return map
->map_state
== IMSM_T_STATE_UNINITIALIZED
?
3918 IMSM_T_STATE_UNINITIALIZED
: IMSM_T_STATE_NORMAL
;
3920 switch (get_imsm_raid_level(map
)) {
3922 return IMSM_T_STATE_FAILED
;
3925 if (failed
< map
->num_members
)
3926 return IMSM_T_STATE_DEGRADED
;
3928 return IMSM_T_STATE_FAILED
;
3933 * check to see if any mirrors have failed, otherwise we
3934 * are degraded. Even numbered slots are mirrored on
3938 /* gcc -Os complains that this is unused */
3939 int insync
= insync
;
3941 for (i
= 0; i
< map
->num_members
; i
++) {
3942 __u32 ord
= get_imsm_ord_tbl_ent(dev
, i
);
3943 int idx
= ord_to_idx(ord
);
3944 struct imsm_disk
*disk
;
3946 /* reset the potential in-sync count on even-numbered
3947 * slots. num_copies is always 2 for imsm raid10
3952 disk
= get_imsm_disk(super
, idx
);
3953 if (!disk
|| is_failed(disk
) || ord
& IMSM_ORD_REBUILD
)
3956 /* no in-sync disks left in this mirror the
3960 return IMSM_T_STATE_FAILED
;
3963 return IMSM_T_STATE_DEGRADED
;
3967 return IMSM_T_STATE_DEGRADED
;
3969 return IMSM_T_STATE_FAILED
;
3975 return map
->map_state
;
3978 static int imsm_count_failed(struct intel_super
*super
, struct imsm_dev
*dev
)
3982 struct imsm_disk
*disk
;
3983 struct imsm_map
*map
= get_imsm_map(dev
, 0);
3984 struct imsm_map
*prev
= get_imsm_map(dev
, dev
->vol
.migr_state
);
3988 /* at the beginning of migration we set IMSM_ORD_REBUILD on
3989 * disks that are being rebuilt. New failures are recorded to
3990 * map[0]. So we look through all the disks we started with and
3991 * see if any failures are still present, or if any new ones
3994 * FIXME add support for online capacity expansion and
3995 * raid-level-migration
3997 for (i
= 0; i
< prev
->num_members
; i
++) {
3998 ord
= __le32_to_cpu(prev
->disk_ord_tbl
[i
]);
3999 ord
|= __le32_to_cpu(map
->disk_ord_tbl
[i
]);
4000 idx
= ord_to_idx(ord
);
4002 disk
= get_imsm_disk(super
, idx
);
4003 if (!disk
|| is_failed(disk
) || ord
& IMSM_ORD_REBUILD
)
static int is_resyncing(struct imsm_dev *dev)
{
	struct imsm_map *migr_map;

	if (!dev->vol.migr_state)
		return 0;

	if (migr_type(dev) == MIGR_INIT ||
	    migr_type(dev) == MIGR_REPAIR)
		return 1;

	migr_map = get_imsm_map(dev, 1);

	if (migr_map->map_state == IMSM_T_STATE_NORMAL)
		return 1;
	else
		return 0;
}

static int is_rebuilding(struct imsm_dev *dev)
{
	struct imsm_map *migr_map;

	if (!dev->vol.migr_state)
		return 0;

	if (migr_type(dev) != MIGR_REBUILD)
		return 0;

	migr_map = get_imsm_map(dev, 1);

	if (migr_map->map_state == IMSM_T_STATE_DEGRADED)
		return 1;
	else
		return 0;
}
/* return true if we recorded new information */
static int mark_failure(struct imsm_dev *dev, struct imsm_disk *disk, int idx)
{
	__u32 ord;
	int slot;
	struct imsm_map *map;

	/* new failures are always set in map[0] */
	map = get_imsm_map(dev, 0);

	slot = get_imsm_disk_slot(map, idx);
	if (slot < 0)
		return 0;

	ord = __le32_to_cpu(map->disk_ord_tbl[slot]);
	if (is_failed(disk) && (ord & IMSM_ORD_REBUILD))
		return 0;

	disk->status |= FAILED_DISK;
	disk->status &= ~CONFIGURED_DISK;
	set_imsm_ord_tbl_ent(map, slot, idx | IMSM_ORD_REBUILD);
	if (~map->failed_disk_num == 0)
		map->failed_disk_num = slot;
	return 1;
}

static void mark_missing(struct imsm_dev *dev, struct imsm_disk *disk, int idx)
{
	mark_failure(dev, disk, idx);

	if (disk->scsi_id == __cpu_to_le32(~(__u32)0))
		return;

	disk->scsi_id = __cpu_to_le32(~(__u32)0);
	memmove(&disk->serial[0], &disk->serial[1], MAX_RAID_SERIAL_LEN - 1);
}
4084 /* Handle dirty -> clean transititions and resync. Degraded and rebuild
4085 * states are handled in imsm_set_disk() with one exception, when a
4086 * resync is stopped due to a new failure this routine will set the
4087 * 'degraded' state for the array.
4089 static int imsm_set_array_state(struct active_array
*a
, int consistent
)
4091 int inst
= a
->info
.container_member
;
4092 struct intel_super
*super
= a
->container
->sb
;
4093 struct imsm_dev
*dev
= get_imsm_dev(super
, inst
);
4094 struct imsm_map
*map
= get_imsm_map(dev
, 0);
4095 int failed
= imsm_count_failed(super
, dev
);
4096 __u8 map_state
= imsm_check_degraded(super
, dev
, failed
);
4098 /* before we activate this array handle any missing disks */
4099 if (consistent
== 2 && super
->missing
) {
4102 dprintf("imsm: mark missing\n");
4103 end_migration(dev
, map_state
);
4104 for (dl
= super
->missing
; dl
; dl
= dl
->next
)
4105 mark_missing(dev
, &dl
->disk
, dl
->index
);
4106 super
->updates_pending
++;
4109 if (consistent
== 2 &&
4110 (!is_resync_complete(a
) ||
4111 map_state
!= IMSM_T_STATE_NORMAL
||
4112 dev
->vol
.migr_state
))
4115 if (is_resync_complete(a
)) {
4116 /* complete intialization / resync,
4117 * recovery and interrupted recovery is completed in
4120 if (is_resyncing(dev
)) {
4121 dprintf("imsm: mark resync done\n");
4122 end_migration(dev
, map_state
);
4123 super
->updates_pending
++;
4125 } else if (!is_resyncing(dev
) && !failed
) {
4126 /* mark the start of the init process if nothing is failed */
4127 dprintf("imsm: mark resync start (%llu)\n", a
->resync_start
);
4128 if (map
->map_state
== IMSM_T_STATE_UNINITIALIZED
)
4129 migrate(dev
, IMSM_T_STATE_NORMAL
, MIGR_INIT
);
4131 migrate(dev
, IMSM_T_STATE_NORMAL
, MIGR_REPAIR
);
4132 super
->updates_pending
++;
4135 /* FIXME check if we can update curr_migr_unit from resync_start */
4137 /* mark dirty / clean */
4138 if (dev
->vol
.dirty
!= !consistent
) {
4139 dprintf("imsm: mark '%s' (%llu)\n",
4140 consistent
? "clean" : "dirty", a
->resync_start
);
4145 super
->updates_pending
++;
4150 static void imsm_set_disk(struct active_array
*a
, int n
, int state
)
4152 int inst
= a
->info
.container_member
;
4153 struct intel_super
*super
= a
->container
->sb
;
4154 struct imsm_dev
*dev
= get_imsm_dev(super
, inst
);
4155 struct imsm_map
*map
= get_imsm_map(dev
, 0);
4156 struct imsm_disk
*disk
;
4161 if (n
> map
->num_members
)
4162 fprintf(stderr
, "imsm: set_disk %d out of range 0..%d\n",
4163 n
, map
->num_members
- 1);
4168 dprintf("imsm: set_disk %d:%x\n", n
, state
);
4170 ord
= get_imsm_ord_tbl_ent(dev
, n
);
4171 disk
= get_imsm_disk(super
, ord_to_idx(ord
));
4173 /* check for new failures */
4174 if (state
& DS_FAULTY
) {
4175 if (mark_failure(dev
, disk
, ord_to_idx(ord
)))
4176 super
->updates_pending
++;
4179 /* check if in_sync */
4180 if (state
& DS_INSYNC
&& ord
& IMSM_ORD_REBUILD
&& is_rebuilding(dev
)) {
4181 struct imsm_map
*migr_map
= get_imsm_map(dev
, 1);
4183 set_imsm_ord_tbl_ent(migr_map
, n
, ord_to_idx(ord
));
4184 super
->updates_pending
++;
4187 failed
= imsm_count_failed(super
, dev
);
4188 map_state
= imsm_check_degraded(super
, dev
, failed
);
4190 /* check if recovery complete, newly degraded, or failed */
4191 if (map_state
== IMSM_T_STATE_NORMAL
&& is_rebuilding(dev
)) {
4192 end_migration(dev
, map_state
);
4193 map
= get_imsm_map(dev
, 0);
4194 map
->failed_disk_num
= ~0;
4195 super
->updates_pending
++;
4196 } else if (map_state
== IMSM_T_STATE_DEGRADED
&&
4197 map
->map_state
!= map_state
&&
4198 !dev
->vol
.migr_state
) {
4199 dprintf("imsm: mark degraded\n");
4200 map
->map_state
= map_state
;
4201 super
->updates_pending
++;
4202 } else if (map_state
== IMSM_T_STATE_FAILED
&&
4203 map
->map_state
!= map_state
) {
4204 dprintf("imsm: mark failed\n");
4205 end_migration(dev
, map_state
);
4206 super
->updates_pending
++;
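/* store_imsm_mpb(): write the metadata to one device; the anchor lives in
 * the second-to-last sector and any extended mpb sectors immediately
 * precede it.
 */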
static int store_imsm_mpb(int fd, struct imsm_super *mpb)
{
	__u32 mpb_size = __le32_to_cpu(mpb->mpb_size);
	unsigned long long dsize;
	unsigned long long sectors;
	void *buf = mpb;

	get_dev_size(fd, NULL, &dsize);

	if (mpb_size > 512) {
		/* -1 to account for anchor */
		sectors = mpb_sectors(mpb) - 1;

		/* write the extended mpb to the sectors preceding the anchor */
		if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0)
			return 1;

		if (write(fd, buf + 512, 512 * sectors) != 512 * sectors)
			return 1;
	}

	/* first block is stored on second to last sector of the disk */
	if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
		return 1;

	if (write(fd, buf, 512) != 512)
		return 1;

	return 0;
}

static void imsm_sync_metadata(struct supertype *container)
{
	struct intel_super *super = container->sb;

	if (!super->updates_pending)
		return;

	write_super_imsm(super, 0);

	super->updates_pending = 0;
}
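/* imsm_readd(): try to re-add the disk that previously occupied slot 'idx'
 * in this array; only succeeds if that disk is present and not failed.
 */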
static struct dl *imsm_readd(struct intel_super *super, int idx, struct active_array *a)
{
	struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
	int i = get_imsm_disk_idx(dev, idx);
	struct dl *dl;

	for (dl = super->disks; dl; dl = dl->next)
		if (dl->index == i)
			break;

	if (dl && is_failed(&dl->disk))
		dl = NULL;

	if (dl)
		dprintf("%s: found %x:%x\n", __func__, dl->major, dl->minor);

	return dl;
}
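/* imsm_add_spare(): find a container disk with enough free space to cover
 * every member volume and use it to fill 'slot'; when 'activate_new' is set
 * pure spares are considered, otherwise only partially assimilated disks.
 */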
4272 static struct dl
*imsm_add_spare(struct intel_super
*super
, int slot
,
4273 struct active_array
*a
, int activate_new
)
4275 struct imsm_dev
*dev
= get_imsm_dev(super
, a
->info
.container_member
);
4276 int idx
= get_imsm_disk_idx(dev
, slot
);
4277 struct imsm_super
*mpb
= super
->anchor
;
4278 struct imsm_map
*map
;
4279 unsigned long long pos
;
4288 for (dl
= super
->disks
; dl
; dl
= dl
->next
) {
4289 /* If in this array, skip */
4290 for (d
= a
->info
.devs
; d
; d
= d
->next
)
4291 if (d
->state_fd
>= 0 &&
4292 d
->disk
.major
== dl
->major
&&
4293 d
->disk
.minor
== dl
->minor
) {
4294 dprintf("%x:%x already in array\n", dl
->major
, dl
->minor
);
4300 /* skip in use or failed drives */
4301 if (is_failed(&dl
->disk
) || idx
== dl
->index
||
4303 dprintf("%x:%x status (failed: %d index: %d)\n",
4304 dl
->major
, dl
->minor
, is_failed(&dl
->disk
), idx
);
4308 /* skip pure spares when we are looking for partially
4309 * assimilated drives
4311 if (dl
->index
== -1 && !activate_new
)
4314 /* Does this unused device have the requisite free space?
4315 * It needs to be able to cover all member volumes
4317 ex
= get_extents(super
, dl
);
4319 dprintf("cannot get extents\n");
4322 for (i
= 0; i
< mpb
->num_raid_devs
; i
++) {
4323 dev
= get_imsm_dev(super
, i
);
4324 map
= get_imsm_map(dev
, 0);
4326 /* check if this disk is already a member of
4329 if (get_imsm_disk_slot(map
, dl
->index
) >= 0)
4335 array_start
= __le32_to_cpu(map
->pba_of_lba0
);
4336 array_end
= array_start
+
4337 __le32_to_cpu(map
->blocks_per_member
) - 1;
4340 /* check that we can start at pba_of_lba0 with
4341 * blocks_per_member of space
4343 if (array_start
>= pos
&& array_end
< ex
[j
].start
) {
4347 pos
= ex
[j
].start
+ ex
[j
].size
;
4349 } while (ex
[j
-1].size
);
4356 if (i
< mpb
->num_raid_devs
) {
4357 dprintf("%x:%x does not have %u to %u available\n",
4358 dl
->major
, dl
->minor
, array_start
, array_end
);
4368 static struct mdinfo
*imsm_activate_spare(struct active_array
*a
,
4369 struct metadata_update
**updates
)
4372 * Find a device with unused free space and use it to replace a
4373 * failed/vacant region in an array. We replace failed regions one a
4374 * array at a time. The result is that a new spare disk will be added
4375 * to the first failed array and after the monitor has finished
4376 * propagating failures the remainder will be consumed.
4378 * FIXME add a capability for mdmon to request spares from another
4382 struct intel_super
*super
= a
->container
->sb
;
4383 int inst
= a
->info
.container_member
;
4384 struct imsm_dev
*dev
= get_imsm_dev(super
, inst
);
4385 struct imsm_map
*map
= get_imsm_map(dev
, 0);
4386 int failed
= a
->info
.array
.raid_disks
;
4387 struct mdinfo
*rv
= NULL
;
4390 struct metadata_update
*mu
;
4392 struct imsm_update_activate_spare
*u
;
4396 for (d
= a
->info
.devs
; d
; d
= d
->next
) {
4397 if ((d
->curr_state
& DS_FAULTY
) &&
4399 /* wait for Removal to happen */
4401 if (d
->state_fd
>= 0)
4405 dprintf("imsm: activate spare: inst=%d failed=%d (%d) level=%d\n",
4406 inst
, failed
, a
->info
.array
.raid_disks
, a
->info
.array
.level
);
4407 if (imsm_check_degraded(super
, dev
, failed
) != IMSM_T_STATE_DEGRADED
)
4410 /* For each slot, if it is not working, find a spare */
4411 for (i
= 0; i
< a
->info
.array
.raid_disks
; i
++) {
4412 for (d
= a
->info
.devs
; d
; d
= d
->next
)
4413 if (d
->disk
.raid_disk
== i
)
4415 dprintf("found %d: %p %x\n", i
, d
, d
?d
->curr_state
:0);
4416 if (d
&& (d
->state_fd
>= 0))
4420 * OK, this device needs recovery. Try to re-add the
4421 * previous occupant of this slot, if this fails see if
4422 * we can continue the assimilation of a spare that was
4423 * partially assimilated, finally try to activate a new
4426 dl
= imsm_readd(super
, i
, a
);
4428 dl
= imsm_add_spare(super
, i
, a
, 0);
4430 dl
= imsm_add_spare(super
, i
, a
, 1);
4434 /* found a usable disk with enough space */
4435 di
= malloc(sizeof(*di
));
4438 memset(di
, 0, sizeof(*di
));
4440 /* dl->index will be -1 in the case we are activating a
4441 * pristine spare. imsm_process_update() will create a
4442 * new index in this case. Once a disk is found to be
4443 * failed in all member arrays it is kicked from the
4446 di
->disk
.number
= dl
->index
;
4448 /* (ab)use di->devs to store a pointer to the device
4451 di
->devs
= (struct mdinfo
*) dl
;
4453 di
->disk
.raid_disk
= i
;
4454 di
->disk
.major
= dl
->major
;
4455 di
->disk
.minor
= dl
->minor
;
4457 di
->data_offset
= __le32_to_cpu(map
->pba_of_lba0
);
4458 di
->component_size
= a
->info
.component_size
;
4459 di
->container_member
= inst
;
4460 super
->random
= random32();
4464 dprintf("%x:%x to be %d at %llu\n", dl
->major
, dl
->minor
,
4465 i
, di
->data_offset
);
4471 /* No spares found */
4473 /* Now 'rv' has a list of devices to return.
4474 * Create a metadata_update record to update the
4475 * disk_ord_tbl for the array
4477 mu
= malloc(sizeof(*mu
));
4479 mu
->buf
= malloc(sizeof(struct imsm_update_activate_spare
) * num_spares
);
4480 if (mu
->buf
== NULL
) {
4487 struct mdinfo
*n
= rv
->next
;
4496 mu
->len
= sizeof(struct imsm_update_activate_spare
) * num_spares
;
4497 mu
->next
= *updates
;
4498 u
= (struct imsm_update_activate_spare
*) mu
->buf
;
4500 for (di
= rv
; di
; di
= di
->next
) {
4501 u
->type
= update_activate_spare
;
4502 u
->dl
= (struct dl
*) di
->devs
;
4504 u
->slot
= di
->disk
.raid_disk
;
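/* disks_overlap(): true if any member of existing array 'idx' also appears
 * in the disk list of the array described by create-array update 'u'.
 */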
static int disks_overlap(struct intel_super *super, int idx, struct imsm_update_create_array *u)
{
	struct imsm_dev *dev = get_imsm_dev(super, idx);
	struct imsm_map *map = get_imsm_map(dev, 0);
	struct imsm_map *new_map = get_imsm_map(&u->dev, 0);
	struct disk_info *inf = get_disk_info(u);
	struct imsm_disk *disk;
	int i;
	int j;

	for (i = 0; i < map->num_members; i++) {
		disk = get_imsm_disk(super, get_imsm_disk_idx(dev, i));
		for (j = 0; j < new_map->num_members; j++)
			if (serialcmp(disk->serial, inf[j].serial) == 0)
				return 1;
	}

	return 0;
}
4535 static void imsm_delete(struct intel_super
*super
, struct dl
**dlp
, int index
);
4537 static void imsm_process_update(struct supertype
*st
,
4538 struct metadata_update
*update
)
4541 * crack open the metadata_update envelope to find the update record
4542 * update can be one of:
4543 * update_activate_spare - a spare device has replaced a failed
4544 * device in an array, update the disk_ord_tbl. If this disk is
4545 * present in all member arrays then also clear the SPARE_DISK
4548 struct intel_super
*super
= st
->sb
;
4549 struct imsm_super
*mpb
;
4550 enum imsm_update_type type
= *(enum imsm_update_type
*) update
->buf
;
4552 /* update requires a larger buf but the allocation failed */
4553 if (super
->next_len
&& !super
->next_buf
) {
4554 super
->next_len
= 0;
4558 if (super
->next_buf
) {
4559 memcpy(super
->next_buf
, super
->buf
, super
->len
);
4561 super
->len
= super
->next_len
;
4562 super
->buf
= super
->next_buf
;
4564 super
->next_len
= 0;
4565 super
->next_buf
= NULL
;
4568 mpb
= super
->anchor
;
4571 case update_activate_spare
: {
4572 struct imsm_update_activate_spare
*u
= (void *) update
->buf
;
4573 struct imsm_dev
*dev
= get_imsm_dev(super
, u
->array
);
4574 struct imsm_map
*map
= get_imsm_map(dev
, 0);
4575 struct imsm_map
*migr_map
;
4576 struct active_array
*a
;
4577 struct imsm_disk
*disk
;
4582 int victim
= get_imsm_disk_idx(dev
, u
->slot
);
4585 for (dl
= super
->disks
; dl
; dl
= dl
->next
)
4590 fprintf(stderr
, "error: imsm_activate_spare passed "
4591 "an unknown disk (index: %d)\n",
4596 super
->updates_pending
++;
4598 /* count failures (excluding rebuilds and the victim)
4599 * to determine map[0] state
4602 for (i
= 0; i
< map
->num_members
; i
++) {
4605 disk
= get_imsm_disk(super
, get_imsm_disk_idx(dev
, i
));
4606 if (!disk
|| is_failed(disk
))
4610 /* adding a pristine spare, assign a new index */
4611 if (dl
->index
< 0) {
4612 dl
->index
= super
->anchor
->num_disks
;
4613 super
->anchor
->num_disks
++;
4616 disk
->status
|= CONFIGURED_DISK
;
4617 disk
->status
&= ~SPARE_DISK
;
4620 to_state
= imsm_check_degraded(super
, dev
, failed
);
4621 map
->map_state
= IMSM_T_STATE_DEGRADED
;
4622 migrate(dev
, to_state
, MIGR_REBUILD
);
4623 migr_map
= get_imsm_map(dev
, 1);
4624 set_imsm_ord_tbl_ent(map
, u
->slot
, dl
->index
);
4625 set_imsm_ord_tbl_ent(migr_map
, u
->slot
, dl
->index
| IMSM_ORD_REBUILD
);
4627 /* update the family_num to mark a new container
4628 * generation, being careful to record the existing
4629 * family_num in orig_family_num to clean up after
4630 * earlier mdadm versions that neglected to set it.
4632 if (mpb
->orig_family_num
== 0)
4633 mpb
->orig_family_num
= mpb
->family_num
;
4634 mpb
->family_num
+= super
->random
;
4636 /* count arrays using the victim in the metadata */
4638 for (a
= st
->arrays
; a
; a
= a
->next
) {
4639 dev
= get_imsm_dev(super
, a
->info
.container_member
);
4640 map
= get_imsm_map(dev
, 0);
4642 if (get_imsm_disk_slot(map
, victim
) >= 0)
4646 /* delete the victim if it is no longer being
4652 /* We know that 'manager' isn't touching anything,
4653 * so it is safe to delete
4655 for (dlp
= &super
->disks
; *dlp
; dlp
= &(*dlp
)->next
)
4656 if ((*dlp
)->index
== victim
)
4659 /* victim may be on the missing list */
4661 for (dlp
= &super
->missing
; *dlp
; dlp
= &(*dlp
)->next
)
4662 if ((*dlp
)->index
== victim
)
4664 imsm_delete(super
, dlp
, victim
);
4668 case update_create_array
: {
4669 /* someone wants to create a new array, we need to be aware of
4670 * a few races/collisions:
4671 * 1/ 'Create' called by two separate instances of mdadm
4672 * 2/ 'Create' versus 'activate_spare': mdadm has chosen
4673 * devices that have since been assimilated via
4675 * In the event this update can not be carried out mdadm will
4676 * (FIX ME) notice that its update did not take hold.
4678 struct imsm_update_create_array
*u
= (void *) update
->buf
;
4679 struct intel_dev
*dv
;
4680 struct imsm_dev
*dev
;
4681 struct imsm_map
*map
, *new_map
;
4682 unsigned long long start
, end
;
4683 unsigned long long new_start
, new_end
;
4685 struct disk_info
*inf
;
4688 /* handle racing creates: first come first serve */
4689 if (u
->dev_idx
< mpb
->num_raid_devs
) {
4690 dprintf("%s: subarray %d already defined\n",
4691 __func__
, u
->dev_idx
);
4695 /* check update is next in sequence */
4696 if (u
->dev_idx
!= mpb
->num_raid_devs
) {
4697 dprintf("%s: can not create array %d expected index %d\n",
4698 __func__
, u
->dev_idx
, mpb
->num_raid_devs
);
4702 new_map
= get_imsm_map(&u
->dev
, 0);
4703 new_start
= __le32_to_cpu(new_map
->pba_of_lba0
);
4704 new_end
= new_start
+ __le32_to_cpu(new_map
->blocks_per_member
);
4705 inf
= get_disk_info(u
);
4707 /* handle activate_spare versus create race:
4708 * check to make sure that overlapping arrays do not include
4711 for (i
= 0; i
< mpb
->num_raid_devs
; i
++) {
4712 dev
= get_imsm_dev(super
, i
);
4713 map
= get_imsm_map(dev
, 0);
4714 start
= __le32_to_cpu(map
->pba_of_lba0
);
4715 end
= start
+ __le32_to_cpu(map
->blocks_per_member
);
4716 if ((new_start
>= start
&& new_start
<= end
) ||
4717 (start
>= new_start
&& start
<= new_end
))
4722 if (disks_overlap(super
, i
, u
)) {
4723 dprintf("%s: arrays overlap\n", __func__
);
4728 /* check that prepare update was successful */
4729 if (!update
->space
) {
4730 dprintf("%s: prepare update failed\n", __func__
);
4734 /* check that all disks are still active before committing
4735 * changes. FIXME: could we instead handle this by creating a
4736 * degraded array? That's probably not what the user expects,
4737 * so better to drop this update on the floor.
4739 for (i
= 0; i
< new_map
->num_members
; i
++) {
4740 dl
= serial_to_dl(inf
[i
].serial
, super
);
4742 dprintf("%s: disk disappeared\n", __func__
);
4747 super
->updates_pending
++;
4749 /* convert spares to members and fixup ord_tbl */
4750 for (i
= 0; i
< new_map
->num_members
; i
++) {
4751 dl
= serial_to_dl(inf
[i
].serial
, super
);
4752 if (dl
->index
== -1) {
4753 dl
->index
= mpb
->num_disks
;
4755 dl
->disk
.status
|= CONFIGURED_DISK
;
4756 dl
->disk
.status
&= ~SPARE_DISK
;
4758 set_imsm_ord_tbl_ent(new_map
, i
, dl
->index
);
4763 update
->space
= NULL
;
4764 imsm_copy_dev(dev
, &u
->dev
);
4765 dv
->index
= u
->dev_idx
;
4766 dv
->next
= super
->devlist
;
4767 super
->devlist
= dv
;
4768 mpb
->num_raid_devs
++;
4770 imsm_update_version_info(super
);
4773 /* mdmon knows how to release update->space, but not
4774 * ((struct intel_dev *) update->space)->dev
4776 if (update
->space
) {
	case update_add_disk:
		/* we may be able to repair some arrays if disks are
		 * being added
		 */
		if (super->add) {
			struct active_array *a;

			super->updates_pending++;
			for (a = st->arrays; a; a = a->next)
				a->check_degraded = 1;
		}
		/* add some spares to the metadata */
		while (super->add) {
			struct dl *al;

			al = super->add;
			super->add = al->next;
			al->next = super->disks;
			super->disks = al;
			dprintf("%s: added %x:%x\n",
				__func__, al->major, al->minor);
		}

		break;
	}
}

static void imsm_prepare_update(struct supertype *st,
				struct metadata_update *update)
{
	/**
	 * Allocate space to hold new disk entries, raid-device entries or a new
	 * mpb if necessary.  The manager synchronously waits for updates to
	 * complete in the monitor, so new mpb buffers allocated here can be
	 * integrated by the monitor thread without worrying about live pointers
	 * in the manager thread.
	 */
	enum imsm_update_type type = *(enum imsm_update_type *) update->buf;
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	size_t buf_len;
	size_t len = 0;

	switch (type) {
	case update_create_array: {
		struct imsm_update_create_array *u = (void *) update->buf;
		struct intel_dev *dv;
		struct imsm_dev *dev = &u->dev;
		struct imsm_map *map = get_imsm_map(dev, 0);
		struct dl *dl;
		struct disk_info *inf;
		int i;
		int activate = 0;

		inf = get_disk_info(u);
		len = sizeof_imsm_dev(dev, 1);
		/* allocate a new super->devlist entry */
		dv = malloc(sizeof(*dv));
		if (dv) {
			dv->dev = malloc(len);
			if (dv->dev)
				update->space = dv;
			else {
				free(dv);
				update->space = NULL;
			}
		} else
			update->space = NULL;

		/* count how many spares will be converted to members */
		for (i = 0; i < map->num_members; i++) {
			dl = serial_to_dl(inf[i].serial, super);
			if (!dl) {
				/* hmm maybe it failed?, nothing we can do about
				 * it here
				 */
				continue;
			}
			if (count_memberships(dl, super) == 0)
				activate++;
		}
		len += activate * sizeof(struct imsm_disk);
		break;
	}
	default:
		break;
	}

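	/* at this point 'len' is the worst-case growth of the mpb for this
	 * update: the new raid-device entry plus an imsm_disk entry for each
	 * spare that will be promoted to a member
	 */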
	/* check if we need a larger metadata buffer */
	if (super->next_buf)
		buf_len = super->next_len;
	else
		buf_len = super->len;

	if (__le32_to_cpu(mpb->mpb_size) + len > buf_len) {
		/* ok we need a larger buf than what is currently allocated
		 * if this allocation fails process_update will notice that
		 * ->next_len is set and ->next_buf is NULL
		 */
		buf_len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) + len, 512);
		if (super->next_buf)
			free(super->next_buf);

		super->next_len = buf_len;
		if (posix_memalign(&super->next_buf, 512, buf_len) == 0)
			memset(super->next_buf, 0, buf_len);
		else
			super->next_buf = NULL;
	}
}

/* must be called while manager is quiesced */
static void imsm_delete(struct intel_super *super, struct dl **dlp, int index)
{
	struct imsm_super *mpb = super->anchor;
	struct dl *iter;
	struct imsm_dev *dev;
	struct imsm_map *map;
	int i, j, num_members;
	__u32 ord;
	dprintf("%s: deleting device[%d] from imsm_super\n",
		__func__, index);

	/* shift all indexes down one */
	for (iter = super->disks; iter; iter = iter->next)
		if (iter->index > index)
			iter->index--;
	for (iter = super->missing; iter; iter = iter->next)
		if (iter->index > index)
			iter->index--;
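
	/* the disk indexes above the victim just shifted down by one, so any
	 * ord_tbl entry referencing them is now stale; the nested loops below
	 * rewrite those entries while keeping ord-flags out of the first map
	 */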
	for (i = 0; i < mpb->num_raid_devs; i++) {
		dev = get_imsm_dev(super, i);
		map = get_imsm_map(dev, 0);
		num_members = map->num_members;
		for (j = 0; j < num_members; j++) {
			/* update ord entries being careful not to propagate
			 * ord-flags to the first map
			 */
			ord = get_imsm_ord_tbl_ent(dev, j);

			if (ord_to_idx(ord) <= index)
				continue;

			map = get_imsm_map(dev, 0);
			set_imsm_ord_tbl_ent(map, j, ord_to_idx(ord - 1));
			map = get_imsm_map(dev, 1);
			if (map)
				set_imsm_ord_tbl_ent(map, j, ord - 1);
		}
	}

	mpb->num_disks--;
	super->updates_pending++;
	if (*dlp) {
		struct dl *dl = *dlp;

		*dlp = (*dlp)->next;
		__free_imsm_disk(dl);
	}
}
#endif /* MDASSEMBLE */
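
/* super_imsm is the metadata-handler vtable that plugs the imsm_* routines
 * above into mdadm and mdmon; the entries guarded by MDASSEMBLE are not
 * needed by the stripped-down mdassemble build.
 */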
struct superswitch super_imsm = {
#ifndef MDASSEMBLE
	.examine_super = examine_super_imsm,
	.brief_examine_super = brief_examine_super_imsm,
	.brief_examine_subarrays = brief_examine_subarrays_imsm,
	.export_examine_super = export_examine_super_imsm,
	.detail_super = detail_super_imsm,
	.brief_detail_super = brief_detail_super_imsm,
	.write_init_super = write_init_super_imsm,
	.validate_geometry = validate_geometry_imsm,
	.add_to_super = add_to_super_imsm,
	.detail_platform = detail_platform_imsm,
#endif
	.match_home = match_home_imsm,
	.uuid_from_super = uuid_from_super_imsm,
	.getinfo_super = getinfo_super_imsm,
	.update_super = update_super_imsm,

	.avail_size = avail_size_imsm,

	.compare_super = compare_super_imsm,

	.load_super = load_super_imsm,
	.init_super = init_super_imsm,
	.store_super = store_super_imsm,
	.free_super = free_super_imsm,
	.match_metadata_desc = match_metadata_desc_imsm,
	.container_content = container_content_imsm,
	.default_layout = imsm_level_to_layout,

	.external = 1,
	.name = "imsm",

#ifndef MDASSEMBLE
/* for mdmon */
	.open_new = imsm_open_new,
	.load_super = load_super_imsm,
	.set_array_state = imsm_set_array_state,
	.set_disk = imsm_set_disk,
	.sync_metadata = imsm_sync_metadata,
	.activate_spare = imsm_activate_spare,
	.process_update = imsm_process_update,
	.prepare_update = imsm_prepare_update,
#endif /* MDASSEMBLE */
};