/*
 * mdadm - Intel(R) Matrix Storage Manager Support
 *
 * Copyright (C) 2002-2008 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#define HAVE_STDINT_H 1
#include "mdadm.h"
#include "mdmon.h"
#include "drive_encryption.h"
#include "sha1.h"
#include "platform-intel.h"
#include <scsi/scsi.h>
#include <sys/ioctl.h>
/* MPB == Metadata Parameter Block */
#define MPB_SIGNATURE "Intel Raid ISM Cfg Sig. "
#define MPB_SIG_LEN (strlen(MPB_SIGNATURE))
/* Legacy IMSM versions:
 * MPB_VERSION_RAID0 1.0.00
 * MPB_VERSION_RAID1 1.1.00
 * MPB_VERSION_MANY_VOLUMES_PER_ARRAY 1.2.00
 * MPB_VERSION_3OR4_DISK_ARRAY 1.2.01
 * MPB_VERSION_RAID5 1.2.02
 * MPB_VERSION_5OR6_DISK_ARRAY 1.2.04
 * MPB_VERSION_CNG 1.2.06
 */

#define MPB_VERSION_ATTRIBS "1.3.00"
#define MPB_VERSION_ATTRIBS_JD "2.0.00"
#define MAX_SIGNATURE_LENGTH 32
#define MAX_RAID_SERIAL_LEN 16
#define MPB_ATTRIB_RAID0		__cpu_to_le32(0x00000001)
/* supports RAID1 */
#define MPB_ATTRIB_RAID1		__cpu_to_le32(0x00000002)
/* supports RAID10 */
#define MPB_ATTRIB_RAID10		__cpu_to_le32(0x00000004)
/* supports RAID1E */
#define MPB_ATTRIB_RAID1E		__cpu_to_le32(0x00000008)
/* supports RAID5 */
#define MPB_ATTRIB_RAID5		__cpu_to_le32(0x00000010)
/* supports RAID CNG */
#define MPB_ATTRIB_RAIDCNG		__cpu_to_le32(0x00000020)
/* supports expanded stripe sizes of 256K, 512K and 1MB */
#define MPB_ATTRIB_EXP_STRIPE_SIZE	__cpu_to_le32(0x00000040)
/* supports RAID10 with more than 4 drives */
#define MPB_ATTRIB_RAID10_EXT		__cpu_to_le32(0x00000080)

/* The OROM supports RST caching of volumes */
#define MPB_ATTRIB_NVM			__cpu_to_le32(0x02000000)
/* The OROM supports creating disks greater than 2TB */
#define MPB_ATTRIB_2TB_DISK		__cpu_to_le32(0x04000000)
/* The OROM supports Bad Block Management */
#define MPB_ATTRIB_BBM			__cpu_to_le32(0x08000000)

/* The OROM supports NVM caching of volumes */
#define MPB_ATTRIB_NEVER_USE2		__cpu_to_le32(0x10000000)
/* The OROM supports creating volumes greater than 2TB */
#define MPB_ATTRIB_2TB			__cpu_to_le32(0x20000000)
/* Originally for PMP; now unused. Never use this bit! */
#define MPB_ATTRIB_NEVER_USE		__cpu_to_le32(0x40000000)
/* Verify MPB contents against checksum after reading MPB */
#define MPB_ATTRIB_CHECKSUM_VERIFY	__cpu_to_le32(0x80000000)
/* Define all supported attributes that have to be accepted by mdadm
 */
#define MPB_ATTRIB_SUPPORTED	       (MPB_ATTRIB_CHECKSUM_VERIFY | \
					MPB_ATTRIB_2TB | \
					MPB_ATTRIB_2TB_DISK | \
					MPB_ATTRIB_RAID0 | \
					MPB_ATTRIB_RAID1 | \
					MPB_ATTRIB_RAID10 | \
					MPB_ATTRIB_RAID5 | \
					MPB_ATTRIB_EXP_STRIPE_SIZE | \
					MPB_ATTRIB_RAID10_EXT | \
					MPB_ATTRIB_BBM)

/* Define attributes that are unused but not harmful */
#define MPB_ATTRIB_IGNORED		(MPB_ATTRIB_NEVER_USE)
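/* Worked example (added, illustrative): an anchor is acceptable only if its
 * attribute word sets no bit outside MPB_ATTRIB_SUPPORTED|MPB_ATTRIB_IGNORED;
 * imsm_check_attributes() later in this file implements exactly that mask
 * test.  For instance:
 *
 *	MPB_ATTRIB_CHECKSUM_VERIFY | MPB_ATTRIB_RAID5	// accepted
 *	MPB_ATTRIB_RAID5 | MPB_ATTRIB_NEVER_USE2	// rejected
 */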
#define MPB_SECTOR_CNT 2210
#define IMSM_RESERVED_SECTORS 8192
#define NUM_BLOCKS_DIRTY_STRIPE_REGION 2048
#define SECT_PER_MB_SHIFT 11
#define MAX_SECTOR_SIZE 4096
#define MULTIPLE_PPL_AREA_SIZE_IMSM (1024 * 1024) /* Size of the whole
						   * multiple PPL area
						   */

/*
 * Internal Write-intent bitmap is stored in the same area as the PPL.
 * Both features are mutually exclusive, so it is not an issue.
 * The first 8KiB of the area are reserved and shall not be used.
 */
#define IMSM_BITMAP_AREA_RESERVED_SIZE 8192

#define IMSM_BITMAP_HEADER_OFFSET (IMSM_BITMAP_AREA_RESERVED_SIZE)
#define IMSM_BITMAP_HEADER_SIZE MAX_SECTOR_SIZE

#define IMSM_BITMAP_START_OFFSET (IMSM_BITMAP_HEADER_OFFSET + IMSM_BITMAP_HEADER_SIZE)
#define IMSM_BITMAP_AREA_SIZE (MULTIPLE_PPL_AREA_SIZE_IMSM - IMSM_BITMAP_START_OFFSET)
#define IMSM_BITMAP_AND_HEADER_SIZE (IMSM_BITMAP_AREA_SIZE + IMSM_BITMAP_HEADER_SIZE)

#define IMSM_DEFAULT_BITMAP_CHUNKSIZE (64 * 1024 * 1024)
#define IMSM_DEFAULT_BITMAP_DAEMON_SLEEP 5
/*
 * This macro lets us ensure that no-one accidentally
 * changes the size of a struct
 */
#define ASSERT_SIZE(_struct, size) \
static inline void __assert_size_##_struct(void)	\
{							\
	switch (0) {					\
	case 0: break;					\
	case (sizeof(struct _struct) == size): break;	\
	}						\
}
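/* Explanatory note (added): case labels must be distinct compile-time
 * constants.  When sizeof(struct _struct) == size, the second label is 1 and
 * the function compiles; with a wrong size the comparison yields 0, which
 * duplicates 'case 0' and breaks the build.  For example:
 *
 *	ASSERT_SIZE(imsm_disk, 48)	// compiles only if imsm_disk is 48 bytes
 */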
/* Disk configuration info. */
#define IMSM_MAX_DEVICES 255
struct imsm_disk {
	__u8 serial[MAX_RAID_SERIAL_LEN];/* 0xD8 - 0xE7 ascii serial number */
	__u32 total_blocks_lo;		 /* 0xE8 - 0xEB total blocks lo */
	__u32 scsi_id;			 /* 0xEC - 0xEF scsi ID */
#define SPARE_DISK      __cpu_to_le32(0x01)  /* Spare */
#define CONFIGURED_DISK __cpu_to_le32(0x02)  /* Member of some RaidDev */
#define FAILED_DISK     __cpu_to_le32(0x04)  /* Permanent failure */
#define JOURNAL_DISK    __cpu_to_le32(0x2000000) /* Device marked as Journaling Drive */
	__u32 status;			 /* 0xF0 - 0xF3 */
	__u32 owner_cfg_num;		 /* which config 0,1,2... owns this disk */
	__u32 total_blocks_hi;		 /* 0xF4 - 0xF5 total blocks hi */
#define	IMSM_DISK_FILLERS	3
	__u32 filler[IMSM_DISK_FILLERS]; /* 0xF5 - 0x107 MPB_DISK_FILLERS for future expansion */
};
ASSERT_SIZE(imsm_disk, 48)
/* map selector for map management
 */
#define MAP_0		0
#define MAP_1		1
#define MAP_X		-1
/* RAID map configuration infos. */
struct imsm_map {
	__u32 pba_of_lba0_lo;	/* start address of partition */
	__u32 blocks_per_member_lo;/* blocks per member */
	__u32 num_data_stripes_lo;	/* number of data stripes */
	__u16 blocks_per_strip;
	__u8  map_state;	/* Normal, Uninitialized, Degraded, Failed */
#define IMSM_T_STATE_NORMAL 0
#define IMSM_T_STATE_UNINITIALIZED 1
#define IMSM_T_STATE_DEGRADED 2
#define IMSM_T_STATE_FAILED 3
	__u8  raid_level;
#define IMSM_T_RAID0 0
#define IMSM_T_RAID1 1
#define IMSM_T_RAID5 5
#define IMSM_T_RAID10 10
	__u8  num_members;	/* number of member disks */
	__u8  num_domains;	/* number of parity domains */
	__u8  failed_disk_num;	/* valid only when state is degraded */
	__u8  ddf;
	__u32 pba_of_lba0_hi;
	__u32 blocks_per_member_hi;
	__u32 num_data_stripes_hi;
	__u32 filler[4];	/* expansion area */
#define IMSM_ORD_REBUILD (1 << 24)
	__u32 disk_ord_tbl[1];	/* disk_ord_tbl[num_members],
				 * top byte contains some flags
				 */
};
ASSERT_SIZE(imsm_map, 52)
struct imsm_vol {
	__u32 curr_migr_unit_lo;
	__u32 checkpoint_id;	/* id to access curr_migr_unit */
#define MIGR_STATE_NORMAL 0
#define MIGR_STATE_MIGRATING 1
	__u8  migr_state;	/* Normal or Migrating */
#define MIGR_INIT 0
#define MIGR_REBUILD 1
#define MIGR_VERIFY 2 /* analogous to echo check > sync_action */
#define MIGR_GEN_MIGR 3
#define MIGR_STATE_CHANGE 4
#define MIGR_REPAIR 5
	__u8  migr_type;	/* Initializing, Rebuilding, ... */
#define RAIDVOL_CLEAN 0
#define RAIDVOL_DIRTY 1
#define RAIDVOL_DSRECORD_VALID 2
	__u8  dirty;
	__u8  fs_state;		/* fast-sync state for CnG (0xff == disabled) */
	__u16 verify_errors;	/* number of mismatches */
	__u16 bad_blocks;	/* number of bad blocks during verify */
	__u32 curr_migr_unit_hi;
	__u32 filler[3];
	struct imsm_map map[1];
	/* here comes another one if migr_state */
};
ASSERT_SIZE(imsm_vol, 84)
struct imsm_dev {
	__u8  volume[MAX_RAID_SERIAL_LEN];
	__u32 size_low;
	__u32 size_high;
#define DEV_BOOTABLE		__cpu_to_le32(0x01)
#define DEV_BOOT_DEVICE		__cpu_to_le32(0x02)
#define DEV_READ_COALESCING	__cpu_to_le32(0x04)
#define DEV_WRITE_COALESCING	__cpu_to_le32(0x08)
#define DEV_LAST_SHUTDOWN_DIRTY	__cpu_to_le32(0x10)
#define DEV_HIDDEN_AT_BOOT	__cpu_to_le32(0x20)
#define DEV_CURRENTLY_HIDDEN	__cpu_to_le32(0x40)
#define DEV_VERIFY_AND_FIX	__cpu_to_le32(0x80)
#define DEV_MAP_STATE_UNINIT	__cpu_to_le32(0x100)
#define DEV_NO_AUTO_RECOVERY	__cpu_to_le32(0x200)
#define DEV_CLONE_N_GO		__cpu_to_le32(0x400)
#define DEV_CLONE_MAN_SYNC	__cpu_to_le32(0x800)
#define DEV_CNG_MASTER_DISK_NUM	__cpu_to_le32(0x1000)
	__u32 status;		/* Persistent RaidDev status */
	__u32 reserved_blocks;	/* Reserved blocks at beginning of volume */
	__u8  migr_priority;
	__u8  num_sub_vols;
	__u8  tid;
	__u8  cng_master_disk;
	__u16 cache_policy;
	__u8  cng_state;
	__u8  cng_sub_state;
	__u16 my_vol_raid_dev_num; /* Used in Unique volume Id for this RaidDev */

	/* NVM_EN */
	__u8 nv_cache_mode;
	__u8 filler_1;

	/* Unique Volume Id of the NvCache Volume associated with this volume */
	__u32 nvc_vol_orig_family_num;
	__u16 nvc_vol_raid_dev_num;

#define RWH_OFF 0
#define RWH_DISTRIBUTED 1
#define RWH_JOURNALING_DRIVE 2
#define RWH_MULTIPLE_DISTRIBUTED 3
#define RWH_MULTIPLE_PPLS_JOURNALING_DRIVE 4
#define RWH_MULTIPLE_OFF 5
#define RWH_BITMAP 6
	__u8  rwh_policy; /* Raid Write Hole Policy */
	__u8  jd_serial[MAX_RAID_SERIAL_LEN]; /* Journal Drive serial number */
	__u8  filler_2;

#define IMSM_DEV_FILLERS 3
	__u32 filler[IMSM_DEV_FILLERS];
	struct imsm_vol vol;
};
ASSERT_SIZE(imsm_dev, 164)
struct imsm_super {
	__u8 sig[MAX_SIGNATURE_LENGTH];	/* 0x00 - 0x1F */
	__u32 check_sum;		/* 0x20 - 0x23 MPB Checksum */
	__u32 mpb_size;			/* 0x24 - 0x27 Size of MPB */
	__u32 family_num;		/* 0x28 - 0x2B Checksum from first time this config was written */
	__u32 generation_num;		/* 0x2C - 0x2F Incremented each time this array's MPB is written */
	__u32 error_log_size;		/* 0x30 - 0x33 in bytes */
	__u32 attributes;		/* 0x34 - 0x37 */
	__u8 num_disks;			/* 0x38 Number of configured disks */
	__u8 num_raid_devs;		/* 0x39 Number of configured volumes */
	__u8 error_log_pos;		/* 0x3A */
	__u8 fill[1];			/* 0x3B */
	__u32 cache_size;		/* 0x3c - 0x40 in mb */
	__u32 orig_family_num;		/* 0x40 - 0x43 original family num */
	__u32 pwr_cycle_count;		/* 0x44 - 0x47 simulated power cycle count for array */
	__u32 bbm_log_size;		/* 0x48 - 0x4B - size of bad Block Mgmt Log in bytes */
	__u16 num_raid_devs_created;	/* 0x4C - 0x4D Used for generating unique
					 * volume IDs for raid_dev created in this array
					 * (starts at 1)
					 */
	__u16 filler1;			/* 0x4E - 0x4F */
	__u64 creation_time;		/* 0x50 - 0x57 Array creation time */
#define IMSM_FILLERS 32
	__u32 filler[IMSM_FILLERS];	/* 0x58 - 0xD7 RAID_MPB_FILLERS */
	struct imsm_disk disk[1];	/* 0xD8 diskTbl[numDisks] */
	/* here comes imsm_dev[num_raid_devs] */
	/* here comes BBM logs */
};
ASSERT_SIZE(imsm_super, 264)
#define BBM_LOG_MAX_ENTRIES 254
#define BBM_LOG_MAX_LBA_ENTRY_VAL 256	/* Represents 256 LBAs */
#define BBM_LOG_SIGNATURE 0xabadb10c

struct bbm_log_block_addr {
	__u16 w1;
	__u32 dw1;
} __attribute__ ((__packed__));

struct bbm_log_entry {
	__u8 marked_count;	/* Number of blocks marked - 1 */
	__u8 disk_ordinal;	/* Disk entry within the imsm_super */
	struct bbm_log_block_addr defective_block_start;
} __attribute__ ((__packed__));

struct bbm_log {
	__u32 signature;	/* 0xABADB10C */
	__u32 entry_count;
	struct bbm_log_entry marked_block_entries[BBM_LOG_MAX_ENTRIES];
};
ASSERT_SIZE(bbm_log, 2040)
static char *map_state_str[] = { "normal", "uninitialized", "degraded", "failed" };
#define BLOCKS_PER_KB	(1024/512)

#define RAID_DISK_RESERVED_BLOCKS_IMSM_HI 2209

#define GEN_MIGR_AREA_SIZE 2048 /* General Migration Copy Area size in blocks */
#define MIGR_REC_BUF_SECTORS 1 /* size of migr_record i/o buffer in sectors */
#define MIGR_REC_SECTOR_POSITION 1 /* migr_record position offset on disk,
				    * MIGR_REC_BUF_SECTORS <= MIGR_REC_SECTOR_POS
				    */
#define UNIT_SRC_NORMAL     0 /* Source data for curr_migr_unit must
			       * be recovered using srcMap */
#define UNIT_SRC_IN_CP_AREA 1 /* Source data for curr_migr_unit has
			       * already been migrated and must
			       * be recovered from checkpoint area */

#define PPL_ENTRY_SPACE (128 * 1024) /* Size of single PPL, without the header */
struct migr_record {
	__u32 rec_status;	    /* Status used to determine how to restart
				     * migration in case it aborts
				     */
	__u32 curr_migr_unit_lo;    /* 0..numMigrUnits-1 */
	__u32 family_num;	    /* Family number of MPB
				     * containing the RaidDev
				     * that is migrating */
	__u32 ascending_migr;	    /* True if migrating in increasing
				     * order of lbas */
	__u32 blocks_per_unit;	    /* Num disk blocks per unit of operation */
	__u32 dest_depth_per_unit;  /* Num member blocks each destMap
				     * member disk
				     * advances per unit-of-operation */
	__u32 ckpt_area_pba_lo;	    /* Pba of first block of ckpt copy area */
	__u32 dest_1st_member_lba_lo;	/* First member lba on first
					 * stripe of destination */
	__u32 num_migr_units_lo;    /* Total num migration units-of-op */
	__u32 post_migr_vol_cap;    /* Size of volume after
				     * migration completes */
	__u32 post_migr_vol_cap_hi; /* Expansion space for LBA64 */
	__u32 ckpt_read_disk_num;   /* Which member disk in destSubMap[0] the
				     * migration ckpt record was read from
				     * (for recovered migrations) */
	__u32 curr_migr_unit_hi;    /* 0..numMigrUnits-1 high order 32 bits */
	__u32 ckpt_area_pba_hi;	    /* Pba of first block of ckpt copy area
				     * high order 32 bits */
	__u32 dest_1st_member_lba_hi; /* First member lba on first stripe of
				       * destination - high order 32 bits */
	__u32 num_migr_units_hi;    /* Total num migration units-of-op
				     * high order 32 bits */
	__u32 filler[16];
} __attribute__ ((__packed__));
ASSERT_SIZE(migr_record, 128)
/**
 * enum imsm_status - internal IMSM return values representation.
 * @STATUS_OK: function succeeded.
 * @STATUS_ERROR: General error occurred (not specified).
 *
 * Typedefed to imsm_status_t.
 */
typedef enum imsm_status {
	IMSM_STATUS_ERROR = -1,
	IMSM_STATUS_OK = 0,
} imsm_status_t;
struct md_list {
	/* usage marker:
	 *  1: load metadata
	 *  2: metadata does not match
	 *  4: already checked
	 */
	int   used;
	char  *devname;
	int   found;
	int   container;
	dev_t st_rdev;
	struct md_list *next;
};
static __u8 migr_type(struct imsm_dev *dev)
{
	if (dev->vol.migr_type == MIGR_VERIFY &&
	    dev->status & DEV_VERIFY_AND_FIX)
		return MIGR_REPAIR;
	else
		return dev->vol.migr_type;
}
static void set_migr_type(struct imsm_dev *dev, __u8 migr_type)
{
	/* for compatibility with older oroms convert MIGR_REPAIR into
	 * MIGR_VERIFY w/ DEV_VERIFY_AND_FIX status
	 */
	if (migr_type == MIGR_REPAIR) {
		dev->vol.migr_type = MIGR_VERIFY;
		dev->status |= DEV_VERIFY_AND_FIX;
	} else {
		dev->vol.migr_type = migr_type;
		dev->status &= ~DEV_VERIFY_AND_FIX;
	}
}
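/* Worked example (added, illustrative): MIGR_REPAIR is never stored as-is;
 * it round-trips through the VERIFY encoding:
 *
 *	set_migr_type(dev, MIGR_REPAIR);
 *	// dev->vol.migr_type == MIGR_VERIFY and DEV_VERIFY_AND_FIX is set,
 *	// so an older OROM sees a plain verify pass...
 *	migr_type(dev);	// ...while mdadm still reads back MIGR_REPAIR
 */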
static unsigned int sector_count(__u32 bytes, unsigned int sector_size)
{
	return ROUND_UP(bytes, sector_size) / sector_size;
}

static unsigned int mpb_sectors(struct imsm_super *mpb,
				unsigned int sector_size)
{
	return sector_count(__le32_to_cpu(mpb->mpb_size), sector_size);
}
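/* Worked example (added, illustrative): an anchor with mpb_size = 4660 bytes
 * needs sector_count(4660, 512) = ROUND_UP(4660, 512) / 512 = 5120 / 512 =
 * 10 sectors on a 512-byte-sector drive, but only 2 sectors on a 4Kn drive
 * (ROUND_UP(4660, 4096) / 4096 = 8192 / 4096 = 2).
 */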
struct intel_dev {
	struct imsm_dev *dev;
	struct intel_dev *next;
	unsigned index;
};
struct intel_hba {
	enum sys_dev_type type;
	char *path;
	char *pci_id;
	struct intel_hba *next;
};
/* internal representation of IMSM metadata */
struct intel_super {
	union {
		void *buf; /* O_DIRECT buffer for reading/writing metadata */
		struct imsm_super *anchor; /* immovable parameters */
	};
	union {
		void *migr_rec_buf; /* buffer for I/O operations */
		struct migr_record *migr_rec; /* migration record */
	};
	int clean_migration_record_by_mdmon; /* when reshape is switched to next
		array, it indicates that mdmon is allowed to clean migration
		record */
	size_t len; /* size of the 'buf' allocation */
	size_t extra_space; /* extra space in 'buf' that is not used yet */
	void *next_buf; /* for realloc'ing buf from the manager */
	size_t next_len;
	int updates_pending; /* count of pending updates for mdmon */
	int current_vol; /* index of raid device undergoing creation */
	unsigned long long create_offset; /* common start for 'current_vol' */
	__u32 random; /* random data for seeding new family numbers */
	struct intel_dev *devlist;
	unsigned int sector_size; /* sector size of used member drives */
	struct dl {
		struct dl *next;
		int index;
		__u8 serial[MAX_RAID_SERIAL_LEN];
		int major, minor;
		char *devname;
		struct imsm_disk disk;
		int fd;
		int extent_cnt;
		struct extent *e; /* for determining freespace @ create */
		int raiddisk; /* slot to fill in autolayout */
		enum action action;
	} *disks, *current_disk;
	struct dl *disk_mgmt_list; /* list of disks to add/remove while mdmon
				      active */
	struct dl *missing; /* disks removed while we weren't looking */
	struct bbm_log *bbm_log;
	struct intel_hba *hba; /* device path of the raid controller for this metadata */
	const struct imsm_orom *orom; /* platform firmware support */
	struct intel_super *next; /* (temp) list for disambiguating family_num */
	struct md_bb bb;	/* memory for get_bad_blocks call */
};
struct intel_disk {
	struct imsm_disk disk;
	#define IMSM_UNKNOWN_OWNER (-1)
	int owner;
	struct intel_disk *next;
};
/**
 * struct extent - reserved space details.
 * @start: start offset.
 * @size: size of reservation, set to 0 for metadata reservation.
 * @vol: index of the volume, meaningful if &size is set.
 */
struct extent {
	unsigned long long start, size;
	int vol;
};
/* definitions of reshape process types */
enum imsm_reshape_type {
	CH_TAKEOVER,
	CH_MIGRATION,
	CH_ARRAY_SIZE,
};
/* definition of messages passed to imsm_process_update */
enum imsm_update_type {
	update_activate_spare,
	update_create_array,
	update_kill_array,
	update_rename_array,
	update_add_remove_disk,
	update_reshape_container_disks,
	update_reshape_migration,
	update_takeover,
	update_general_migration_checkpoint,
	update_size_change,
	update_prealloc_badblocks_mem,
	update_rwh_policy,
};
struct imsm_update_activate_spare {
	enum imsm_update_type type;
	struct dl *dl;
	int slot;
	int array;
	struct imsm_update_activate_spare *next;
};

struct geo_params {
	char devnm[32];
	char *dev_name;
	unsigned long long size;
	int level;
	int layout;
	int chunksize;
	int raid_disks;
};
enum takeover_direction {
	R10_TO_R0,
	R0_TO_R10
};
struct imsm_update_takeover {
	enum imsm_update_type type;
	int subarray;
	enum takeover_direction direction;
};
struct imsm_update_reshape {
	enum imsm_update_type type;
	int old_raid_disks;
	int new_raid_disks;

	int new_disks[1]; /* new_raid_disks - old_raid_disks makedev number */
};
struct imsm_update_reshape_migration {
	enum imsm_update_type type;
	int old_raid_disks;
	int new_raid_disks;
	/* fields for array migration changes
	 */
	int subdev;
	int new_level;
	int new_layout;
	int new_chunksize;

	int new_disks[1]; /* new_raid_disks - old_raid_disks makedev number */
};
struct imsm_update_size_change {
	enum imsm_update_type type;
	int subdev;
	long long new_size;
};
struct imsm_update_general_migration_checkpoint {
	enum imsm_update_type type;
	__u64 curr_migr_unit;
};
struct disk_info {
	__u8 serial[MAX_RAID_SERIAL_LEN];
};
struct imsm_update_create_array {
	enum imsm_update_type type;
	int dev_idx;
	struct imsm_dev dev;
};
struct imsm_update_kill_array {
	enum imsm_update_type type;
	int dev_idx;
};
struct imsm_update_rename_array {
	enum imsm_update_type type;
	__u8 name[MAX_RAID_SERIAL_LEN];
	int dev_idx;
};
struct imsm_update_add_remove_disk {
	enum imsm_update_type type;
};
struct imsm_update_prealloc_bb_mem {
	enum imsm_update_type type;
};
struct imsm_update_rwh_policy {
	enum imsm_update_type type;
	int new_policy;
	int dev_idx;
};
enum imsm_sku {
	SKU_NO_KEY = 0,
	SKU_STANDARD_KEY = 1,
	SKU_PREMIUM_KEY = 2,
	SKU_INTEL_SSD_ONLY_KEY = 3,
	SKU_RAID1_ONLY_KEY = 4
};
static const char *_sys_dev_type[] = {
	[SYS_DEV_UNKNOWN] = "Unknown",
	[SYS_DEV_SAS] = "SAS",
	[SYS_DEV_SATA] = "SATA",
	[SYS_DEV_NVME] = "NVMe",
	[SYS_DEV_VMD] = "VMD",
	[SYS_DEV_SATA_VMD] = "SATA VMD"
};
struct imsm_chunk_ops {
	unsigned int chunk;
	char *chunk_str;
};

static const struct imsm_chunk_ops imsm_chunk_ops[] = {
	{IMSM_OROM_SSS_2kB, "2k"},
	{IMSM_OROM_SSS_4kB, "4k"},
	{IMSM_OROM_SSS_8kB, "8k"},
	{IMSM_OROM_SSS_16kB, "16k"},
	{IMSM_OROM_SSS_32kB, "32k"},
	{IMSM_OROM_SSS_64kB, "64k"},
	{IMSM_OROM_SSS_128kB, "128k"},
	{IMSM_OROM_SSS_256kB, "256k"},
	{IMSM_OROM_SSS_512kB, "512k"},
	{IMSM_OROM_SSS_1MB, "1M"},
	{IMSM_OROM_SSS_2MB, "2M"},
	{IMSM_OROM_SSS_4MB, "4M"},
	{IMSM_OROM_SSS_8MB, "8M"},
	{IMSM_OROM_SSS_16MB, "16M"},
	{IMSM_OROM_SSS_32MB, "32M"},
	{IMSM_OROM_SSS_64MB, "64M"},
	{0, NULL}
};
static int no_platform = -1;

static int check_no_platform(void)
{
	static const char search[] = "mdadm.imsm.test=1";
	FILE *fp;

	if (no_platform >= 0)
		return no_platform;

	if (check_env("IMSM_NO_PLATFORM")) {
		no_platform = 1;
		return 1;
	}
	fp = fopen("/proc/cmdline", "r");
	if (fp) {
		char *l = conf_line(fp);
		char *w = l;

		do {
			if (strcmp(w, search) == 0)
				no_platform = 1;
			w = dl_next(w);
		} while (w != l);
		free_line(l);
		fclose(fp);
		if (no_platform >= 0)
			return no_platform;
	}
	no_platform = 0;
	return 0;
}

void imsm_set_no_platform(int v)
{
	no_platform = v;
}
const char *get_sys_dev_type(enum sys_dev_type type)
{
	if (type >= SYS_DEV_MAX)
		type = SYS_DEV_UNKNOWN;

	return _sys_dev_type[type];
}
static struct intel_hba *alloc_intel_hba(struct sys_dev *device)
{
	struct intel_hba *result = xmalloc(sizeof(*result));

	result->type = device->type;
	result->path = xstrdup(device->path);
	result->pci_id = NULL;
	if (result->path && (result->pci_id = strrchr(result->path, '/')) != NULL)
		result->pci_id++;
	result->next = NULL;

	return result;
}
static struct intel_hba *find_intel_hba(struct intel_hba *hba, struct sys_dev *device)
{
	struct intel_hba *result;

	for (result = hba; result; result = result->next) {
		if (result->type == device->type && strcmp(result->path, device->path) == 0)
			break;
	}
	return result;
}
static int attach_hba_to_super(struct intel_super *super, struct sys_dev *device)
{
	struct intel_hba *hba;

	/* check if disk attached to Intel HBA */
	hba = find_intel_hba(super->hba, device);
	if (hba != NULL)
		return 1;
	/* Check if HBA is already attached to super */
	if (super->hba == NULL) {
		super->hba = alloc_intel_hba(device);
		return 1;
	}

	hba = super->hba;
	/* Intel metadata allows for all disks attached to the same type HBA.
	 * Do not support HBA types mixing
	 */
	if (device->type != hba->type)
		return 2;

	/* Multiple same type HBAs can be used if they share the same OROM */
	const struct imsm_orom *device_orom = get_orom_by_device_id(device->dev_id);

	if (device_orom != super->orom)
		return 2;

	while (hba->next)
		hba = hba->next;

	hba->next = alloc_intel_hba(device);
	return 1;
}
static struct sys_dev *find_disk_attached_hba(int fd, const char *devname)
{
	struct sys_dev *list, *elem;
	char *disk_path;

	if ((list = find_intel_devices()) == NULL)
		return 0;

	if (!is_fd_valid(fd))
		disk_path = (char *) devname;
	else
		disk_path = diskfd_to_devpath(fd, 1, NULL);

	if (!disk_path)
		return 0;

	for (elem = list; elem; elem = elem->next)
		if (is_path_attached_to_hba(disk_path, elem->path))
			break;

	if (disk_path != devname)
		free(disk_path);

	return elem;
}
static int find_intel_hba_capability(int fd, struct intel_super *super,
				     char *devname);
,
828 static struct supertype
*match_metadata_desc_imsm(char *arg
)
830 struct supertype
*st
;
832 if (strcmp(arg
, "imsm") != 0 &&
833 strcmp(arg
, "default") != 0
837 st
= xcalloc(1, sizeof(*st
));
838 st
->ss
= &super_imsm
;
839 st
->max_devs
= IMSM_MAX_DEVICES
;
840 st
->minor_version
= 0;
static __u8 *get_imsm_version(struct imsm_super *mpb)
{
	return &mpb->sig[MPB_SIG_LEN];
}
/* retrieve a disk directly from the anchor when the anchor is known to be
 * up-to-date, currently only at load time
 */
static struct imsm_disk *__get_imsm_disk(struct imsm_super *mpb, __u8 index)
{
	if (index >= mpb->num_disks)
		return NULL;

	return &mpb->disk[index];
}
/* retrieve the disk description based on an index of the disk
 * in the sub-array
 */
static struct dl *get_imsm_dl_disk(struct intel_super *super, __u8 index)
{
	struct dl *d;

	for (d = super->disks; d; d = d->next)
		if (d->index == index)
			return d;

	return NULL;
}
/* retrieve a disk from the parsed metadata */
static struct imsm_disk *get_imsm_disk(struct intel_super *super, __u8 index)
{
	struct dl *dl;

	dl = get_imsm_dl_disk(super, index);
	if (dl)
		return &dl->disk;

	return NULL;
}
/* generate a checksum directly from the anchor when the anchor is known to be
 * up-to-date, currently only at load or write_super after coalescing
 */
static __u32 __gen_imsm_checksum(struct imsm_super *mpb)
{
	__u32 end = mpb->mpb_size / sizeof(end);
	__u32 *p = (__u32 *) mpb;
	__u32 sum = 0;

	while (end--) {
		sum += __le32_to_cpu(*p);
		p++;
	}

	return sum - __le32_to_cpu(mpb->check_sum);
}
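/* Explanatory note (added): the stored check_sum field is itself part of the
 * summed area, so its current value is subtracted back out.  Verification is
 * therefore just:
 *
 *	__gen_imsm_checksum(mpb) == __le32_to_cpu(mpb->check_sum)
 */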
static size_t sizeof_imsm_map(struct imsm_map *map)
{
	return sizeof(struct imsm_map) + sizeof(__u32) * (map->num_members - 1);
}
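/* Worked example (added, illustrative): imsm_map declares disk_ord_tbl[1],
 * so a 4-member map occupies sizeof(struct imsm_map) + 3 * sizeof(__u32) =
 * 52 + 12 = 64 bytes on disk.
 */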
struct imsm_map *get_imsm_map(struct imsm_dev *dev, int second_map)
{
	/* A device can have 2 maps if it is in the middle of a migration.
	 * If second_map is:
	 *    MAP_0 - we return the first map
	 *    MAP_1 - we return the second map if it exists, else NULL
	 *    MAP_X - we return the second map if it exists, else the first
	 */
	struct imsm_map *map = &dev->vol.map[0];
	struct imsm_map *map2 = NULL;

	if (dev->vol.migr_state)
		map2 = (void *)map + sizeof_imsm_map(map);

	switch (second_map) {
	case MAP_0:
		break;
	case MAP_1:
		map = map2;
		break;
	case MAP_X:
		if (map2)
			map = map2;
		break;
	default:
		map = NULL;
	}
	return map;
}
/* return the size of the device.
 * migr_state increases the returned size if map[0] were to be duplicated
 */
static size_t sizeof_imsm_dev(struct imsm_dev *dev, int migr_state)
{
	size_t size = sizeof(*dev) - sizeof(struct imsm_map) +
		      sizeof_imsm_map(get_imsm_map(dev, MAP_0));

	/* migrating means an additional map */
	if (dev->vol.migr_state)
		size += sizeof_imsm_map(get_imsm_map(dev, MAP_1));
	else if (migr_state)
		size += sizeof_imsm_map(get_imsm_map(dev, MAP_0));

	return size;
}
/* retrieve disk serial number list from a metadata update */
static struct disk_info *get_disk_info(struct imsm_update_create_array *update)
{
	void *u = update;
	struct disk_info *inf;

	inf = u + sizeof(*update) - sizeof(struct imsm_dev) +
	      sizeof_imsm_dev(&update->dev, 0);

	return inf;
}
/**
 * __get_imsm_dev() - Get device with index from imsm_super.
 * @mpb: &imsm_super pointer, not NULL.
 * @index: Device index.
 *
 * Function is guaranteed to return non-NULL: it aborts in any case where
 * NULL would otherwise be returned.
 *
 * Device index should be in range 0 up to num_raid_devs.
 * Function assumes the index was already verified.
 * Index must be valid, otherwise abort() is called.
 *
 * Return: Pointer to corresponding imsm_dev.
 */
static struct imsm_dev *__get_imsm_dev(struct imsm_super *mpb, __u8 index)
{
	int i;
	void *_mpb = mpb;
	size_t offset;

	if (index >= mpb->num_raid_devs)
		goto error;

	/* devices start after all disks */
	offset = ((void *) &mpb->disk[mpb->num_disks]) - _mpb;

	for (i = 0; i <= index; i++, offset += sizeof_imsm_dev(_mpb + offset, 0))
		if (i == index)
			return _mpb + offset;
error:
	pr_err("cannot find imsm_dev with index %u in imsm_super\n", index);
	abort();
}
/**
 * get_imsm_dev() - Get device with index from intel_super.
 * @super: &intel_super pointer, not NULL.
 * @index: Device index.
 *
 * Function is guaranteed to return non-NULL: it aborts in any case where
 * NULL would otherwise be returned.
 *
 * Device index should be in range 0 up to num_raid_devs.
 * Function assumes the index was already verified.
 * Index must be valid, otherwise abort() is called.
 *
 * Return: Pointer to corresponding imsm_dev.
 */
static struct imsm_dev *get_imsm_dev(struct intel_super *super, __u8 index)
{
	struct intel_dev *dv;

	if (index >= super->anchor->num_raid_devs)
		goto error;

	for (dv = super->devlist; dv; dv = dv->next)
		if (dv->index == index)
			return dv->dev;
error:
	pr_err("cannot find imsm_dev with index %u in intel_super\n", index);
	abort();
}
static inline unsigned long long __le48_to_cpu(const struct bbm_log_block_addr *addr)
{
	return ((((__u64)__le32_to_cpu(addr->dw1)) << 16) |
		__le16_to_cpu(addr->w1));
}

static inline struct bbm_log_block_addr __cpu_to_le48(unsigned long long sec)
{
	struct bbm_log_block_addr addr;

	addr.w1 = __cpu_to_le16((__u16)(sec & 0xffff));
	addr.dw1 = __cpu_to_le32((__u32)(sec >> 16) & 0xffffffff);
	return addr;
}
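/* Worked example (added, illustrative): the 48-bit LBA 0x123456789abc splits
 * into the low 16 bits and the high 32 bits, and round-trips exactly:
 *
 *	struct bbm_log_block_addr a = __cpu_to_le48(0x123456789abcULL);
 *	// a.w1 holds 0x9abc, a.dw1 holds 0x12345678 (little-endian on disk)
 *	__le48_to_cpu(&a) == 0x123456789abcULL
 */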
/* get size of the bbm log */
static __u32 get_imsm_bbm_log_size(struct bbm_log *log)
{
	if (!log || log->entry_count == 0)
		return 0;

	return sizeof(log->signature) +
		sizeof(log->entry_count) +
		log->entry_count * sizeof(struct bbm_log_entry);
}
/* check if bad block is not partially stored in bbm log */
static int is_stored_in_bbm(struct bbm_log *log, const __u8 idx, const unsigned
			    long long sector, const int length, __u32 *pos)
{
	__u32 i;

	for (i = *pos; i < log->entry_count; i++) {
		struct bbm_log_entry *entry = &log->marked_block_entries[i];
		unsigned long long bb_start;
		unsigned long long bb_end;

		bb_start = __le48_to_cpu(&entry->defective_block_start);
		bb_end = bb_start + (entry->marked_count + 1);

		if ((entry->disk_ordinal == idx) && (bb_start >= sector) &&
		    (bb_end <= sector + length)) {
			*pos = i;
			return 1;
		}
	}
	return 0;
}

/* record new bad block in bbm log */
static int record_new_badblock(struct bbm_log *log, const __u8 idx, unsigned
			       long long sector, int length)
{
	int new_bb = 0;
	__u32 pos = 0;
	struct bbm_log_entry *entry = NULL;

	while (is_stored_in_bbm(log, idx, sector, length, &pos)) {
		struct bbm_log_entry *e = &log->marked_block_entries[pos];

		if ((e->marked_count + 1 == BBM_LOG_MAX_LBA_ENTRY_VAL) &&
		    (__le48_to_cpu(&e->defective_block_start) == sector)) {
			sector += BBM_LOG_MAX_LBA_ENTRY_VAL;
			length -= BBM_LOG_MAX_LBA_ENTRY_VAL;
			if (length <= 0)
				return 1;
		} else
			entry = e;
		pos++;
	}

	/* reuse an existing entry covering the start of the range, if any */
	if (entry) {
		int cnt = (length <= BBM_LOG_MAX_LBA_ENTRY_VAL) ? length :
			BBM_LOG_MAX_LBA_ENTRY_VAL;
		entry->defective_block_start = __cpu_to_le48(sector);
		entry->marked_count = cnt - 1;
		if (cnt == length)
			return 1;

		sector += cnt;
		length -= cnt;
	}

	new_bb = ROUND_UP(length, BBM_LOG_MAX_LBA_ENTRY_VAL) /
		BBM_LOG_MAX_LBA_ENTRY_VAL;
	if (log->entry_count + new_bb > BBM_LOG_MAX_ENTRIES)
		return 0;

	while (length > 0) {
		int cnt = (length <= BBM_LOG_MAX_LBA_ENTRY_VAL) ? length :
			BBM_LOG_MAX_LBA_ENTRY_VAL;
		struct bbm_log_entry *entry =
			&log->marked_block_entries[log->entry_count];

		entry->defective_block_start = __cpu_to_le48(sector);
		entry->marked_count = cnt - 1;
		entry->disk_ordinal = idx;

		sector += cnt;
		length -= cnt;

		log->entry_count++;
	}

	return new_bb;
}
/* clear all bad blocks for given disk */
static void clear_disk_badblocks(struct bbm_log *log, const __u8 idx)
{
	__u32 i = 0;

	while (i < log->entry_count) {
		struct bbm_log_entry *entries = log->marked_block_entries;

		if (entries[i].disk_ordinal == idx) {
			if (i < log->entry_count - 1)
				entries[i] = entries[log->entry_count - 1];
			log->entry_count--;
		} else {
			i++;
		}
	}
}
/* clear given bad block */
static int clear_badblock(struct bbm_log *log, const __u8 idx, const unsigned
			  long long sector, const int length) {
	__u32 i = 0;

	while (i < log->entry_count) {
		struct bbm_log_entry *entries = log->marked_block_entries;

		if ((entries[i].disk_ordinal == idx) &&
		    (__le48_to_cpu(&entries[i].defective_block_start) ==
		     sector) && (entries[i].marked_count + 1 == length)) {
			if (i < log->entry_count - 1)
				entries[i] = entries[log->entry_count - 1];
			log->entry_count--;
			break;
		}
		i++;
	}

	return 1;
}
/* allocate and load BBM log from metadata */
static int load_bbm_log(struct intel_super *super)
{
	struct imsm_super *mpb = super->anchor;
	__u32 bbm_log_size = __le32_to_cpu(mpb->bbm_log_size);

	super->bbm_log = xcalloc(1, sizeof(struct bbm_log));
	if (!super->bbm_log)
		return 1;

	if (bbm_log_size) {
		struct bbm_log *log = (void *)mpb +
			__le32_to_cpu(mpb->mpb_size) - bbm_log_size;

		__u32 entry_count;

		if (bbm_log_size < sizeof(log->signature) +
		    sizeof(log->entry_count))
			return 2;

		entry_count = __le32_to_cpu(log->entry_count);
		if ((__le32_to_cpu(log->signature) != BBM_LOG_SIGNATURE) ||
		    (entry_count > BBM_LOG_MAX_ENTRIES))
			return 3;

		if (bbm_log_size !=
		    sizeof(log->signature) + sizeof(log->entry_count) +
		    entry_count * sizeof(struct bbm_log_entry))
			return 4;

		memcpy(super->bbm_log, log, bbm_log_size);
	} else {
		super->bbm_log->signature = __cpu_to_le32(BBM_LOG_SIGNATURE);
		super->bbm_log->entry_count = 0;
	}

	return 0;
}
/* checks if bad block is within volume boundaries */
static int is_bad_block_in_volume(const struct bbm_log_entry *entry,
				  const unsigned long long start_sector,
				  const unsigned long long size)
{
	unsigned long long bb_start;
	unsigned long long bb_end;

	bb_start = __le48_to_cpu(&entry->defective_block_start);
	bb_end = bb_start + (entry->marked_count + 1);

	if (((bb_start >= start_sector) && (bb_start < start_sector + size)) ||
	    ((bb_end >= start_sector) && (bb_end <= start_sector + size)))
		return 1;

	return 0;
}
/* get list of bad blocks on a drive for a volume */
static void get_volume_badblocks(const struct bbm_log *log, const __u8 idx,
				 const unsigned long long start_sector,
				 const unsigned long long size,
				 struct md_bb *bbs)
{
	__u32 count = 0;
	__u32 i;

	for (i = 0; i < log->entry_count; i++) {
		const struct bbm_log_entry *ent =
			&log->marked_block_entries[i];
		struct md_bb_entry *bb;

		if ((ent->disk_ordinal == idx) &&
		    is_bad_block_in_volume(ent, start_sector, size)) {

			if (!bbs->entries) {
				bbs->entries = xmalloc(BBM_LOG_MAX_ENTRIES *
						       sizeof(*bb));
				if (!bbs->entries)
					break;
			}

			bb = &bbs->entries[count++];
			bb->sector = __le48_to_cpu(&ent->defective_block_start);
			bb->length = ent->marked_count + 1;
		}
	}
	bbs->count = count;
}
/*
 * for second_map:
 *  == MAP_0 get first map
 *  == MAP_1 get second map
 *  == MAP_X then get map according to the current migr_state
 */
static __u32 get_imsm_ord_tbl_ent(struct imsm_dev *dev,
				  int slot,
				  int second_map)
{
	struct imsm_map *map;

	map = get_imsm_map(dev, second_map);

	/* top byte identifies disk under rebuild */
	return __le32_to_cpu(map->disk_ord_tbl[slot]);
}
#define ord_to_idx(ord) (((ord) << 8) >> 8)
static __u32 get_imsm_disk_idx(struct imsm_dev *dev, int slot, int second_map)
{
	__u32 ord = get_imsm_ord_tbl_ent(dev, slot, second_map);

	return ord_to_idx(ord);
}
static void set_imsm_ord_tbl_ent(struct imsm_map *map, int slot, __u32 ord)
{
	map->disk_ord_tbl[slot] = __cpu_to_le32(ord);
}
static int get_imsm_disk_slot(struct imsm_map *map, const unsigned int idx)
{
	int slot;
	__u32 ord;

	for (slot = 0; slot < map->num_members; slot++) {
		ord = __le32_to_cpu(map->disk_ord_tbl[slot]);
		if (ord_to_idx(ord) == idx)
			return slot;
	}

	return IMSM_STATUS_ERROR;
}
/**
 * update_imsm_raid_level() - update raid level appropriately in &imsm_map.
 * @map: &imsm_map pointer.
 * @new_level: MD style level.
 *
 * For backward compatibility reasons we need to differentiate RAID10.
 * In the past IMSM RAID10 was presented as RAID1.
 * Keep compatibility unless it is not explicitly updated by UEFI driver.
 *
 * Routine needs num_members to be set and (optionally) raid_level.
 */
static void update_imsm_raid_level(struct imsm_map *map, int new_level)
{
	if (new_level != IMSM_T_RAID10) {
		map->raid_level = new_level;
		return;
	}

	/*
	 * RAID0 to RAID10 migration.
	 * Because compatibility with the VROC UEFI driver must be maintained,
	 * this case must be handled separately: the map does not yet have an
	 * updated number of disks.
	 */
	if (map->raid_level == IMSM_T_RAID0) {
		if (map->num_members == 2)
			map->raid_level = IMSM_T_RAID1;
		else
			map->raid_level = IMSM_T_RAID10;
		return;
	}
	if (map->num_members == 4) {
		if (map->raid_level == IMSM_T_RAID10 || map->raid_level == IMSM_T_RAID1)
			return;

		map->raid_level = IMSM_T_RAID1;
		return;
	}

	map->raid_level = IMSM_T_RAID10;
}
static int get_imsm_raid_level(struct imsm_map *map)
{
	if (map->raid_level == IMSM_T_RAID1) {
		if (map->num_members == 2)
			return IMSM_T_RAID1;
		else
			return IMSM_T_RAID10;
	}

	return map->raid_level;
}
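/* Worked example (added, illustrative): a map written by an older OROM with
 * raid_level == IMSM_T_RAID1 and num_members == 4 is really a RAID10, so
 * get_imsm_raid_level() reports IMSM_T_RAID10; with num_members == 2 it
 * stays IMSM_T_RAID1.
 */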
/**
 * get_disk_slot_in_dev() - retrieve disk slot from &imsm_dev.
 * @super: &intel_super pointer, not NULL.
 * @dev_idx: imsm device index.
 * @idx: disk index.
 *
 * Return: Slot on success, IMSM_STATUS_ERROR otherwise.
 */
static int get_disk_slot_in_dev(struct intel_super *super, const __u8 dev_idx,
				const unsigned int idx)
{
	struct imsm_dev *dev = get_imsm_dev(super, dev_idx);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);

	return get_imsm_disk_slot(map, idx);
}
, const void *bv
)
1385 const struct extent
*a
= av
;
1386 const struct extent
*b
= bv
;
1387 if (a
->start
< b
->start
)
1389 if (a
->start
> b
->start
)
1394 static int count_memberships(struct dl
*dl
, struct intel_super
*super
)
1396 int memberships
= 0;
1399 for (i
= 0; i
< super
->anchor
->num_raid_devs
; i
++)
1400 if (get_disk_slot_in_dev(super
, i
, dl
->index
) >= 0)
1406 static __u32
imsm_min_reserved_sectors(struct intel_super
*super
);
static int split_ull(unsigned long long n, void *lo, void *hi)
{
	if (lo == 0 || hi == 0)
		return 1;
	__put_unaligned32(__cpu_to_le32((__u32)n), lo);
	__put_unaligned32(__cpu_to_le32((n >> 32)), hi);
	return 0;
}
static unsigned long long join_u32(__u32 lo, __u32 hi)
{
	return (unsigned long long)__le32_to_cpu(lo) |
	       (((unsigned long long)__le32_to_cpu(hi)) << 32);
}
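/* Worked example (added, illustrative): round-tripping a 64-bit block count
 * through the split little-endian lo/hi fields:
 *
 *	__u32 lo, hi;
 *	split_ull(0x123456789aULL, &lo, &hi);
 *	// lo holds __cpu_to_le32(0x3456789a), hi holds __cpu_to_le32(0x12)
 *	join_u32(lo, hi) == 0x123456789aULL
 */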
static unsigned long long total_blocks(struct imsm_disk *disk)
{
	if (disk == NULL)
		return 0;

	return join_u32(disk->total_blocks_lo, disk->total_blocks_hi);
}
/**
 * imsm_num_data_members() - get data drives count for an array.
 * @map: Map to analyze.
 *
 * num_data_members value represents the minimal count of drives for level.
 * The name of the property could be misleading for RAID5 with asymmetric
 * layout because some data has to be calculated from parity.
 * The property is extracted from level and num_members value.
 *
 * Return: num_data_members value on success, zero otherwise.
 */
static __u8 imsm_num_data_members(struct imsm_map *map)
{
	switch (get_imsm_raid_level(map)) {
	case 0:
		return map->num_members;
	case 1:
	case 10:
		return map->num_members / 2;
	case 5:
		return map->num_members - 1;
	default:
		dprintf("unsupported raid level\n");
		return 0;
	}
}
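/* Worked example (added, illustrative): a 4-member RAID5 map has 3 data
 * members, a 4-member RAID10 map has 2, and a 4-member RAID0 map has all 4.
 */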
static unsigned long long pba_of_lba0(struct imsm_map *map)
{
	if (map == NULL)
		return 0;

	return join_u32(map->pba_of_lba0_lo, map->pba_of_lba0_hi);
}

static unsigned long long blocks_per_member(struct imsm_map *map)
{
	if (map == NULL)
		return 0;

	return join_u32(map->blocks_per_member_lo, map->blocks_per_member_hi);
}

static unsigned long long num_data_stripes(struct imsm_map *map)
{
	if (map == NULL)
		return 0;

	return join_u32(map->num_data_stripes_lo, map->num_data_stripes_hi);
}

static unsigned long long vol_curr_migr_unit(struct imsm_dev *dev)
{
	if (dev == NULL)
		return 0;

	return join_u32(dev->vol.curr_migr_unit_lo, dev->vol.curr_migr_unit_hi);
}

static unsigned long long imsm_dev_size(struct imsm_dev *dev)
{
	if (dev == NULL)
		return 0;

	return join_u32(dev->size_low, dev->size_high);
}
static unsigned long long migr_chkp_area_pba(struct migr_record *migr_rec)
{
	if (migr_rec == NULL)
		return 0;

	return join_u32(migr_rec->ckpt_area_pba_lo,
			migr_rec->ckpt_area_pba_hi);
}

static unsigned long long current_migr_unit(struct migr_record *migr_rec)
{
	if (migr_rec == NULL)
		return 0;

	return join_u32(migr_rec->curr_migr_unit_lo,
			migr_rec->curr_migr_unit_hi);
}

static unsigned long long migr_dest_1st_member_lba(struct migr_record *migr_rec)
{
	if (migr_rec == NULL)
		return 0;

	return join_u32(migr_rec->dest_1st_member_lba_lo,
			migr_rec->dest_1st_member_lba_hi);
}

static unsigned long long get_num_migr_units(struct migr_record *migr_rec)
{
	if (migr_rec == NULL)
		return 0;

	return join_u32(migr_rec->num_migr_units_lo,
			migr_rec->num_migr_units_hi);
}
static void set_total_blocks(struct imsm_disk *disk, unsigned long long n)
{
	split_ull(n, &disk->total_blocks_lo, &disk->total_blocks_hi);
}
/**
 * set_num_domains() - Set number of domains for an array.
 * @map: Map to be updated.
 *
 * num_domains property represents the copies count of each data drive, thus
 * it is meaningful only for RAID1 and RAID10. IMSM supports two domains for
 * these levels.
 */
static void set_num_domains(struct imsm_map *map)
{
	int level = get_imsm_raid_level(map);

	if (level == 1 || level == 10)
		map->num_domains = 2;
	else
		map->num_domains = 1;
}
static void set_pba_of_lba0(struct imsm_map *map, unsigned long long n)
{
	split_ull(n, &map->pba_of_lba0_lo, &map->pba_of_lba0_hi);
}

static void set_blocks_per_member(struct imsm_map *map, unsigned long long n)
{
	split_ull(n, &map->blocks_per_member_lo, &map->blocks_per_member_hi);
}

static void set_num_data_stripes(struct imsm_map *map, unsigned long long n)
{
	split_ull(n, &map->num_data_stripes_lo, &map->num_data_stripes_hi);
}
/**
 * update_num_data_stripes() - Calculate and update num_data_stripes value.
 * @map: map to be updated.
 * @dev_size: size of volume.
 *
 * num_data_stripes value is additionally divided by num_domains, therefore
 * for levels where num_domains is not 1, nds is a part of the real value.
 */
static void update_num_data_stripes(struct imsm_map *map,
				    unsigned long long dev_size)
{
	unsigned long long nds = dev_size / imsm_num_data_members(map);

	nds /= map->num_domains;
	nds /= map->blocks_per_strip;
	set_num_data_stripes(map, nds);
}
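/* Worked example (added, illustrative): for a 4-disk RAID10 volume of
 * 1048576 blocks with 128-block strips: 1048576 / 2 data members = 524288
 * blocks per member, / 2 domains = 262144, / 128 blocks per strip = 2048
 * stripes stored in num_data_stripes.
 */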
static void set_vol_curr_migr_unit(struct imsm_dev *dev, unsigned long long n)
{
	if (dev == NULL)
		return;

	split_ull(n, &dev->vol.curr_migr_unit_lo, &dev->vol.curr_migr_unit_hi);
}
static void set_imsm_dev_size(struct imsm_dev *dev, unsigned long long n)
{
	split_ull(n, &dev->size_low, &dev->size_high);
}

static void set_migr_chkp_area_pba(struct migr_record *migr_rec,
				   unsigned long long n)
{
	split_ull(n, &migr_rec->ckpt_area_pba_lo, &migr_rec->ckpt_area_pba_hi);
}

static void set_current_migr_unit(struct migr_record *migr_rec,
				  unsigned long long n)
{
	split_ull(n, &migr_rec->curr_migr_unit_lo,
		  &migr_rec->curr_migr_unit_hi);
}

static void set_migr_dest_1st_member_lba(struct migr_record *migr_rec,
					 unsigned long long n)
{
	split_ull(n, &migr_rec->dest_1st_member_lba_lo,
		  &migr_rec->dest_1st_member_lba_hi);
}

static void set_num_migr_units(struct migr_record *migr_rec,
			       unsigned long long n)
{
	split_ull(n, &migr_rec->num_migr_units_lo,
		  &migr_rec->num_migr_units_hi);
}
static unsigned long long per_dev_array_size(struct imsm_map *map)
{
	unsigned long long array_size = 0;

	if (map == NULL)
		return array_size;

	array_size = num_data_stripes(map) * map->blocks_per_strip;
	if (get_imsm_raid_level(map) == 1 || get_imsm_raid_level(map) == 10)
		array_size *= 2;

	return array_size;
}
*get_extents(struct intel_super
*super
, struct dl
*dl
,
1636 int get_minimal_reservation
)
1638 /* find a list of used extents on the given physical device */
1639 int memberships
= count_memberships(dl
, super
);
1640 struct extent
*rv
= xcalloc(memberships
+ 1, sizeof(struct extent
));
1641 struct extent
*e
= rv
;
1645 /* trim the reserved area for spares, so they can join any array
1646 * regardless of whether the OROM has assigned sectors from the
1647 * IMSM_RESERVED_SECTORS region
1649 if (dl
->index
== -1 || get_minimal_reservation
)
1650 reservation
= imsm_min_reserved_sectors(super
);
1652 reservation
= MPB_SECTOR_CNT
+ IMSM_RESERVED_SECTORS
;
1654 for (i
= 0; i
< super
->anchor
->num_raid_devs
; i
++) {
1655 struct imsm_dev
*dev
= get_imsm_dev(super
, i
);
1656 struct imsm_map
*map
= get_imsm_map(dev
, MAP_0
);
1658 if (get_imsm_disk_slot(map
, dl
->index
) >= 0) {
1659 e
->start
= pba_of_lba0(map
);
1660 e
->size
= per_dev_array_size(map
);
1665 qsort(rv
, memberships
, sizeof(*rv
), cmp_extent
);
1667 /* determine the start of the metadata
1668 * when no raid devices are defined use the default
1669 * ...otherwise allow the metadata to truncate the value
1670 * as is the case with older versions of imsm
1673 struct extent
*last
= &rv
[memberships
- 1];
1674 unsigned long long remainder
;
1676 remainder
= total_blocks(&dl
->disk
) - (last
->start
+ last
->size
);
1677 /* round down to 1k block to satisfy precision of the kernel
1681 /* make sure remainder is still sane */
1682 if (remainder
< (unsigned)ROUND_UP(super
->len
, 512) >> 9)
1683 remainder
= ROUND_UP(super
->len
, 512) >> 9;
1684 if (reservation
> remainder
)
1685 reservation
= remainder
;
1687 e
->start
= total_blocks(&dl
->disk
) - reservation
;
/* try to determine how much space is reserved for metadata from
 * the last get_extents() entry, otherwise fallback to the
 * default
 */
static __u32 imsm_reserved_sectors(struct intel_super *super, struct dl *dl)
{
	__u32 rv;
	struct extent *e;
	int i;

	/* for spares just return a minimal reservation which will grow
	 * once the spare is picked up by an array
	 */
	if (dl->index == -1)
		return MPB_SECTOR_CNT;

	e = get_extents(super, dl, 0);
	if (!e)
		return MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;

	/* scroll to last entry */
	for (i = 0; e[i].size; i++)
		continue;

	rv = total_blocks(&dl->disk) - e[i].start;

	free(e);

	return rv;
}
static int is_spare(struct imsm_disk *disk)
{
	return (disk->status & SPARE_DISK) == SPARE_DISK;
}

static int is_configured(struct imsm_disk *disk)
{
	return (disk->status & CONFIGURED_DISK) == CONFIGURED_DISK;
}

static int is_failed(struct imsm_disk *disk)
{
	return (disk->status & FAILED_DISK) == FAILED_DISK;
}

static int is_journal(struct imsm_disk *disk)
{
	return (disk->status & JOURNAL_DISK) == JOURNAL_DISK;
}
/**
 * round_member_size_to_mb() - Round given size to closest MiB.
 * @size: size to round in sectors.
 */
static inline unsigned long long round_member_size_to_mb(unsigned long long size)
{
	return (size >> SECT_PER_MB_SHIFT) << SECT_PER_MB_SHIFT;
}

/**
 * round_size_to_mb() - Round given size.
 * @array_size: size to round in sectors.
 * @disk_count: count of data members.
 *
 * Get size per each data member and round it to the closest MiB to ensure
 * that data splits evenly between members.
 *
 * Return: Array size, rounded down.
 */
static inline unsigned long long round_size_to_mb(unsigned long long array_size,
						  unsigned int disk_count)
{
	return round_member_size_to_mb(array_size / disk_count) * disk_count;
}
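/* Worked example (added, illustrative): with 512-byte sectors,
 * SECT_PER_MB_SHIFT is 11 (2048 sectors per MiB), so
 * round_size_to_mb(10000000, 3) computes round_member_size_to_mb(3333333) =
 * 3332096, times 3 = 9996288 sectors; each of the 3 data members then gets a
 * whole number of MiB.
 */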
static int able_to_resync(int raid_level, int missing_disks)
{
	int max_missing_disks = 0;

	switch (raid_level) {
	case 10:
		max_missing_disks = 1;
		break;
	default:
		max_missing_disks = 0;
	}
	return missing_disks <= max_missing_disks;
}
/* try to determine how much space is reserved for metadata from
 * the last get_extents() entry on the smallest active disk,
 * otherwise fallback to the default
 */
static __u32 imsm_min_reserved_sectors(struct intel_super *super)
{
	struct extent *e;
	int i;
	unsigned long long min_active;
	__u32 remainder;
	__u32 rv = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
	struct dl *dl, *dl_min = NULL;

	if (!super)
		return rv;

	min_active = 0;
	for (dl = super->disks; dl; dl = dl->next) {
		if (dl->index < 0)
			continue;
		unsigned long long blocks = total_blocks(&dl->disk);
		if (blocks < min_active || min_active == 0) {
			dl_min = dl;
			min_active = blocks;
		}
	}
	if (!dl_min)
		return rv;

	/* find last lba used by subarrays on the smallest active disk */
	e = get_extents(super, dl_min, 0);
	if (!e)
		return rv;
	for (i = 0; e[i].size; i++)
		continue;

	remainder = min_active - e[i].start;
	free(e);

	/* to give priority to recovery we should not require full
	   IMSM_RESERVED_SECTORS from the spare */
	rv = MPB_SECTOR_CNT + NUM_BLOCKS_DIRTY_STRIPE_REGION;

	/* if real reservation is smaller use that value */
	return (remainder < rv) ? remainder : rv;
}
static bool is_gen_migration(struct imsm_dev *dev);

#define IMSM_4K_DIV 8

static __u64 blocks_per_migr_unit(struct intel_super *super,
				  struct imsm_dev *dev);
static void print_imsm_dev(struct intel_super *super,
			   struct imsm_dev *dev,
			   char *uuid,
			   int disk_idx)
{
	__u64 sz;
	int slot, i;
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	struct imsm_map *map2 = get_imsm_map(dev, MAP_1);
	__u32 ord;

	printf("\n");
	printf("[%.16s]:\n", dev->volume);
	printf("       Subarray : %d\n", super->current_vol);
	printf("           UUID : %s\n", uuid);
	printf("     RAID Level : %d", get_imsm_raid_level(map));
	if (map2)
		printf(" <-- %d", get_imsm_raid_level(map2));
	printf("\n");
	printf("        Members : %d", map->num_members);
	if (map2)
		printf(" <-- %d", map2->num_members);
	printf("\n");
	printf("          Slots : [");
	for (i = 0; i < map->num_members; i++) {
		ord = get_imsm_ord_tbl_ent(dev, i, MAP_0);
		printf("%s", ord & IMSM_ORD_REBUILD ? "_" : "U");
	}
	printf("]");
	if (map2) {
		printf(" <-- [");
		for (i = 0; i < map2->num_members; i++) {
			ord = get_imsm_ord_tbl_ent(dev, i, MAP_1);
			printf("%s", ord & IMSM_ORD_REBUILD ? "_" : "U");
		}
		printf("]");
	}
	printf("\n");
	printf("    Failed disk : ");
	if (map->failed_disk_num == 0xff)
		printf(STR_COMMON_NONE);
	else
		printf("%i", map->failed_disk_num);
	printf("\n");
	slot = get_imsm_disk_slot(map, disk_idx);
	if (slot >= 0) {
		ord = get_imsm_ord_tbl_ent(dev, slot, MAP_X);
		printf("      This Slot : %d%s\n", slot,
		       ord & IMSM_ORD_REBUILD ? " (out-of-sync)" : "");
	} else
		printf("      This Slot : ?\n");
	printf("    Sector Size : %u\n", super->sector_size);
	sz = imsm_dev_size(dev);
	printf("     Array Size : %llu%s\n",
	       (unsigned long long)sz * 512 / super->sector_size,
	       human_size(sz * 512));
	sz = blocks_per_member(map);
	printf("   Per Dev Size : %llu%s\n",
	       (unsigned long long)sz * 512 / super->sector_size,
	       human_size(sz * 512));
	printf("  Sector Offset : %llu\n",
	       pba_of_lba0(map) * 512 / super->sector_size);
	printf("    Num Stripes : %llu\n",
	       num_data_stripes(map));
	printf("     Chunk Size : %u KiB",
	       __le16_to_cpu(map->blocks_per_strip) / 2);
	if (map2)
		printf(" <-- %u KiB",
		       __le16_to_cpu(map2->blocks_per_strip) / 2);
	printf("\n");
	printf("       Reserved : %d\n", __le32_to_cpu(dev->reserved_blocks));
	printf("  Migrate State : ");
	if (dev->vol.migr_state) {
		if (migr_type(dev) == MIGR_INIT)
			printf("initialize\n");
		else if (migr_type(dev) == MIGR_REBUILD)
			printf("rebuild\n");
		else if (migr_type(dev) == MIGR_VERIFY)
			printf("check\n");
		else if (migr_type(dev) == MIGR_GEN_MIGR)
			printf("general migration\n");
		else if (migr_type(dev) == MIGR_STATE_CHANGE)
			printf("state change\n");
		else if (migr_type(dev) == MIGR_REPAIR)
			printf("repair\n");
		else
			printf("<unknown:%d>\n", migr_type(dev));
	} else
		printf("idle\n");
	printf("      Map State : %s", map_state_str[map->map_state]);
	if (dev->vol.migr_state) {
		struct imsm_map *map = get_imsm_map(dev, MAP_1);

		printf(" <-- %s", map_state_str[map->map_state]);
		printf("\n     Checkpoint : %llu ", vol_curr_migr_unit(dev));
		if (is_gen_migration(dev) && (slot > 1 || slot < 0))
			printf("(N/A)");
		else
			printf("(%llu)", (unsigned long long)
			       blocks_per_migr_unit(super, dev));
	}
	printf("\n");
	printf("    Dirty State : %s\n", (dev->vol.dirty & RAIDVOL_DIRTY) ?
	       "dirty" : "clean");
	printf("     RWH Policy : ");
	if (dev->rwh_policy == RWH_OFF || dev->rwh_policy == RWH_MULTIPLE_OFF)
		printf("off\n");
	else if (dev->rwh_policy == RWH_DISTRIBUTED)
		printf("PPL distributed\n");
	else if (dev->rwh_policy == RWH_JOURNALING_DRIVE)
		printf("PPL journaling drive\n");
	else if (dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
		printf("Multiple distributed PPLs\n");
	else if (dev->rwh_policy == RWH_MULTIPLE_PPLS_JOURNALING_DRIVE)
		printf("Multiple PPLs on journaling drive\n");
	else if (dev->rwh_policy == RWH_BITMAP)
		printf("Write-intent bitmap\n");
	else
		printf("<unknown:%d>\n", dev->rwh_policy);

	printf("      Volume ID : %u\n", dev->my_vol_raid_dev_num);
}
static void print_imsm_disk(struct imsm_disk *disk,
			    int index,
			    __u32 reserved,
			    unsigned int sector_size) {
	char str[MAX_RAID_SERIAL_LEN + 1];
	__u64 sz;

	if (index < -1 || !disk)
		return;

	printf("\n");
	snprintf(str, MAX_RAID_SERIAL_LEN + 1, "%s", disk->serial);
	if (index >= 0)
		printf("  Disk%02d Serial : %s\n", index, str);
	else
		printf("    Disk Serial : %s\n", str);
	printf("          State :%s%s%s%s\n", is_spare(disk) ? " spare" : "",
	       is_configured(disk) ? " active" : "",
	       is_failed(disk) ? " failed" : "",
	       is_journal(disk) ? " journal" : "");
	printf("             Id : %08x\n", __le32_to_cpu(disk->scsi_id));
	sz = total_blocks(disk) - reserved;
	printf("    Usable Size : %llu%s\n",
	       (unsigned long long)sz * 512 / sector_size,
	       human_size(sz * 512));
}
void convert_to_4k_imsm_migr_rec(struct intel_super *super)
{
	struct migr_record *migr_rec = super->migr_rec;

	migr_rec->blocks_per_unit /= IMSM_4K_DIV;
	migr_rec->dest_depth_per_unit /= IMSM_4K_DIV;
	split_ull((join_u32(migr_rec->post_migr_vol_cap,
		  migr_rec->post_migr_vol_cap_hi) / IMSM_4K_DIV),
		  &migr_rec->post_migr_vol_cap, &migr_rec->post_migr_vol_cap_hi);
	set_migr_chkp_area_pba(migr_rec,
			       migr_chkp_area_pba(migr_rec) / IMSM_4K_DIV);
	set_migr_dest_1st_member_lba(migr_rec,
			migr_dest_1st_member_lba(migr_rec) / IMSM_4K_DIV);
}
void convert_to_4k_imsm_disk(struct imsm_disk *disk)
{
	set_total_blocks(disk, (total_blocks(disk)/IMSM_4K_DIV));
}
void convert_to_4k(struct intel_super *super)
{
	struct imsm_super *mpb = super->anchor;
	struct imsm_disk *disk;
	int i;
	__u32 bbm_log_size = __le32_to_cpu(mpb->bbm_log_size);

	for (i = 0; i < mpb->num_disks ; i++) {
		disk = __get_imsm_disk(mpb, i);
		/* disk */
		convert_to_4k_imsm_disk(disk);
	}
	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct imsm_dev *dev = __get_imsm_dev(mpb, i);
		struct imsm_map *map = get_imsm_map(dev, MAP_0);
		/* dev */
		set_imsm_dev_size(dev, imsm_dev_size(dev)/IMSM_4K_DIV);
		set_vol_curr_migr_unit(dev,
				       vol_curr_migr_unit(dev) / IMSM_4K_DIV);

		/* map0 */
		set_blocks_per_member(map, blocks_per_member(map)/IMSM_4K_DIV);
		map->blocks_per_strip /= IMSM_4K_DIV;
		set_pba_of_lba0(map, pba_of_lba0(map)/IMSM_4K_DIV);

		if (dev->vol.migr_state) {
			/* map1 */
			map = get_imsm_map(dev, MAP_1);
			set_blocks_per_member(map,
			    blocks_per_member(map)/IMSM_4K_DIV);
			map->blocks_per_strip /= IMSM_4K_DIV;
			set_pba_of_lba0(map, pba_of_lba0(map)/IMSM_4K_DIV);
		}
	}
	if (bbm_log_size) {
		struct bbm_log *log = (void *)mpb +
			__le32_to_cpu(mpb->mpb_size) - bbm_log_size;
		__u32 i;

		for (i = 0; i < log->entry_count; i++) {
			struct bbm_log_entry *entry =
				&log->marked_block_entries[i];

			__u8 count = entry->marked_count + 1;
			unsigned long long sector =
				__le48_to_cpu(&entry->defective_block_start);

			entry->defective_block_start =
				__cpu_to_le48(sector/IMSM_4K_DIV);
			entry->marked_count = max(count/IMSM_4K_DIV, 1) - 1;
		}
	}

	mpb->check_sum = __gen_imsm_checksum(mpb);
}
void examine_migr_rec_imsm(struct intel_super *super)
{
	struct migr_record *migr_rec = super->migr_rec;
	struct imsm_super *mpb = super->anchor;
	int i;

	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct imsm_dev *dev = __get_imsm_dev(mpb, i);
		struct imsm_map *map;
		int slot = -1;

		if (is_gen_migration(dev) == false)
			continue;

		printf("\nMigration Record Information:");

		/* first map under migration */
		map = get_imsm_map(dev, MAP_0);

		if (map)
			slot = get_imsm_disk_slot(map, super->disks->index);
		if (map == NULL || slot > 1 || slot < 0) {
			printf(" Empty\n                              ");
			printf("Examine one of first two disks in array\n");
			continue;
		}
		printf("\n                     Status : ");
		if (__le32_to_cpu(migr_rec->rec_status) == UNIT_SRC_NORMAL)
			printf("Normal\n");
		else
			printf("Contains Data\n");
		printf("               Current Unit : %llu\n",
		       current_migr_unit(migr_rec));
		printf("                     Family : %u\n",
		       __le32_to_cpu(migr_rec->family_num));
		printf("                  Ascending : %u\n",
		       __le32_to_cpu(migr_rec->ascending_migr));
		printf("            Blocks Per Unit : %u\n",
		       __le32_to_cpu(migr_rec->blocks_per_unit));
		printf("       Dest. Depth Per Unit : %u\n",
		       __le32_to_cpu(migr_rec->dest_depth_per_unit));
		printf("        Checkpoint Area pba : %llu\n",
		       migr_chkp_area_pba(migr_rec));
		printf("           First member lba : %llu\n",
		       migr_dest_1st_member_lba(migr_rec));
		printf("      Total Number of Units : %llu\n",
		       get_num_migr_units(migr_rec));
		printf("             Size of volume : %llu\n",
		       join_u32(migr_rec->post_migr_vol_cap,
				migr_rec->post_migr_vol_cap_hi));
		printf("       Record was read from : %u\n",
		       __le32_to_cpu(migr_rec->ckpt_read_disk_num));

		break;
	}
}
void convert_from_4k_imsm_migr_rec(struct intel_super *super)
{
	struct migr_record *migr_rec = super->migr_rec;

	migr_rec->blocks_per_unit *= IMSM_4K_DIV;
	migr_rec->dest_depth_per_unit *= IMSM_4K_DIV;
	split_ull((join_u32(migr_rec->post_migr_vol_cap,
		  migr_rec->post_migr_vol_cap_hi) * IMSM_4K_DIV),
		  &migr_rec->post_migr_vol_cap,
		  &migr_rec->post_migr_vol_cap_hi);
	set_migr_chkp_area_pba(migr_rec,
			       migr_chkp_area_pba(migr_rec) * IMSM_4K_DIV);
	set_migr_dest_1st_member_lba(migr_rec,
			migr_dest_1st_member_lba(migr_rec) * IMSM_4K_DIV);
}
void convert_from_4k(struct intel_super *super)
{
	struct imsm_super *mpb = super->anchor;
	struct imsm_disk *disk;
	int i;
	__u32 bbm_log_size = __le32_to_cpu(mpb->bbm_log_size);

	for (i = 0; i < mpb->num_disks ; i++) {
		disk = __get_imsm_disk(mpb, i);
		/* disk */
		set_total_blocks(disk, (total_blocks(disk)*IMSM_4K_DIV));
	}

	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct imsm_dev *dev = __get_imsm_dev(mpb, i);
		struct imsm_map *map = get_imsm_map(dev, MAP_0);
		/* dev */
		set_imsm_dev_size(dev, imsm_dev_size(dev)*IMSM_4K_DIV);
		set_vol_curr_migr_unit(dev,
				       vol_curr_migr_unit(dev) * IMSM_4K_DIV);

		/* map0 */
		set_blocks_per_member(map, blocks_per_member(map)*IMSM_4K_DIV);
		map->blocks_per_strip *= IMSM_4K_DIV;
		set_pba_of_lba0(map, pba_of_lba0(map)*IMSM_4K_DIV);

		if (dev->vol.migr_state) {
			/* map1 */
			map = get_imsm_map(dev, MAP_1);
			set_blocks_per_member(map,
			    blocks_per_member(map)*IMSM_4K_DIV);
			map->blocks_per_strip *= IMSM_4K_DIV;
			set_pba_of_lba0(map, pba_of_lba0(map)*IMSM_4K_DIV);
		}
	}
	if (bbm_log_size) {
		struct bbm_log *log = (void *)mpb +
			__le32_to_cpu(mpb->mpb_size) - bbm_log_size;
		__u32 i;

		for (i = 0; i < log->entry_count; i++) {
			struct bbm_log_entry *entry =
				&log->marked_block_entries[i];

			__u8 count = entry->marked_count + 1;
			unsigned long long sector =
				__le48_to_cpu(&entry->defective_block_start);

			entry->defective_block_start =
				__cpu_to_le48(sector*IMSM_4K_DIV);
			entry->marked_count = count*IMSM_4K_DIV - 1;
		}
	}

	mpb->check_sum = __gen_imsm_checksum(mpb);
}
/**
 * imsm_check_attributes() - Check if features represented by attributes flags are supported.
 *
 * @attributes: attributes read from metadata.
 * Returns: true if all features are supported, false otherwise.
 */
static bool imsm_check_attributes(__u32 attributes)
{
	if ((attributes & (MPB_ATTRIB_SUPPORTED | MPB_ATTRIB_IGNORED)) == attributes)
		return true;

	return false;
}
static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info, char *map);
static void examine_super_imsm(struct supertype *st, char *homehost)
{
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	char str[MAX_SIGNATURE_LENGTH];
	int i;
	struct mdinfo info;
	char nbuf[64];
	__u32 sum;
	__u32 reserved = imsm_reserved_sectors(super, super->disks);
	struct dl *dl;
	time_t creation_time;

	strncpy(str, (char *)mpb->sig, MPB_SIG_LEN);
	str[MPB_SIG_LEN-1] = '\0';
	printf("          Magic : %s\n", str);
	printf("        Version : %s\n", get_imsm_version(mpb));
	printf("    Orig Family : %08x\n", __le32_to_cpu(mpb->orig_family_num));
	printf("         Family : %08x\n", __le32_to_cpu(mpb->family_num));
	printf("     Generation : %08x\n", __le32_to_cpu(mpb->generation_num));
	creation_time = __le64_to_cpu(mpb->creation_time);
	printf("  Creation Time : %.24s\n",
	       creation_time ? ctime(&creation_time) : "Unknown");

	printf("     Attributes : %08x (%s)\n", mpb->attributes,
	       imsm_check_attributes(mpb->attributes) ? "supported" : "not supported");

	getinfo_super_imsm(st, &info, NULL);
	fname_from_uuid(&info, nbuf);
	printf("           UUID : %s\n", nbuf + 5);
	sum = __le32_to_cpu(mpb->check_sum);
	printf("       Checksum : %08x %s\n", sum,
	       __gen_imsm_checksum(mpb) == sum ? "correct" : "incorrect");
	printf("    MPB Sectors : %d\n", mpb_sectors(mpb, super->sector_size));
	printf("          Disks : %d\n", mpb->num_disks);
	printf("   RAID Devices : %d\n", mpb->num_raid_devs);
	print_imsm_disk(__get_imsm_disk(mpb, super->disks->index),
			super->disks->index, reserved, super->sector_size);
	if (get_imsm_bbm_log_size(super->bbm_log)) {
		struct bbm_log *log = super->bbm_log;

		printf("\n");
		printf("Bad Block Management Log:\n");
		printf("       Log Size : %d\n", __le32_to_cpu(mpb->bbm_log_size));
		printf("      Signature : %x\n", __le32_to_cpu(log->signature));
		printf("    Entry Count : %d\n", __le32_to_cpu(log->entry_count));
	}
	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct imsm_dev *dev = __get_imsm_dev(mpb, i);

		super->current_vol = i;
		getinfo_super_imsm(st, &info, NULL);
		fname_from_uuid(&info, nbuf);
		print_imsm_dev(super, dev, nbuf + 5, super->disks->index);
	}
	for (i = 0; i < mpb->num_disks; i++) {
		if (i == super->disks->index)
			continue;
		print_imsm_disk(__get_imsm_disk(mpb, i), i, reserved,
				super->sector_size);
	}
	for (dl = super->disks; dl; dl = dl->next)
		if (dl->index == -1)
			print_imsm_disk(&dl->disk, -1, reserved,
					super->sector_size);

	examine_migr_rec_imsm(super);
}
static void brief_examine_super_imsm(struct supertype *st, int verbose)
{
	/* We just write a generic IMSM ARRAY entry */
	struct mdinfo info;
	char nbuf[64];

	getinfo_super_imsm(st, &info, NULL);
	fname_from_uuid(&info, nbuf);
	printf("ARRAY metadata=imsm UUID=%s\n", nbuf + 5);
}
static void brief_examine_subarrays_imsm(struct supertype *st, int verbose)
{
	/* We just write a generic IMSM ARRAY entry */
	struct mdinfo info;
	char nbuf[64];
	char nbuf1[64];
	struct intel_super *super = st->sb;
	int i;

	if (!super->anchor->num_raid_devs)
		return;

	getinfo_super_imsm(st, &info, NULL);
	fname_from_uuid(&info, nbuf);
	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);

		super->current_vol = i;
		getinfo_super_imsm(st, &info, NULL);
		fname_from_uuid(&info, nbuf1);
		printf("ARRAY " DEV_MD_DIR "%.16s container=%s member=%d UUID=%s\n",
		       dev->volume, nbuf + 5, i, nbuf1 + 5);
	}
}
static void export_examine_super_imsm(struct supertype *st)
{
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	struct mdinfo info;
	char nbuf[64];

	getinfo_super_imsm(st, &info, NULL);
	fname_from_uuid(&info, nbuf);
	printf("MD_METADATA=imsm\n");
	printf("MD_LEVEL=container\n");
	printf("MD_UUID=%s\n", nbuf+5);
	printf("MD_DEVICES=%u\n", mpb->num_disks);
	printf("MD_CREATION_TIME=%llu\n",
	       (unsigned long long)__le64_to_cpu(mpb->creation_time));
}
static void detail_super_imsm(struct supertype *st, char *homehost,
			      char *subarray)
{
	struct mdinfo info;
	char nbuf[64];
	struct intel_super *super = st->sb;
	int temp_vol = super->current_vol;

	if (subarray)
		super->current_vol = strtoul(subarray, NULL, 10);

	getinfo_super_imsm(st, &info, NULL);
	fname_from_uuid(&info, nbuf);
	printf("\n UUID : %s\n", nbuf + 5);

	super->current_vol = temp_vol;
}
static void brief_detail_super_imsm(struct supertype *st, char *subarray)
{
	struct mdinfo info;
	char nbuf[64];
	struct intel_super *super = st->sb;
	int temp_vol = super->current_vol;

	if (subarray)
		super->current_vol = strtoul(subarray, NULL, 10);

	getinfo_super_imsm(st, &info, NULL);
	fname_from_uuid(&info, nbuf);
	printf(" UUID=%s", nbuf + 5);

	super->current_vol = temp_vol;
}
static int imsm_read_serial(int fd, char *devname, __u8 *serial,
			    size_t serial_buf_len);
static void fd2devname(int fd, char *name);
void print_encryption_information(int disk_fd, enum sys_dev_type hba_type)
{
	struct encryption_information information = {0};
	mdadm_status_t status = MDADM_STATUS_SUCCESS;
	const char *indent = " ";

	switch (hba_type) {
	case SYS_DEV_VMD:
	case SYS_DEV_NVME:
		status = get_nvme_opal_encryption_information(disk_fd, &information, 1);
		break;
	case SYS_DEV_SATA:
	case SYS_DEV_SATA_VMD:
		status = get_ata_encryption_information(disk_fd, &information, 1);
		break;
	default:
		return;
	}

	if (status) {
		pr_err("Failed to get drive encryption information.\n");
		return;
	}

	printf("%sEncryption(Ability|Status): %s|%s\n", indent,
	       get_encryption_ability_string(information.ability),
	       get_encryption_status_string(information.status));
}
static int ahci_enumerate_ports(struct sys_dev *hba, unsigned long port_count, int host_base,
				int verbose)
{
	/* dump an unsorted list of devices attached to AHCI Intel storage
	 * controller, as well as non-connected ports
	 */
	int hba_len = strlen(hba->path) + 1;
	struct dirent *ent;
	DIR *dir;
	char *path = NULL;
	int err = 0;
	unsigned long port_mask = (1 << port_count) - 1;

	if (port_count > (int)sizeof(port_mask) * 8) {
		if (verbose > 0)
			pr_err("port_count %ld out of range\n", port_count);
		return 2;
	}

	/* scroll through /sys/dev/block looking for devices attached to
	 * this hba
	 */
	dir = opendir("/sys/dev/block");
	if (!dir)
		return 1;

	for (ent = readdir(dir); ent; ent = readdir(dir)) {
		int fd;
		char model[64];
		char vendor[64];
		char buf[1024];
		int major, minor;
		char device[PATH_MAX];
		char *c;
		int port;
		int type;

		if (sscanf(ent->d_name, "%d:%d", &major, &minor) != 2)
			continue;
		path = devt_to_devpath(makedev(major, minor), 1, NULL);
		if (!path)
			continue;
		if (!is_path_attached_to_hba(path, hba->path)) {
			free(path);
			path = NULL;
			continue;
		}

		/* retrieve the scsi device */
		if (!devt_to_devpath(makedev(major, minor), 1, device)) {
			if (verbose > 0)
				pr_err("failed to get device\n");
			err = 2;
			break;
		}
		if (devpath_to_char(device, "type", buf, sizeof(buf), 0)) {
			err = 2;
			break;
		}
		type = strtoul(buf, NULL, 10);

		/* if it's not a disk print the vendor and model */
		if (!(type == 0 || type == 7 || type == 14)) {
			vendor[0] = '\0';
			model[0] = '\0';

			if (devpath_to_char(device, "vendor", buf,
					    sizeof(buf), 0) == 0) {
				strncpy(vendor, buf, sizeof(vendor));
				vendor[sizeof(vendor) - 1] = '\0';
				c = (char *) &vendor[sizeof(vendor) - 1];
				while (isspace(*c) || *c == '\0')
					*c-- = '\0';
			}

			if (devpath_to_char(device, "model", buf,
					    sizeof(buf), 0) == 0) {
				strncpy(model, buf, sizeof(model));
				model[sizeof(model) - 1] = '\0';
				c = (char *) &model[sizeof(model) - 1];
				while (isspace(*c) || *c == '\0')
					*c-- = '\0';
			}

			if (vendor[0] && model[0])
				sprintf(buf, "%.64s %.64s", vendor, model);
			else
				switch (type) { /* numbers from hald/linux/device.c */
				case 1: sprintf(buf, "tape"); break;
				case 2: sprintf(buf, "printer"); break;
				case 3: sprintf(buf, "processor"); break;
				case 4:
				case 5: sprintf(buf, "cdrom"); break;
				case 6: sprintf(buf, "scanner"); break;
				case 8: sprintf(buf, "media_changer"); break;
				case 9: sprintf(buf, "comm"); break;
				case 12: sprintf(buf, "raid"); break;
				default: sprintf(buf, "unknown");
				}
		} else
			buf[0] = '\0';

		/* chop device path to 'host%d' and calculate the port number */
		c = strchr(&path[hba_len], '/');
		if (!c) {
			if (verbose > 0)
				pr_err("%s - invalid path name\n", path + hba_len);
			err = 2;
			break;
		}
		*c = '\0';
		if ((sscanf(&path[hba_len], "ata%d", &port) == 1) ||
		    ((sscanf(&path[hba_len], "host%d", &port) == 1)))
			port -= host_base;
		else {
			if (verbose > 0) {
				*c = '/'; /* repair the full string */
				pr_err("failed to determine port number for %s\n",
				       path);
			}
			err = 2;
			break;
		}

		/* mark this port as used */
		port_mask &= ~(1 << port);

		/* print out the device information */
		if (buf[0]) {
			printf(" Port%d : - non-disk device (%s) -\n", port, buf);
			continue;
		}

		fd = dev_open(ent->d_name, O_RDONLY);
		if (!is_fd_valid(fd))
			printf(" Port%d : - disk info unavailable -\n", port);
		else {
			fd2devname(fd, buf);
			printf(" Port%d : %s", port, buf);
			if (imsm_read_serial(fd, NULL, (__u8 *)buf,
					     sizeof(buf)) == 0)
				printf(" (%s)\n", buf);
			else
				printf(" ()\n");

			print_encryption_information(fd, hba->type);
			close(fd);
		}
		free(path);
		path = NULL;
	}
	if (path)
		free(path);
	if (dir)
		closedir(dir);
	if (err == 0) {
		unsigned long i;

		for (i = 0; i < port_count; i++)
			if (port_mask & (1L << i))
				printf(" Port%ld : - no device attached -\n", i);
	}

	return err;
}
static int print_nvme_info(struct sys_dev *hba)
{
	char buf[1024];
	struct dirent *ent;
	DIR *dir;
	int fd;

	dir = opendir("/sys/block/");
	if (!dir)
		return 1;

	for (ent = readdir(dir); ent; ent = readdir(dir)) {
		char ns_path[PATH_MAX];
		char cntrl_path[PATH_MAX];

		if (!strstr(ent->d_name, "nvme"))
			continue;

		fd = open_dev(ent->d_name);
		if (!is_fd_valid(fd))
			continue;

		if (!diskfd_to_devpath(fd, 0, ns_path) ||
		    !diskfd_to_devpath(fd, 1, cntrl_path))
			goto skip;

		if (!is_path_attached_to_hba(cntrl_path, hba->path))
			goto skip;

		if (!imsm_is_nvme_namespace_supported(fd, 0))
			goto skip;

		fd2devname(fd, buf);
		if (hba->type == SYS_DEV_VMD)
			printf(" NVMe under VMD : %s", buf);
		else if (hba->type == SYS_DEV_NVME)
			printf(" NVMe Device : %s", buf);

		if (!imsm_read_serial(fd, NULL, (__u8 *)buf,
				      sizeof(buf)))
			printf(" (%s)\n", buf);
		else
			printf("()\n");

		print_encryption_information(fd, hba->type);

skip:
		close_fd(&fd);
	}

	closedir(dir);
	return 0;
}
static void print_found_intel_controllers(struct sys_dev *elem)
{
	for (; elem; elem = elem->next) {
		pr_err("found Intel(R) ");
		if (elem->type == SYS_DEV_SATA)
			fprintf(stderr, "SATA ");
		else if (elem->type == SYS_DEV_SAS)
			fprintf(stderr, "SAS ");
		else if (elem->type == SYS_DEV_NVME)
			fprintf(stderr, "NVMe ");

		if (elem->type == SYS_DEV_VMD)
			fprintf(stderr, "VMD domain");
		else if (elem->type == SYS_DEV_SATA_VMD)
			fprintf(stderr, "SATA VMD domain");
		else
			fprintf(stderr, "RAID controller");

		if (elem->pci_id)
			fprintf(stderr, " at %s", elem->pci_id);
		fprintf(stderr, ".\n");
	}
}
static int ahci_get_port_count(const char *hba_path, int *port_count)
{
	struct dirent *ent;
	DIR *dir;
	int host_base = -1;

	*port_count = 0;
	if ((dir = opendir(hba_path)) == NULL)
		return -1;

	for (ent = readdir(dir); ent; ent = readdir(dir)) {
		int host;

		if ((sscanf(ent->d_name, "ata%d", &host) != 1) &&
		    ((sscanf(ent->d_name, "host%d", &host) != 1)))
			continue;
		if (*port_count == 0)
			host_base = host;
		else if (host < host_base)
			host_base = host;

		if (host + 1 > *port_count + host_base)
			*port_count = host + 1 - host_base;
	}
	closedir(dir);
	return host_base;
}
static void print_imsm_level_capability(const struct imsm_orom *orom)
{
	int idx;

	for (idx = 0; imsm_level_ops[idx].name; idx++)
		if (imsm_level_ops[idx].is_level_supported(orom))
			printf("%s ", imsm_level_ops[idx].name);
}
static void print_imsm_sku_capability(const struct imsm_orom *orom)
{
	int key_val;

	key_val = (orom->driver_features & IMSM_OROM_CAPABILITIES_SKUMode_LOW) >>
		  IMSM_OROM_CAPABILITIES_SKUMode_LOW_SHIFT;
	key_val |= (orom->driver_features & IMSM_OROM_CAPABILITIES_SKUMode_HIGH) >>
		   IMSM_OROM_CAPABILITIES_SKUMode_HIGH_SHIFT;

	switch (key_val) {
	case SKU_NO_KEY:
		printf("Pass-through");
		break;
	case SKU_STANDARD_KEY:
		printf("Standard");
		break;
	case SKU_PREMIUM_KEY:
		printf("Premium");
		break;
	case SKU_INTEL_SSD_ONLY_KEY:
		printf("Intel-SSD-only");
		break;
	case SKU_RAID1_ONLY_KEY:
		printf("RAID1 Only");
		break;
	default:
		printf("Unknown");
	}

	if (orom->driver_features & IMSM_OROM_CAPABILITIES_SKUMode_NON_PRODUCTION)
		printf(" - for evaluation only");
}
static void print_imsm_chunk_size_capability(const struct imsm_orom *orom)
{
	int idx;

	for (idx = 0; imsm_chunk_ops[idx].chunk_str; idx++)
		if (imsm_chunk_ops[idx].chunk & orom->sss)
			printf("%s ", imsm_chunk_ops[idx].chunk_str);
}
static void print_imsm_capability(const struct orom_entry *entry)
{
	const struct imsm_orom *orom = &entry->orom;

	printf(" Platform : Intel(R) ");
	if (orom->capabilities == 0 && orom->driver_features == 0)
		printf("Matrix Storage Manager\n");
	else if (imsm_orom_is_enterprise(orom) && orom->major_ver >= 6)
		printf("Virtual RAID on CPU\n");
	else
		printf("Rapid Storage Technology%s\n",
		       imsm_orom_is_enterprise(orom) ? " enterprise" : "");

	if (orom->major_ver || orom->minor_ver || orom->hotfix_ver || orom->build) {
		if (imsm_orom_is_vmd_without_efi(orom))
			printf(" Version : %d.%d\n", orom->major_ver, orom->minor_ver);
		else
			printf(" Version : %d.%d.%d.%d\n", orom->major_ver, orom->minor_ver,
			       orom->hotfix_ver, orom->build);
	}

	if (entry->type == SYS_DEV_VMD) {
		printf(" License : ");
		print_imsm_sku_capability(orom);
		printf("\n");
	}

	printf(" RAID Levels : ");
	print_imsm_level_capability(orom);
	printf("\n");

	printf(" Chunk Sizes : ");
	print_imsm_chunk_size_capability(orom);
	printf("\n");

	printf(" 2TB volumes :%s supported\n", (orom->attr & IMSM_OROM_ATTR_2TB) ? "" : " not");
	printf(" 2TB disks :%s supported\n",
	       (orom->attr & IMSM_OROM_ATTR_2TB_DISK) ? "" : " not");
	printf(" Max Disks : %d\n", orom->tds);
	printf(" Max Volumes : %d per array, %d per %s\n", orom->vpa, orom->vphba,
	       imsm_orom_is_nvme(orom) ? "platform" : "controller");

	if (entry->type == SYS_DEV_VMD || entry->type == SYS_DEV_NVME)
		/* This is only meaningful for controllers with nvme support */
		printf(" 3rd party NVMe :%s supported\n",
		       imsm_orom_has_tpv_support(&entry->orom) ? "" : " not");
}
static void print_imsm_capability_export(const struct imsm_orom *orom)
{
	printf("MD_FIRMWARE_TYPE=imsm\n");
	if (orom->major_ver || orom->minor_ver || orom->hotfix_ver || orom->build)
		printf("IMSM_VERSION=%d.%d.%d.%d\n", orom->major_ver, orom->minor_ver,
		       orom->hotfix_ver, orom->build);

	printf("IMSM_SUPPORTED_RAID_LEVELS=");
	print_imsm_level_capability(orom);
	printf("\n");

	printf("IMSM_SUPPORTED_CHUNK_SIZES=");
	print_imsm_chunk_size_capability(orom);
	printf("\n");

	printf("IMSM_2TB_VOLUMES=%s\n", (orom->attr & IMSM_OROM_ATTR_2TB) ? "yes" : "no");
	printf("IMSM_2TB_DISKS=%s\n", (orom->attr & IMSM_OROM_ATTR_2TB_DISK) ? "yes" : "no");
	printf("IMSM_MAX_DISKS=%d\n", orom->tds);
	printf("IMSM_MAX_VOLUMES_PER_ARRAY=%d\n", orom->vpa);
	printf("IMSM_MAX_VOLUMES_PER_CONTROLLER=%d\n", orom->vphba);
}
static int detail_platform_imsm(int verbose, int enumerate_only, char *controller_path)
{
	/* There are two components to imsm platform support, the ahci SATA
	 * controller and the option-rom.  To find the SATA controller we
	 * simply look in /sys/bus/pci/drivers/ahci to see if an ahci
	 * controller with the Intel vendor id is present.  This approach
	 * allows mdadm to leverage the kernel's ahci detection logic, with the
	 * caveat that if ahci.ko is not loaded mdadm will not be able to
	 * detect platform raid capabilities.  The option-rom resides in a
	 * platform "Adapter ROM".  We scan for its signature to retrieve the
	 * platform capabilities.  If raid support is disabled in the BIOS the
	 * option-rom capability structure will not be available.
	 */
	const struct orom_entry *entry;
	struct sys_dev *list, *hba;
	struct devid_list *devid;
	int host_base = 0;
	int port_count = 0;
	int result = 1;

	if (enumerate_only) {
		if (check_no_platform())
			return 0;
		list = find_intel_devices();
		if (!list)
			return 2;
		for (hba = list; hba; hba = hba->next)
			if (find_imsm_capability(hba))
				return 0;
		return 2;
	}

	list = find_intel_devices();
	if (!list) {
		if (verbose > 0)
			pr_err("no active Intel(R) RAID controller found.\n");
		return 2;
	} else if (verbose > 0)
		print_found_intel_controllers(list);

	for (hba = list; hba; hba = hba->next) {
		if (controller_path && (compare_paths(hba->path, controller_path) != 0))
			continue;
		if (!find_imsm_capability(hba)) {
			char buf[PATH_MAX];

			pr_err("imsm capabilities not found for controller: %s (type %s)\n",
			       hba->type == SYS_DEV_VMD || hba->type == SYS_DEV_SATA_VMD ?
			       vmd_domain_to_controller(hba, buf) :
			       hba->path, get_sys_dev_type(hba->type));
			continue;
		}
		result = 0;
	}

	if (controller_path && result == 1) {
		pr_err("no active Intel(R) RAID controller found under %s\n",
		       controller_path);
		return result;
	}

	for (entry = orom_entries; entry; entry = entry->next) {
		print_imsm_capability(entry);

		if (entry->type == SYS_DEV_VMD || entry->type == SYS_DEV_NVME) {
			for (hba = list; hba; hba = hba->next) {
				char buf[PATH_MAX];

				if (hba->type != entry->type)
					continue;

				if (hba->type == SYS_DEV_VMD)
					printf(" I/O Controller : %s (%s)\n",
					       vmd_domain_to_controller(hba, buf),
					       get_sys_dev_type(hba->type));

				print_nvme_info(hba);
			}
			printf("\n");
			continue;
		}

		for (devid = entry->devid_list; devid; devid = devid->next) {
			hba = device_by_id(devid->devid);
			if (!hba)
				continue;

			printf(" I/O Controller : %s (%s)\n",
			       hba->path, get_sys_dev_type(hba->type));
			if (hba->type == SYS_DEV_SATA || hba->type == SYS_DEV_SATA_VMD) {
				host_base = ahci_get_port_count(hba->path, &port_count);
				if (ahci_enumerate_ports(hba, port_count, host_base, verbose)) {
					if (verbose > 0)
						pr_err("failed to enumerate ports on %s controller at %s.\n",
						       get_sys_dev_type(hba->type), hba->pci_id);
					result |= 2;
				}
			}
		}
		printf("\n");
	}

	return result;
}
static int export_detail_platform_imsm(int verbose, char *controller_path)
{
	struct sys_dev *list, *hba;
	int result = 1;

	list = find_intel_devices();
	if (!list) {
		if (verbose > 0)
			pr_err("IMSM_DETAIL_PLATFORM_ERROR=NO_INTEL_DEVICES\n");
		return 2;
	}

	for (hba = list; hba; hba = hba->next) {
		if (controller_path && (compare_paths(hba->path, controller_path) != 0))
			continue;
		if (!find_imsm_capability(hba) && verbose > 0) {
			char buf[PATH_MAX];

			pr_err("IMSM_DETAIL_PLATFORM_ERROR=NO_IMSM_CAPABLE_DEVICE_UNDER_%s\n",
			       hba->type == SYS_DEV_VMD || hba->type == SYS_DEV_SATA_VMD ?
			       vmd_domain_to_controller(hba, buf) : hba->path);
		} else
			result = 0;
	}

	const struct orom_entry *entry;

	for (entry = orom_entries; entry; entry = entry->next) {
		if (entry->type == SYS_DEV_VMD || entry->type == SYS_DEV_SATA_VMD) {
			for (hba = list; hba; hba = hba->next)
				print_imsm_capability_export(&entry->orom);
			continue;
		}
		print_imsm_capability_export(&entry->orom);
	}

	return result;
}
static int match_home_imsm(struct supertype *st, char *homehost)
{
	/* the imsm metadata format does not specify any host
	 * identification information.  We return -1 since we can never
	 * confirm nor deny whether a given array is "meant" for this
	 * host.  We rely on compare_super and the 'family_num' fields to
	 * exclude member disks that do not belong, and we rely on
	 * mdadm.conf to specify the arrays that should be assembled.
	 * Auto-assembly may still pick up "foreign" arrays.
	 */
	return -1;
}
static void uuid_from_super_imsm(struct supertype *st, int uuid[4])
{
	/* The uuid returned here is used for:
	 *  uuid to put into bitmap file (Create, Grow)
	 *  uuid for backup header when saving critical section (Grow)
	 *  comparing uuids when re-adding a device into an array
	 *    In these cases the uuid required is that of the data-array,
	 *    not the device-set.
	 *  uuid to recognise same set when adding a missing device back
	 *    to an array.  This is a uuid for the device-set.
	 *
	 * For each of these we can make do with a truncated
	 * or hashed uuid rather than the original, as long as
	 * everyone agrees.
	 * In each case the uuid required is that of the data-array,
	 * not the device-set.
	 */
	/* imsm does not track uuids, so we synthesize one using sha1 on
	 * - the signature (which is constant for all imsm arrays, but no matter)
	 * - the orig_family_num of the container
	 * - the index number of the volume
	 * - the 'serial' number of the volume.
	 * Hopefully these are all constant.
	 */
	struct intel_super *super = st->sb;
	char buf[20];
	__u32 family_num;
	struct sha1_ctx ctx;
	struct imsm_dev *dev = NULL;

	/* some mdadm versions failed to set ->orig_family_num, in which
	 * case fall back to ->family_num.  orig_family_num will be
	 * fixed up with the first metadata update.
	 */
	family_num = super->anchor->orig_family_num;
	if (family_num == 0)
		family_num = super->anchor->family_num;
	sha1_init_ctx(&ctx);
	sha1_process_bytes(super->anchor->sig, MPB_SIG_LEN, &ctx);
	sha1_process_bytes(&family_num, sizeof(__u32), &ctx);
	if (super->current_vol >= 0)
		dev = get_imsm_dev(super, super->current_vol);
	if (dev) {
		__u32 vol = super->current_vol;

		sha1_process_bytes(&vol, sizeof(vol), &ctx);
		sha1_process_bytes(dev->volume, MAX_RAID_SERIAL_LEN, &ctx);
	}
	sha1_finish_ctx(&ctx, buf);
	memcpy(uuid, buf, 4*4);
}
static __u32 migr_strip_blocks_resync(struct imsm_dev *dev)
{
	/* migr_strip_size when repairing or initializing parity */
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	__u32 chunk = __le32_to_cpu(map->blocks_per_strip);

	switch (get_imsm_raid_level(map)) {
	case 5:
	case 10:
		return chunk;
	default:
		return 128*1024 >> 9;
	}
}
static __u32 migr_strip_blocks_rebuild(struct imsm_dev *dev)
{
	/* migr_strip_size when rebuilding a degraded disk, no idea why
	 * this is different than migr_strip_size_resync(), but it's good
	 * to be compatible
	 */
	struct imsm_map *map = get_imsm_map(dev, MAP_1);
	__u32 chunk = __le32_to_cpu(map->blocks_per_strip);

	switch (get_imsm_raid_level(map)) {
	case 1:
	case 10:
		if (map->num_members % map->num_domains == 0)
			return 128*1024 >> 9;
		else
			return chunk;
	case 5:
		return max((__u32) 64*1024 >> 9, chunk);
	default:
		return 128*1024 >> 9;
	}
}
static __u32 num_stripes_per_unit_resync(struct imsm_dev *dev)
{
	struct imsm_map *lo = get_imsm_map(dev, MAP_0);
	struct imsm_map *hi = get_imsm_map(dev, MAP_1);
	__u32 lo_chunk = __le32_to_cpu(lo->blocks_per_strip);
	__u32 hi_chunk = __le32_to_cpu(hi->blocks_per_strip);

	return max((__u32) 1, hi_chunk / lo_chunk);
}
static __u32 num_stripes_per_unit_rebuild(struct imsm_dev *dev)
{
	struct imsm_map *lo = get_imsm_map(dev, MAP_0);
	int level = get_imsm_raid_level(lo);

	if (level == 1 || level == 10) {
		struct imsm_map *hi = get_imsm_map(dev, MAP_1);

		return hi->num_domains;
	} else
		return num_stripes_per_unit_resync(dev);
}
static unsigned long long calc_component_size(struct imsm_map *map,
					      struct imsm_dev *dev)
{
	unsigned long long component_size;
	unsigned long long dev_size = imsm_dev_size(dev);
	long long calc_dev_size = 0;
	unsigned int member_disks = imsm_num_data_members(map);

	if (member_disks == 0)
		return 0;

	component_size = per_dev_array_size(map);
	calc_dev_size = component_size * member_disks;

	/* Component size is rounded to 1MB so difference between size from
	 * metadata and size calculated from num_data_stripes equals up to
	 * 2048 blocks per each device.  If the difference is higher it means
	 * that array size was expanded and num_data_stripes was not updated.
	 */
	if (llabs(calc_dev_size - (long long)dev_size) >
	    (1 << SECT_PER_MB_SHIFT) * member_disks) {
		component_size = dev_size / member_disks;
		dprintf("Invalid num_data_stripes in metadata; expected=%llu, found=%llu\n",
			component_size / map->blocks_per_strip,
			num_data_stripes(map));
	}

	return component_size;
}
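
/*
 * Worked example (illustrative numbers): with SECT_PER_MB_SHIFT == 11 a
 * two-disk volume tolerates up to 2 * 2048 = 4096 blocks of mismatch
 * between imsm_dev_size() and per_dev_array_size() * member_disks.
 * Beyond that the metadata device size wins and component_size is
 * recomputed as dev_size / member_disks.
 */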
static __u32 parity_segment_depth(struct imsm_dev *dev)
{
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	__u32 chunk = __le32_to_cpu(map->blocks_per_strip);

	switch (get_imsm_raid_level(map)) {
	case 1:
	case 10:
		return chunk * map->num_domains;
	case 5:
		return chunk * map->num_members;
	default:
		return 1;
	}
}
static __u32 map_migr_block(struct imsm_dev *dev, __u32 block)
{
	struct imsm_map *map = get_imsm_map(dev, MAP_1);
	__u32 chunk = __le32_to_cpu(map->blocks_per_strip);
	__u32 strip = block / chunk;

	switch (get_imsm_raid_level(map)) {
	case 1:
	case 10: {
		__u32 vol_strip = (strip * map->num_domains) + 1;
		__u32 vol_stripe = vol_strip / map->num_members;

		return vol_stripe * chunk + block % chunk;
	}
	case 5: {
		__u32 stripe = strip / (map->num_members - 1);

		return stripe * chunk + block % chunk;
	}
	default:
		return 0;
	}
}
static __u64 blocks_per_migr_unit(struct intel_super *super,
				  struct imsm_dev *dev)
{
	/* calculate the conversion factor between per member 'blocks'
	 * (md/{resync,rebuild}_start) and imsm migration units, return
	 * 0 for the 'not migrating' and 'unsupported migration' cases
	 */
	if (!dev->vol.migr_state)
		return 0;

	switch (migr_type(dev)) {
	case MIGR_GEN_MIGR: {
		struct migr_record *migr_rec = super->migr_rec;

		return __le32_to_cpu(migr_rec->blocks_per_unit);
	}
	case MIGR_VERIFY:
	case MIGR_REPAIR:
	case MIGR_INIT: {
		struct imsm_map *map = get_imsm_map(dev, MAP_0);
		__u32 stripes_per_unit;
		__u32 blocks_per_unit;
		__u32 parity_depth;
		__u32 migr_chunk;
		__u32 block_map;
		__u32 block_rel;
		__u32 segment;
		__u32 stripe;
		__u8 disks;

		/* yes, this is really the translation of migr_units to
		 * per-member blocks in the 'resync' case
		 */
		stripes_per_unit = num_stripes_per_unit_resync(dev);
		migr_chunk = migr_strip_blocks_resync(dev);
		disks = imsm_num_data_members(map);
		blocks_per_unit = stripes_per_unit * migr_chunk * disks;
		stripe = __le16_to_cpu(map->blocks_per_strip) * disks;
		segment = blocks_per_unit / stripe;
		block_rel = blocks_per_unit - segment * stripe;
		parity_depth = parity_segment_depth(dev);
		block_map = map_migr_block(dev, block_rel);
		return block_map + parity_depth * segment;
	}
	case MIGR_REBUILD: {
		__u32 stripes_per_unit;
		__u32 migr_chunk;

		stripes_per_unit = num_stripes_per_unit_rebuild(dev);
		migr_chunk = migr_strip_blocks_rebuild(dev);
		return migr_chunk * stripes_per_unit;
	}
	case MIGR_STATE_CHANGE:
	default:
		return 0;
	}
}
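
/*
 * Worked example for the MIGR_REBUILD branch above (illustrative
 * numbers): on a two-disk RAID1, num_stripes_per_unit_rebuild() returns
 * hi->num_domains (2) and migr_strip_blocks_rebuild() returns
 * 128*1024 >> 9 = 256 sectors, so one migration unit corresponds to
 * 2 * 256 = 512 per-member blocks of rebuild progress.
 */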
static int imsm_level_to_layout(int level)
{
	switch (level) {
	case 0:
	case 1:
		return 0;
	case 5:
	case 6:
		return ALGORITHM_LEFT_ASYMMETRIC;
	case 10:
		return 0x102;
	}
	return UnSet;
}
/*******************************************************************************
 * Function: read_imsm_migr_rec
 * Description: Function reads imsm migration record from last sector of disk
 * Parameters:
 *	fd	: disk descriptor
 *	super	: metadata info
 * Returns:
 *	 0 : success
 *	-1 : fail
 ******************************************************************************/
static int read_imsm_migr_rec(int fd, struct intel_super *super)
{
	int ret_val = -1;
	unsigned int sector_size = super->sector_size;
	unsigned long long dsize;

	get_dev_size(fd, NULL, &dsize);
	if (lseek64(fd, dsize - (sector_size*MIGR_REC_SECTOR_POSITION),
		    SEEK_SET) < 0) {
		pr_err("Cannot seek to anchor block: %s\n",
		       strerror(errno));
		goto out;
	}
	if ((unsigned int)read(fd, super->migr_rec_buf,
			       MIGR_REC_BUF_SECTORS*sector_size) !=
	    MIGR_REC_BUF_SECTORS*sector_size) {
		pr_err("Cannot read migr record block: %s\n",
		       strerror(errno));
		goto out;
	}
	ret_val = 0;
	if (sector_size == 4096)
		convert_from_4k_imsm_migr_rec(super);

out:
	return ret_val;
}
static struct imsm_dev *imsm_get_device_during_migration(
	struct intel_super *super)
{
	struct intel_dev *dv;

	for (dv = super->devlist; dv; dv = dv->next) {
		if (is_gen_migration(dv->dev))
			return dv->dev;
	}
	return NULL;
}
/*******************************************************************************
 * Function: load_imsm_migr_rec
 * Description: Function reads imsm migration record (it is stored at the last
 *	sector of disk)
 * Parameters:
 *	super	: imsm internal array info
 * Returns:
 *	 0 : success
 *	-1 : fail
 *	-2 : no migration in progress
 ******************************************************************************/
static int load_imsm_migr_rec(struct intel_super *super)
{
	struct dl *dl;
	char nm[30];
	int retval = -1;
	int fd = -1;
	struct imsm_dev *dev;
	struct imsm_map *map;
	int slot = -1;
	int keep_fd = 1;

	/* find map under migration */
	dev = imsm_get_device_during_migration(super);
	/* nothing to load, no migration in progress?
	 */
	if (!dev)
		return -2;

	map = get_imsm_map(dev, MAP_0);
	if (!map)
		return -1;

	for (dl = super->disks; dl; dl = dl->next) {
		/* skip spare and failed disks
		 */
		if (dl->index < 0)
			continue;
		/* read only from one of the first two slots
		 */
		slot = get_imsm_disk_slot(map, dl->index);
		if (slot > 1 || slot < 0)
			continue;

		if (!is_fd_valid(dl->fd)) {
			sprintf(nm, "%d:%d", dl->major, dl->minor);
			fd = dev_open(nm, O_RDONLY);

			if (is_fd_valid(fd)) {
				keep_fd = 0;
				break;
			}
		} else {
			fd = dl->fd;
			break;
		}
	}

	if (!is_fd_valid(fd))
		return retval;
	retval = read_imsm_migr_rec(fd, super);
	if (!keep_fd)
		close(fd);

	return retval;
}
/*******************************************************************************
 * function: imsm_create_metadata_checkpoint_update
 * Description: It creates update for checkpoint change.
 * Parameters:
 *	super	: imsm internal array info
 *	u	: pointer to prepared update
 * Returns:
 *	Update length.
 *	If length is equal to 0, input pointer u contains no update
 ******************************************************************************/
static int imsm_create_metadata_checkpoint_update(
	struct intel_super *super,
	struct imsm_update_general_migration_checkpoint **u)
{
	int update_memory_size = 0;

	dprintf("(enter)\n");

	if (u == NULL)
		return 0;
	*u = NULL;

	/* size of all update data without anchor */
	update_memory_size =
		sizeof(struct imsm_update_general_migration_checkpoint);

	*u = xcalloc(1, update_memory_size);
	if (*u == NULL) {
		dprintf("error: cannot get memory\n");
		return 0;
	}
	(*u)->type = update_general_migration_checkpoint;
	(*u)->curr_migr_unit = current_migr_unit(super->migr_rec);
	dprintf("prepared for %llu\n", (unsigned long long)(*u)->curr_migr_unit);

	return update_memory_size;
}
static void imsm_update_metadata_locally(struct supertype *st,
					 void *buf, int len);
/*******************************************************************************
 * Function: write_imsm_migr_rec
 * Description: Function writes imsm migration record
 *	(at the last sector of disk)
 * Parameters:
 *	super	: imsm internal array info
 * Returns:
 *	 0 : success
 *	-1 : fail
 ******************************************************************************/
static int write_imsm_migr_rec(struct supertype *st)
{
	struct intel_super *super = st->sb;
	unsigned int sector_size = super->sector_size;
	unsigned long long dsize;
	int retval = -1;
	struct dl *sd;
	int len;
	struct imsm_update_general_migration_checkpoint *u;
	struct imsm_dev *dev;
	struct imsm_map *map;

	/* find map under migration */
	dev = imsm_get_device_during_migration(super);
	/* if no migration, write buffer anyway to clear migr_record
	 * on disk based on first available device
	 */
	if (dev == NULL)
		dev = get_imsm_dev(super, super->current_vol < 0 ? 0 :
				   super->current_vol);

	map = get_imsm_map(dev, MAP_0);

	if (sector_size == 4096)
		convert_to_4k_imsm_migr_rec(super);
	for (sd = super->disks; sd; sd = sd->next) {
		int slot = -1;

		/* skip failed and spare devices */
		if (sd->index < 0)
			continue;
		/* write to 2 first slots only */
		if (map)
			slot = get_imsm_disk_slot(map, sd->index);
		if (map == NULL || slot > 1 || slot < 0)
			continue;

		get_dev_size(sd->fd, NULL, &dsize);
		if (lseek64(sd->fd, dsize - (MIGR_REC_SECTOR_POSITION *
					     sector_size),
			    SEEK_SET) < 0) {
			pr_err("Cannot seek to anchor block: %s\n",
			       strerror(errno));
			goto out;
		}
		if ((unsigned int)write(sd->fd, super->migr_rec_buf,
					MIGR_REC_BUF_SECTORS*sector_size) !=
		    MIGR_REC_BUF_SECTORS*sector_size) {
			pr_err("Cannot write migr record block: %s\n",
			       strerror(errno));
			goto out;
		}
	}
	if (sector_size == 4096)
		convert_from_4k_imsm_migr_rec(super);
	/* update checkpoint information in metadata */
	len = imsm_create_metadata_checkpoint_update(super, &u);
	if (len <= 0) {
		dprintf("imsm: Cannot prepare update\n");
		goto out;
	}
	retval = 0;
	/* update metadata locally */
	imsm_update_metadata_locally(st, u, len);
	/* and possibly remotely */
	if (st->update_tail) {
		append_metadata_update(st, u, len);
		/* during reshape we do all work inside metadata handler
		 * manage_reshape(), so metadata update has to be triggered
		 * inside it
		 */
		flush_metadata_updates(st);
		st->update_tail = &st->updates;
	} else
		free(u);

out:
	return retval;
}
/* spare/missing disk activations are not allowed while the
 * array/container performs a reshape operation, because
 * all arrays in a container work on the same disk set
 */
int imsm_reshape_blocks_arrays_changes(struct intel_super *super)
{
	int rv = 0;
	struct intel_dev *i_dev;
	struct imsm_dev *dev;

	/* check whole container
	 */
	for (i_dev = super->devlist; i_dev; i_dev = i_dev->next) {
		dev = i_dev->dev;
		if (is_gen_migration(dev)) {
			/* No repair during any migration in container
			 */
			rv = 1;
			break;
		}
	}
	return rv;
}
static unsigned long long imsm_component_size_alignment_check(int level,
					      int chunk_size,
					      unsigned int sector_size,
					      unsigned long long component_size)
{
	unsigned int component_size_alignment;

	/* check component size alignment
	 */
	component_size_alignment = component_size % (chunk_size/sector_size);

	dprintf("(Level: %i, chunk_size = %i, component_size = %llu), component_size_alignment = %u\n",
		level, chunk_size, component_size, component_size_alignment);

	if (component_size_alignment && (level != 1) && (level != UnSet)) {
		dprintf("imsm: reported component size aligned from %llu ",
			component_size);
		component_size -= component_size_alignment;
		dprintf_cont("to %llu (%i).\n",
			     component_size, component_size_alignment);
	}

	return component_size;
}
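
/*
 * Example (illustrative values): level 5, chunk_size 131072 bytes and
 * sector_size 512 give an alignment unit of 131072/512 = 256 sectors;
 * a component_size of 10000 sectors has 10000 % 256 = 16 sectors of
 * slack, so it is trimmed to 9984.
 */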
/*******************************************************************************
 * Function: get_bitmap_header_sector
 * Description: Returns the sector where the bitmap header is placed.
 * Parameters:
 *	st	: supertype information
 *	dev_idx	: index of the device with bitmap
 * Returns:
 *	The sector where the bitmap header is placed
 ******************************************************************************/
static unsigned long long get_bitmap_header_sector(struct intel_super *super,
						   int dev_idx)
{
	struct imsm_dev *dev = get_imsm_dev(super, dev_idx);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);

	if (!super->sector_size) {
		dprintf("sector size is not set\n");
		return 0;
	}

	return pba_of_lba0(map) + calc_component_size(map, dev) +
	       (IMSM_BITMAP_HEADER_OFFSET / super->sector_size);
}
/*******************************************************************************
 * Function: get_bitmap_sector
 * Description: Returns the sector where the bitmap is placed.
 * Parameters:
 *	st	: supertype information
 *	dev_idx	: index of the device with bitmap
 * Returns:
 *	The sector where the bitmap is placed
 ******************************************************************************/
static unsigned long long get_bitmap_sector(struct intel_super *super,
					    int dev_idx)
{
	if (!super->sector_size) {
		dprintf("sector size is not set\n");
		return 0;
	}

	return get_bitmap_header_sector(super, dev_idx) +
	       (IMSM_BITMAP_HEADER_SIZE / super->sector_size);
}
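
/*
 * Layout sketch implied by the two helpers above (offsets in sectors,
 * assuming 512-byte sectors for the example):
 *   bitmap header = pba_of_lba0 + component_size
 *                   + IMSM_BITMAP_HEADER_OFFSET / 512
 *   bitmap        = bitmap header + IMSM_BITMAP_HEADER_SIZE / 512
 */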
static unsigned long long get_ppl_sector(struct intel_super *super, int dev_idx)
{
	struct imsm_dev *dev = get_imsm_dev(super, dev_idx);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);

	return pba_of_lba0(map) +
	       (num_data_stripes(map) * map->blocks_per_strip);
}
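
/*
 * Note: num_data_stripes * blocks_per_strip is the per-disk data area,
 * so the PPL computed above sits immediately after the member data on
 * each drive (offsets in 512-byte sectors).
 */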
static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info, char *dmap)
{
	struct intel_super *super = st->sb;
	struct migr_record *migr_rec = super->migr_rec;
	struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	struct imsm_map *prev_map = get_imsm_map(dev, MAP_1);
	struct imsm_map *map_to_analyse = map;
	struct dl *dl;
	int map_disks = info->array.raid_disks;

	memset(info, 0, sizeof(*info));
	if (prev_map)
		map_to_analyse = prev_map;

	dl = super->current_disk;

	info->container_member = super->current_vol;
	info->array.raid_disks = map->num_members;
	info->array.level = get_imsm_raid_level(map_to_analyse);
	info->array.layout = imsm_level_to_layout(info->array.level);
	info->array.md_minor = -1;
	info->array.ctime = 0;
	info->array.utime = 0;
	info->array.chunk_size =
		__le16_to_cpu(map_to_analyse->blocks_per_strip) << 9;
	info->array.state = !(dev->vol.dirty & RAIDVOL_DIRTY);
	info->custom_array_size = imsm_dev_size(dev);
	info->recovery_blocked = imsm_reshape_blocks_arrays_changes(st->sb);

	if (is_gen_migration(dev)) {
		/*
		 * device prev_map should be added if it is in the middle
		 * of migration
		 */
		info->reshape_active = 1;
		info->new_level = get_imsm_raid_level(map);
		info->new_layout = imsm_level_to_layout(info->new_level);
		info->new_chunk = __le16_to_cpu(map->blocks_per_strip) << 9;
		info->delta_disks = map->num_members - prev_map->num_members;
		if (info->delta_disks) {
			/* this needs to be applied to every array
			 * in the container.
			 */
			info->reshape_active = CONTAINER_RESHAPE;
		}
		/* The shape information that we give to md might have to be
		 * modified to cope with md's requirements for reshaping arrays.
		 * For example, when reshaping a RAID0, md requires it to be
		 * presented as a degraded RAID4.
		 * Also if a RAID0 is migrating to a RAID5 we need to specify
		 * the array as already being RAID5, but the 'before' layout
		 * is a RAID4-like layout.
		 */
		switch (info->array.level) {
		case 0:
			switch (info->new_level) {
			case 0:
				/* conversion is happening as RAID4 */
				info->array.level = 4;
				info->array.raid_disks += 1;
				break;
			case 5:
				/* conversion is happening as RAID5 */
				info->array.level = 5;
				info->array.layout = ALGORITHM_PARITY_N;
				info->delta_disks -= 1;
				break;
			default:
				/* FIXME error message */
				info->array.level = UnSet;
				break;
			}
			break;
		}
	} else {
		info->new_level = UnSet;
		info->new_layout = UnSet;
		info->new_chunk = info->array.chunk_size;
		info->delta_disks = 0;
	}

	if (dl) {
		info->disk.major = dl->major;
		info->disk.minor = dl->minor;
		info->disk.number = dl->index;
		info->disk.raid_disk = get_imsm_disk_slot(map_to_analyse,
							  dl->index);
	}

	info->data_offset = pba_of_lba0(map_to_analyse);
	info->component_size = calc_component_size(map, dev);
	info->component_size = imsm_component_size_alignment_check(
							info->array.level,
							info->array.chunk_size,
							super->sector_size,
							info->component_size);
	info->bb.supported = 1;

	memset(info->uuid, 0, sizeof(info->uuid));
	info->recovery_start = MaxSector;

	if (info->array.level == 5 &&
	    (dev->rwh_policy == RWH_DISTRIBUTED ||
	     dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)) {
		info->consistency_policy = CONSISTENCY_POLICY_PPL;
		info->ppl_sector = get_ppl_sector(super, super->current_vol);
		if (dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
			info->ppl_size = MULTIPLE_PPL_AREA_SIZE_IMSM >> 9;
		else
			info->ppl_size = (PPL_HEADER_SIZE + PPL_ENTRY_SPACE)
					 >> 9;
	} else if (info->array.level <= 0) {
		info->consistency_policy = CONSISTENCY_POLICY_NONE;
	} else {
		if (dev->rwh_policy == RWH_BITMAP) {
			info->bitmap_offset = get_bitmap_sector(super, super->current_vol);
			info->consistency_policy = CONSISTENCY_POLICY_BITMAP;
		} else {
			info->consistency_policy = CONSISTENCY_POLICY_RESYNC;
		}
	}

	info->reshape_progress = 0;
	info->resync_start = MaxSector;
	if ((map_to_analyse->map_state == IMSM_T_STATE_UNINITIALIZED ||
	     !(info->array.state & 1)) &&
	    imsm_reshape_blocks_arrays_changes(super) == 0) {
		info->resync_start = 0;
	}
	if (dev->vol.migr_state) {
		switch (migr_type(dev)) {
		case MIGR_REPAIR:
		case MIGR_INIT: {
			__u64 blocks_per_unit = blocks_per_migr_unit(super,
								     dev);
			__u64 units = vol_curr_migr_unit(dev);

			info->resync_start = blocks_per_unit * units;
			break;
		}
		case MIGR_GEN_MIGR: {
			__u64 blocks_per_unit = blocks_per_migr_unit(super,
								     dev);
			__u64 units = current_migr_unit(migr_rec);
			int used_disks;

			if (__le32_to_cpu(migr_rec->ascending_migr) &&
			    (units <
			     (get_num_migr_units(migr_rec)-1)) &&
			    (super->migr_rec->rec_status ==
			     __cpu_to_le32(UNIT_SRC_IN_CP_AREA)))
				units++;

			info->reshape_progress = blocks_per_unit * units;

			dprintf("IMSM: General Migration checkpoint : %llu (%llu) -> read reshape progress : %llu\n",
				(unsigned long long)units,
				(unsigned long long)blocks_per_unit,
				info->reshape_progress);

			used_disks = imsm_num_data_members(prev_map);
			if (used_disks > 0) {
				info->custom_array_size = per_dev_array_size(map) *
					used_disks;
			}
			break;
		}
		case MIGR_VERIFY:
			/* we could emulate the checkpointing of
			 * 'sync_action=check' migrations, but for now
			 * we just immediately complete them
			 */
		case MIGR_REBUILD:
			/* this is handled by container_content_imsm() */
		case MIGR_STATE_CHANGE:
			/* FIXME handle other migrations */
		default:
			/* we are not dirty, so... */
			info->resync_start = MaxSector;
		}
	}

	strncpy(info->name, (char *) dev->volume, MAX_RAID_SERIAL_LEN);
	info->name[MAX_RAID_SERIAL_LEN] = 0;

	info->array.major_version = -1;
	info->array.minor_version = -2;
	sprintf(info->text_version, "/%s/%d", st->container_devnm, info->container_member);
	info->safe_mode_delay = 4000;  /* 4 secs like the Matrix driver */
	uuid_from_super_imsm(st, info->uuid);

	if (dmap) {
		int i, j;

		for (i = 0; i < map_disks; i++) {
			dmap[i] = 0;
			if (i < info->array.raid_disks) {
				struct imsm_disk *dsk;

				j = get_imsm_disk_idx(dev, i, MAP_X);
				dsk = get_imsm_disk(super, j);
				if (dsk && (dsk->status & CONFIGURED_DISK))
					dmap[i] = 1;
			}
		}
	}
}
static __u8 imsm_check_degraded(struct intel_super *super, struct imsm_dev *dev,
				int failed, int look_in_map);

static int imsm_count_failed(struct intel_super *super, struct imsm_dev *dev,
			     int look_in_map);

static void manage_second_map(struct intel_super *super, struct imsm_dev *dev)
{
	if (is_gen_migration(dev)) {
		int failed;
		__u8 map_state;
		struct imsm_map *map2 = get_imsm_map(dev, MAP_1);

		failed = imsm_count_failed(super, dev, MAP_1);
		map_state = imsm_check_degraded(super, dev, failed, MAP_1);
		if (map2->map_state != map_state) {
			map2->map_state = map_state;
			super->updates_pending++;
		}
	}
}
static struct imsm_disk *get_imsm_missing(struct intel_super *super, __u8 index)
{
	struct dl *d;

	for (d = super->missing; d; d = d->next)
		if (d->index == index)
			return &d->disk;
	return NULL;
}
static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info, char *map)
{
	struct intel_super *super = st->sb;
	struct imsm_disk *disk;
	int map_disks = info->array.raid_disks;
	int max_enough = -1;
	int i;
	struct imsm_super *mpb;

	if (super->current_vol >= 0) {
		getinfo_super_imsm_volume(st, info, map);
		return;
	}
	memset(info, 0, sizeof(*info));

	/* Set raid_disks to zero so that Assemble will always pull in valid
	 * spares
	 */
	info->array.raid_disks = 0;
	info->array.level = LEVEL_CONTAINER;
	info->array.layout = 0;
	info->array.md_minor = -1;
	info->array.ctime = 0; /* N/A for imsm */
	info->array.utime = 0;
	info->array.chunk_size = 0;

	info->disk.major = 0;
	info->disk.minor = 0;
	info->disk.raid_disk = -1;
	info->reshape_active = 0;
	info->array.major_version = -1;
	info->array.minor_version = -2;
	strcpy(info->text_version, "imsm");
	info->safe_mode_delay = 0;
	info->disk.number = -1;
	info->disk.state = 0;
	info->recovery_start = MaxSector;
	info->recovery_blocked = imsm_reshape_blocks_arrays_changes(st->sb);
	info->bb.supported = 1;

	/* do we have the all the insync disks that we expect? */
	mpb = super->anchor;
	info->events = __le32_to_cpu(mpb->generation_num);

	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);
		int failed, enough, j, missing = 0;
		struct imsm_map *map;
		__u8 state;

		failed = imsm_count_failed(super, dev, MAP_0);
		state = imsm_check_degraded(super, dev, failed, MAP_0);
		map = get_imsm_map(dev, MAP_0);

		/* any newly missing disks?
		 * (catches single-degraded vs double-degraded)
		 */
		for (j = 0; j < map->num_members; j++) {
			__u32 ord = get_imsm_ord_tbl_ent(dev, j, MAP_0);
			__u32 idx = ord_to_idx(ord);

			if (super->disks && super->disks->index == (int)idx)
				info->disk.raid_disk = j;

			if (!(ord & IMSM_ORD_REBUILD) &&
			    get_imsm_missing(super, idx)) {
				missing = 1;
				break;
			}
		}

		if (state == IMSM_T_STATE_FAILED)
			enough = -1;
		else if (state == IMSM_T_STATE_DEGRADED &&
			 (state != map->map_state || missing))
			enough = 0;
		else /* we're normal, or already degraded */
			enough = 1;
		if (is_gen_migration(dev) && missing) {
			/* during general migration we need all disks
			 * that process is running on.
			 * No new missing disk is allowed.
			 */
			max_enough = -1;
			break;
		}
		/* no more checks necessary
		 */
		if (enough == -1) {
			max_enough = -1;
			break;
		}

		/* in the missing/failed disk case check to see
		 * if at least one array is runnable
		 */
		max_enough = max(max_enough, enough);
	}

	info->container_enough = max_enough;

	if (super->disks) {
		__u32 reserved = imsm_reserved_sectors(super, super->disks);

		disk = &super->disks->disk;
		info->data_offset = total_blocks(&super->disks->disk) - reserved;
		info->component_size = reserved;
		info->disk.state = is_configured(disk) ? (1 << MD_DISK_ACTIVE) : 0;
		/* we don't change info->disk.raid_disk here because
		 * this state will be finalized in mdmon after we have
		 * found the 'most fresh' version of the metadata
		 */
		info->disk.state |= is_failed(disk) ? (1 << MD_DISK_FAULTY) : 0;
		info->disk.state |= (is_spare(disk) || is_journal(disk)) ?
				    0 : (1 << MD_DISK_SYNC);
	}

	/* only call uuid_from_super_imsm when this disk is part of a populated container,
	 * ->compare_super may have updated the 'num_raid_devs' field for spares
	 */
	if (info->disk.state & (1 << MD_DISK_SYNC) || super->anchor->num_raid_devs)
		uuid_from_super_imsm(st, info->uuid);
	else
		memcpy(info->uuid, uuid_zero, sizeof(uuid_zero));

	/* I don't know how to compute 'map' on imsm, so use safe default */
	if (map)
		for (i = 0; i < map_disks; i++)
			map[i] = 1;
}
/* allocates memory and fills disk in mdinfo structure
 * for each disk in array */
struct mdinfo *getinfo_super_disks_imsm(struct supertype *st)
{
	struct mdinfo *mddev;
	struct intel_super *super = st->sb;
	struct imsm_disk *disk;
	int count = 0;
	struct dl *dl;

	if (!super || !super->disks)
		return NULL;

	mddev = xcalloc(1, sizeof(*mddev));
	for (dl = super->disks; dl; dl = dl->next) {
		struct mdinfo *tmp;

		disk = &dl->disk;
		tmp = xcalloc(1, sizeof(*tmp));
		tmp->next = mddev->devs;
		mddev->devs = tmp;
		tmp->disk.number = count++;
		tmp->disk.major = dl->major;
		tmp->disk.minor = dl->minor;
		tmp->disk.state = is_configured(disk) ?
				  (1 << MD_DISK_ACTIVE) : 0;
		tmp->disk.state |= is_failed(disk) ? (1 << MD_DISK_FAULTY) : 0;
		tmp->disk.state |= is_spare(disk) ? 0 : (1 << MD_DISK_SYNC);
		tmp->disk.raid_disk = -1;
	}

	return mddev;
}
static int update_super_imsm(struct supertype *st, struct mdinfo *info,
			     enum update_opt update, char *devname,
			     int verbose, int uuid_set, char *homehost)
{
	/* For 'assemble' and 'force' we need to return non-zero if any
	 * change was made.  For others, the return value is ignored.
	 * Update options are:
	 *  force-one : This device looks a bit old but needs to be included,
	 *        update age info appropriately.
	 *  assemble: clear any 'faulty' flag to allow this device to
	 *        be assembled.
	 *  force-array: Array is degraded but being forced, mark it clean
	 *        if that will be needed to assemble it.
	 *
	 *  newdev:  not used ????
	 *  grow:  Array has gained a new device - this is currently for
	 *        raid6 in DDF
	 *  resync: mark as dirty so a resync will happen.
	 *  name:  update the name - preserving the homehost
	 *  uuid:  Change the uuid of the array to match what is given
	 *
	 * Following are not relevant for this imsm:
	 *  sparc2.2 : update from old dodgy metadata
	 *  super-minor: change the preferred_minor number
	 *  summaries:  update redundant counters.
	 *  homehost:  update the recorded homehost
	 *  _reshape_progress: record new reshape_progress position.
	 */
	int rv = 1;
	struct intel_super *super = st->sb;
	struct imsm_super *mpb;

	/* we can only update container info */
	if (!super || super->current_vol >= 0 || !super->anchor)
		return 1;

	mpb = super->anchor;

	switch (update) {
	case UOPT_UUID:
		/* We take this to mean that the family_num should be updated.
		 * However that is much smaller than the uuid so we cannot really
		 * allow an explicit uuid to be given.  And it is hard to reliably
		 * know if one was.
		 * So if !uuid_set we know the current uuid is random and just used
		 * the first 'int' and copy it to the other 3 positions.
		 * Otherwise we require the 4 'int's to be the same as would be the
		 * case if we are using a random uuid.  So an explicit uuid will be
		 * accepted as long as all four ints are the same... which shouldn't hurt
		 */
		if (!uuid_set) {
			info->uuid[1] = info->uuid[2] = info->uuid[3] = info->uuid[0];
			rv = 0;
		} else {
			if (info->uuid[0] != info->uuid[1] ||
			    info->uuid[1] != info->uuid[2] ||
			    info->uuid[2] != info->uuid[3])
				rv = -1;
			else
				rv = 0;
		}
		if (rv == 0)
			mpb->orig_family_num = info->uuid[0];
		break;
	case UOPT_SPEC_ASSEMBLE:
		rv = 0;
		break;
	default:
		rv = -1;
		break;
	}

	/* successful update? recompute checksum */
	if (rv == 0)
		mpb->check_sum = __le32_to_cpu(__gen_imsm_checksum(mpb));

	return rv;
}
static size_t disks_to_mpb_size(int disks)
{
	size_t size;

	size = sizeof(struct imsm_super);
	size += (disks - 1) * sizeof(struct imsm_disk);
	size += 2 * sizeof(struct imsm_dev);
	/* up to 2 maps per raid device (-2 for the imsm_maps already in imsm_dev) */
	size += (4 - 2) * sizeof(struct imsm_map);
	/* 4 possible disk_ord_tbl's */
	size += 4 * (disks - 1) * sizeof(__u32);
	/* maximum bbm log */
	size += sizeof(struct bbm_log);

	return size;
}
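
/*
 * Back-of-the-envelope example (illustrative; the struct sizes depend
 * on their definitions): for disks = 4 the estimate is one imsm_super,
 * three additional imsm_disk entries, two imsm_dev plus two extra
 * imsm_map, 4 * 3 = 12 __u32 ord-table slots (48 bytes) and one
 * maximal bbm_log.
 */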
static __u64 avail_size_imsm(struct supertype *st, __u64 devsize,
			     unsigned long long data_offset)
{
	if (devsize < (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS))
		return 0;

	return devsize - (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
}
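
/*
 * Example (using the constants defined at the top of this file):
 * MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS = 2210 + 8192 = 10402 sectors,
 * so a 1 TiB disk of 2147483648 sectors leaves 2147483648 - 10402 =
 * 2147473246 sectors usable for data.
 */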
static void free_devlist(struct intel_super *super)
{
	struct intel_dev *dv;

	while (super->devlist) {
		dv = super->devlist->next;
		free(super->devlist->dev);
		free(super->devlist);
		super->devlist = dv;
	}
}
static void imsm_copy_dev(struct imsm_dev *dest, struct imsm_dev *src)
{
	memcpy(dest, src, sizeof_imsm_dev(src, 0));
}
static int compare_super_imsm(struct supertype *st, struct supertype *tst,
			      int verbose)
{
	/* return:
	 *  0 same, or first was empty, and second was copied
	 *  1 sb are different
	 */
	struct intel_super *first = st->sb;
	struct intel_super *sec = tst->sb;

	if (!first) {
		st->sb = tst->sb;
		tst->sb = NULL;
		return 0;
	}

	/* in platform dependent environment test if the disks
	 * use the same Intel hba
	 * if not on Intel hba at all, allow anything.
	 * doesn't check HBAs if num_raid_devs is not set, as it means
	 * it is a free floating spare, and all spares regardless of HBA type
	 * will fall into separate container during the assembly
	 */
	if (first->hba && sec->hba && first->anchor->num_raid_devs != 0) {
		if (first->hba->type != sec->hba->type) {
			if (verbose)
				pr_err("HBAs of devices do not match %s != %s\n",
				       get_sys_dev_type(first->hba->type),
				       get_sys_dev_type(sec->hba->type));
			return 1;
		}
		if (first->orom != sec->orom) {
			if (verbose)
				pr_err("HBAs of devices do not match %s != %s\n",
				       first->hba->pci_id, sec->hba->pci_id);
			return 1;
		}
	}

	if (first->anchor->num_raid_devs > 0 &&
	    sec->anchor->num_raid_devs > 0) {
		/* Determine if these disks might ever have been
		 * related.  Further disambiguation can only take place
		 * in load_super_imsm_all
		 */
		__u32 first_family = first->anchor->orig_family_num;
		__u32 sec_family = sec->anchor->orig_family_num;

		if (memcmp(first->anchor->sig, sec->anchor->sig,
			   MAX_SIGNATURE_LENGTH) != 0)
			return 1;

		if (first_family == 0)
			first_family = first->anchor->family_num;
		if (sec_family == 0)
			sec_family = sec->anchor->family_num;

		if (first_family != sec_family)
			return 1;
	}

	/* if an anchor does not have num_raid_devs set then it is a free
	 * floating spare. don't associate spare with any array, as during assembly
	 * spares shall fall into separate container, from which they can be moved
	 * when necessary
	 */
	if (first->anchor->num_raid_devs ^ sec->anchor->num_raid_devs)
		return 1;

	return 0;
}
static void fd2devname(int fd, char *name)
{
	char *nm;

	nm = fd2kname(fd);
	if (!nm)
		return;

	snprintf(name, MAX_RAID_SERIAL_LEN, "/dev/%s", nm);
}
static int nvme_get_serial(int fd, void *buf, size_t buf_len)
{
	char path[PATH_MAX];
	char *name = fd2kname(fd);

	if (!name)
		return 1;

	if (strncmp(name, "nvme", 4) != 0)
		return 1;

	if (!diskfd_to_devpath(fd, 1, path))
		return 1;

	return devpath_to_char(path, "serial", buf, buf_len, 0);
}
mdadm_status_t scsi_get_serial(int fd, void *buf, size_t buf_len)
{
	struct sg_io_hdr io_hdr = {0};
	unsigned char rsp_buf[255];
	unsigned char inq_cmd[] = {INQUIRY, 1, 0x80, 0, sizeof(rsp_buf), 0};
	unsigned char sense[32];
	unsigned int rsp_len;
	int rv;

	io_hdr.interface_id = 'S';
	io_hdr.cmdp = inq_cmd;
	io_hdr.cmd_len = sizeof(inq_cmd);
	io_hdr.dxferp = rsp_buf;
	io_hdr.dxfer_len = sizeof(rsp_buf);
	io_hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	io_hdr.sbp = sense;
	io_hdr.mx_sb_len = sizeof(sense);
	io_hdr.timeout = 5000;

	rv = ioctl(fd, SG_IO, &io_hdr);

	if (rv)
		return MDADM_STATUS_ERROR;

	if ((io_hdr.info & SG_INFO_OK_MASK) != SG_INFO_OK)
		return MDADM_STATUS_ERROR;

	rsp_len = rsp_buf[3];

	if (!rsp_len || buf_len < rsp_len)
		return MDADM_STATUS_ERROR;

	memcpy(buf, &rsp_buf[4], rsp_len);

	return MDADM_STATUS_SUCCESS;
}
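
/*
 * Note on the INQUIRY command above: CDB byte 1 = 1 sets the EVPD bit
 * and byte 2 = 0x80 selects the standard "Unit Serial Number" VPD page;
 * per the SCSI spec rsp_buf[3] carries the page length and the serial
 * string begins at byte 4, which is exactly what gets copied out.
 */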
static int imsm_read_serial(int fd, char *devname,
			    __u8 *serial, size_t serial_buf_len)
{
	char buf[50];
	int rv;
	size_t len;
	char *dest;
	char *src;
	unsigned int i;

	memset(buf, 0, sizeof(buf));

	if (check_env("IMSM_DEVNAME_AS_SERIAL")) {
		memset(serial, 0, serial_buf_len);
		fd2devname(fd, (char *) serial);
		return 0;
	}

	rv = nvme_get_serial(fd, buf, sizeof(buf));

	if (rv)
		rv = scsi_get_serial(fd, buf, sizeof(buf));

	if (rv != 0) {
		if (devname)
			pr_err("Failed to retrieve serial for %s\n",
			       devname);
		return rv;
	}

	/* trim all whitespace and non-printable characters and convert
	 * ':' to ';'
	 */
	for (i = 0, dest = buf; i < sizeof(buf) && buf[i]; i++) {
		src = &buf[i];
		if (*src > 0x20) {
			/* ':' is reserved for use in placeholder serial
			 * numbers for missing disks
			 */
			if (*src == ':')
				*dest++ = ';';
			else
				*dest++ = *src;
		}
	}
	len = dest - buf;
	dest = buf;

	if (len > serial_buf_len) {
		/* truncate leading characters */
		dest += len - serial_buf_len;
		len = serial_buf_len;
	}

	memset(serial, 0, serial_buf_len);
	memcpy(serial, dest, len);

	return 0;
}
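
/*
 * Example: with IMSM_DEVNAME_AS_SERIAL=1 in the environment the
 * "serial" recorded for /dev/sda is simply the string "/dev/sda",
 * which is convenient for testing; otherwise the NVMe sysfs serial is
 * tried first and a SCSI INQUIRY is the fallback, as coded above.
 */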
static int serialcmp(__u8 *s1, __u8 *s2)
{
	return strncmp((char *) s1, (char *) s2, MAX_RAID_SERIAL_LEN);
}

static void serialcpy(__u8 *dest, __u8 *src)
{
	strncpy((char *) dest, (char *) src, MAX_RAID_SERIAL_LEN);
}

static struct dl *serial_to_dl(__u8 *serial, struct intel_super *super)
{
	struct dl *dl;

	for (dl = super->disks; dl; dl = dl->next)
		if (serialcmp(dl->serial, serial) == 0)
			break;

	return dl;
}

static struct imsm_disk *
__serial_to_disk(__u8 *serial, struct imsm_super *mpb, int *idx)
{
	int i;

	for (i = 0; i < mpb->num_disks; i++) {
		struct imsm_disk *disk = __get_imsm_disk(mpb, i);

		if (serialcmp(disk->serial, serial) == 0) {
			if (idx)
				*idx = i;
			return disk;
		}
	}

	return NULL;
}
static int
load_imsm_disk(int fd, struct intel_super *super, char *devname, int keep_fd)
{
	struct imsm_disk *disk;
	struct dl *dl;
	struct stat stb;
	int rv;
	char name[40];
	__u8 serial[MAX_RAID_SERIAL_LEN];

	rv = imsm_read_serial(fd, devname, serial, MAX_RAID_SERIAL_LEN);
	if (rv != 0)
		return 2;

	dl = xcalloc(1, sizeof(*dl));

	if (fstat(fd, &stb) != 0) {
		free(dl);
		return 1;
	}
	dl->major = major(stb.st_rdev);
	dl->minor = minor(stb.st_rdev);
	dl->next = super->disks;
	dl->fd = keep_fd ? fd : -1;
	assert(super->disks == NULL);
	super->disks = dl;
	serialcpy(dl->serial, serial);
	dl->index = -2;
	fd2devname(fd, name);
	if (devname)
		dl->devname = xstrdup(devname);
	else
		dl->devname = xstrdup(name);

	/* look up this disk's index in the current anchor */
	disk = __serial_to_disk(dl->serial, super->anchor, &dl->index);
	if (disk) {
		dl->disk = *disk;
		/* only set index on disks that are a member of a
		 * populated container, i.e. one with raid_devs
		 */
		if (is_failed(&dl->disk))
			dl->index = -2;
		else if (is_spare(&dl->disk) || is_journal(&dl->disk))
			dl->index = -1;
	}

	return 0;
}
/* When migrating map0 contains the 'destination' state while map1
 * contains the current state.  When not migrating map0 contains the
 * current state.  This routine assumes that map[0].map_state is set to
 * the current array state before being called.
 *
 * Migration is indicated by one of the following states
 * 1/ Idle (migr_state=0 map0state=normal||uninitialized||degraded||failed)
 * 2/ Initialize (migr_state=1 migr_type=MIGR_INIT map0state=normal
 *    map1state=uninitialized)
 * 3/ Repair (Resync) (migr_state=1 migr_type=MIGR_REPAIR map0state=normal
 *    map1state=normal)
 * 4/ Rebuild (migr_state=1 migr_type=MIGR_REBUILD map0state=normal
 *    map1state=degraded)
 * 5/ Migration (mig_state=1 migr_type=MIGR_GEN_MIGR map0state=normal
 *    map1state=normal)
 */
static void migrate(struct imsm_dev *dev, struct intel_super *super,
		    __u8 to_state, int migr_type)
{
	struct imsm_map *dest;
	struct imsm_map *src = get_imsm_map(dev, MAP_0);

	dev->vol.migr_state = MIGR_STATE_MIGRATING;
	set_migr_type(dev, migr_type);
	set_vol_curr_migr_unit(dev, 0);
	dest = get_imsm_map(dev, MAP_1);

	/* duplicate and then set the target end state in map[0] */
	memcpy(dest, src, sizeof_imsm_map(src));
	if (migr_type == MIGR_GEN_MIGR) {
		__u32 ord;
		int i;

		for (i = 0; i < src->num_members; i++) {
			ord = __le32_to_cpu(src->disk_ord_tbl[i]);
			set_imsm_ord_tbl_ent(src, i, ord_to_idx(ord));
		}
	}

	if (migr_type == MIGR_GEN_MIGR)
		/* Clear migration record */
		memset(super->migr_rec, 0, sizeof(struct migr_record));

	src->map_state = to_state;
}
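
/*
 * Usage sketch (illustrative): a rebuild can be started with
 *   migrate(dev, super, IMSM_T_STATE_DEGRADED, MIGR_REBUILD);
 * and finished with
 *   end_migration(dev, super, IMSM_T_STATE_NORMAL);
 * which folds the map1 ordinal bits back into map0 as described in the
 * state-machine comment above.
 */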
static void end_migration(struct imsm_dev *dev, struct intel_super *super,
			  __u8 map_state)
{
	/* Avoid a compilation error claiming dev can't be NULL when
	 * migr_state is assigned.
	 */
	if (dev == NULL)
		return;

	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	struct imsm_map *prev = get_imsm_map(dev, dev->vol.migr_state == MIGR_STATE_NORMAL ?
					     MAP_0 : MAP_1);
	int i, j;
	int failed;

	/* merge any IMSM_ORD_REBUILD bits that were not successfully
	 * completed in the last migration.
	 *
	 * FIXME add support for raid-level-migration
	 */
	if (map_state != map->map_state && (is_gen_migration(dev) == false) &&
	    prev->map_state != IMSM_T_STATE_UNINITIALIZED) {
		/* when final map state is other than expected
		 * merge maps (not for migration)
		 */
		for (i = 0; i < prev->num_members; i++)
			for (j = 0; j < map->num_members; j++)
				/* during online capacity expansion
				 * disks position can be changed
				 * if takeover is used
				 */
				if (ord_to_idx(map->disk_ord_tbl[j]) ==
				    ord_to_idx(prev->disk_ord_tbl[i])) {
					map->disk_ord_tbl[j] |=
						prev->disk_ord_tbl[i];
					break;
				}
		failed = imsm_count_failed(super, dev, MAP_0);
		map_state = imsm_check_degraded(super, dev, failed, MAP_0);
	}

	dev->vol.migr_state = MIGR_STATE_NORMAL;
	set_migr_type(dev, 0);
	set_vol_curr_migr_unit(dev, 0);
	map->map_state = map_state;
}
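/* Illustrative only (a sketch, not a call site): a rebuild is typically
 * bracketed by the two helpers above, along the lines of
 *
 *	migrate(dev, super, IMSM_T_STATE_DEGRADED, MIGR_REBUILD);
 *	... rebuild progresses, checkpointed via set_vol_curr_migr_unit() ...
 *	end_migration(dev, super, IMSM_T_STATE_NORMAL);
 *
 * The exact to_state/map_state arguments depend on the caller's context.
 */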
static int parse_raid_devices(struct intel_super *super)
{
	int i;
	struct imsm_dev *dev_new;
	size_t len, len_migr;
	size_t max_len = 0;
	size_t space_needed = 0;
	struct imsm_super *mpb = super->anchor;

	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		struct imsm_dev *dev_iter = __get_imsm_dev(super->anchor, i);
		struct intel_dev *dv;

		len = sizeof_imsm_dev(dev_iter, 0);
		len_migr = sizeof_imsm_dev(dev_iter, 1);
		if (len_migr > len)
			space_needed += len_migr - len;

		dv = xmalloc(sizeof(*dv));
		if (max_len < len_migr)
			max_len = len_migr;
		if (max_len > len_migr)
			space_needed += max_len - len_migr;
		dev_new = xmalloc(max_len);
		imsm_copy_dev(dev_new, dev_iter);
		dv->dev = dev_new;
		dv->index = i;
		dv->next = super->devlist;
		super->devlist = dv;
	}

	/* ensure that super->buf is large enough when all raid devices
	 * are migrating
	 */
	if (__le32_to_cpu(mpb->mpb_size) + space_needed > super->len) {
		void *buf;

		len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) + space_needed,
			       super->sector_size);
		if (posix_memalign(&buf, MAX_SECTOR_SIZE, len) != 0)
			return 1;

		memcpy(buf, super->buf, super->len);
		memset(buf + super->len, 0, len - super->len);
		free(super->buf);
		super->buf = buf;
		super->len = len;
	}

	super->extra_space += space_needed;

	return 0;
}
/*******************************************************************************
 * Function:	check_mpb_migr_compatibility
 * Description:	Function checks for unsupported migration features:
 *		- migration optimization area (pba_of_lba0)
 *		- descending reshape (ascending_migr)
 * Parameters:
 *	super	: imsm metadata information
 * Returns:
 *	 0 : migration is compatible
 *	-1 : migration is not compatible
 ******************************************************************************/
int check_mpb_migr_compatibility(struct intel_super *super)
{
	struct imsm_map *map0, *map1;
	struct migr_record *migr_rec = super->migr_rec;
	int i;

	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		struct imsm_dev *dev_iter = __get_imsm_dev(super->anchor, i);

		if (dev_iter->vol.migr_state == MIGR_STATE_MIGRATING &&
		    dev_iter->vol.migr_type == MIGR_GEN_MIGR) {
			/* This device is migrating */
			map0 = get_imsm_map(dev_iter, MAP_0);
			map1 = get_imsm_map(dev_iter, MAP_1);
			if (pba_of_lba0(map0) != pba_of_lba0(map1))
				/* migration optimization area was used */
				return -1;
			if (migr_rec->ascending_migr == 0 &&
			    migr_rec->dest_depth_per_unit > 0)
				/* descending reshape not supported yet */
				return -1;
		}
	}
	return 0;
}
static void __free_imsm(struct intel_super *super, int free_disks);

/* load_imsm_mpb - read matrix metadata
 * allocates super->mpb to be freed by free_imsm
 */
static int load_imsm_mpb(int fd, struct intel_super *super, char *devname)
{
	unsigned long long dsize;
	unsigned long long sectors;
	unsigned int sector_size = super->sector_size;
	struct imsm_super *anchor;
	__u32 check_sum;

	get_dev_size(fd, NULL, &dsize);
	if (dsize < 2*sector_size) {
		if (devname)
			pr_err("%s: device too small for imsm\n",
			       devname);
		return 1;
	}

	if (lseek64(fd, dsize - (sector_size * 2), SEEK_SET) < 0) {
		if (devname)
			pr_err("Cannot seek to anchor block on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}

	if (posix_memalign((void **)&anchor, sector_size, sector_size) != 0) {
		if (devname)
			pr_err("Failed to allocate imsm anchor buffer on %s\n", devname);
		return 1;
	}
	if ((unsigned int)read(fd, anchor, sector_size) != sector_size) {
		if (devname)
			pr_err("Cannot read anchor block on %s: %s\n",
			       devname, strerror(errno));
		free(anchor);
		return 1;
	}

	if (strncmp((char *) anchor->sig, MPB_SIGNATURE, MPB_SIG_LEN) != 0) {
		if (devname)
			pr_err("no IMSM anchor on %s\n", devname);
		free(anchor);
		return 2;
	}

	__free_imsm(super, 0);
	/* reload capability and hba */

	/* capability and hba must be updated with new super allocation */
	find_intel_hba_capability(fd, super, devname);
	super->len = ROUND_UP(anchor->mpb_size, sector_size);
	if (posix_memalign(&super->buf, MAX_SECTOR_SIZE, super->len) != 0) {
		if (devname)
			pr_err("unable to allocate %zu byte mpb buffer\n",
			       super->len);
		free(anchor);
		return 2;
	}
	memcpy(super->buf, anchor, sector_size);

	sectors = mpb_sectors(anchor, sector_size) - 1;
	free(anchor);

	if (posix_memalign(&super->migr_rec_buf, MAX_SECTOR_SIZE,
			   MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE) != 0) {
		pr_err("could not allocate migr_rec buffer\n");
		free(super->buf);
		return 2;
	}
	super->clean_migration_record_by_mdmon = 0;

	if (!sectors) {
		check_sum = __gen_imsm_checksum(super->anchor);
		if (check_sum != __le32_to_cpu(super->anchor->check_sum)) {
			if (devname)
				pr_err("IMSM checksum %x != %x on %s\n",
				       check_sum,
				       __le32_to_cpu(super->anchor->check_sum),
				       devname);
			return 2;
		}

		return 0;
	}

	/* read the extended mpb */
	if (lseek64(fd, dsize - (sector_size * (2 + sectors)), SEEK_SET) < 0) {
		if (devname)
			pr_err("Cannot seek to extended mpb on %s: %s\n",
			       devname, strerror(errno));
		return 1;
	}

	if ((unsigned int)read(fd, super->buf + sector_size,
		    super->len - sector_size) != super->len - sector_size) {
		if (devname)
			pr_err("Cannot read extended mpb on %s: %s\n",
			       devname, strerror(errno));
		return 2;
	}

	check_sum = __gen_imsm_checksum(super->anchor);
	if (check_sum != __le32_to_cpu(super->anchor->check_sum)) {
		if (devname)
			pr_err("IMSM checksum %x != %x on %s\n",
			       check_sum, __le32_to_cpu(super->anchor->check_sum),
			       devname);
		return 3;
	}

	return 0;
}
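/* A minimal worked example of the anchor placement assumed above: with
 * sector_size == 512 and dsize == 1000204886016 (a nominal 1TB disk), the
 * anchor is read from byte offset dsize - 2*512 = 1000204884992, i.e. the
 * second-to-last sector, and an extended mpb of `sectors` sectors is read
 * from dsize - (2 + sectors)*512, growing toward the start of the disk.
 */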
static int read_imsm_migr_rec(int fd, struct intel_super *super);

/* clears hi bits in metadata if MPB_ATTRIB_2TB_DISK not set */
static void clear_hi(struct intel_super *super)
{
	struct imsm_super *mpb = super->anchor;
	int i, n;

	if (mpb->attributes & MPB_ATTRIB_2TB_DISK)
		return;

	for (i = 0; i < mpb->num_disks; ++i) {
		struct imsm_disk *disk = &mpb->disk[i];

		disk->total_blocks_hi = 0;
	}
	for (i = 0; i < mpb->num_raid_devs; ++i) {
		struct imsm_dev *dev = get_imsm_dev(super, i);

		for (n = 0; n < 2; ++n) {
			struct imsm_map *map = get_imsm_map(dev, n);

			if (!map)
				continue;
			map->pba_of_lba0_hi = 0;
			map->blocks_per_member_hi = 0;
			map->num_data_stripes_hi = 0;
		}
	}
}
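/* Sketch of the hi/lo split zeroed above (illustrative): the *_hi fields
 * carry the upper 32 bits of values that only exist when
 * MPB_ATTRIB_2TB_DISK is set, combined elsewhere in this file roughly as
 *
 *	blocks = __le32_to_cpu(lo) | ((__u64) __le32_to_cpu(hi) << 32);
 *
 * so clearing the hi words caps every quantity at 2^32 sectors (2TB).
 */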
static int
load_and_parse_mpb(int fd, struct intel_super *super, char *devname, int keep_fd)
{
	int err;

	err = load_imsm_mpb(fd, super, devname);
	if (err)
		return err;
	if (super->sector_size == 4096)
		convert_from_4k(super);
	err = load_imsm_disk(fd, super, devname, keep_fd);
	if (err)
		return err;
	err = parse_raid_devices(super);
	if (err)
		return err;
	err = load_bbm_log(super);
	clear_hi(super);
	return err;
}
static void __free_imsm_disk(struct dl *d, int do_close)
{
	if (do_close)
		close_fd(&d->fd);
	if (d->devname)
		free(d->devname);
	if (d->e)
		free(d->e);
	free(d);
}

static void free_imsm_disks(struct intel_super *super)
{
	struct dl *d;

	while (super->disks) {
		d = super->disks;
		super->disks = d->next;
		__free_imsm_disk(d, 1);
	}
	while (super->disk_mgmt_list) {
		d = super->disk_mgmt_list;
		super->disk_mgmt_list = d->next;
		__free_imsm_disk(d, 1);
	}
	while (super->missing) {
		d = super->missing;
		super->missing = d->next;
		__free_imsm_disk(d, 1);
	}
}
/* free all the pieces hanging off of a super pointer */
static void __free_imsm(struct intel_super *super, int free_disks)
{
	struct intel_hba *elem, *next;

	if (super->buf) {
		free(super->buf);
		super->buf = NULL;
	}
	/* unlink capability description */
	super->orom = NULL;
	if (super->migr_rec_buf) {
		free(super->migr_rec_buf);
		super->migr_rec_buf = NULL;
	}
	if (free_disks)
		free_imsm_disks(super);
	free_devlist(super);
	elem = super->hba;
	while (elem) {
		if (elem->path)
			free((void *)elem->path);
		next = elem->next;
		free(elem);
		elem = next;
	}
	if (super->bbm_log)
		free(super->bbm_log);
	super->hba = NULL;
}

static void free_imsm(struct intel_super *super)
{
	__free_imsm(super, 1);
	free(super->bb.entries);
	free(super);
}
static void free_super_imsm(struct supertype *st)
{
	struct intel_super *super = st->sb;

	if (!super)
		return;

	free_imsm(super);
	st->sb = NULL;
}

static struct intel_super *alloc_super(void)
{
	struct intel_super *super = xcalloc(1, sizeof(*super));

	super->current_vol = -1;
	super->create_offset = ~((unsigned long long) 0);

	super->bb.entries = xmalloc(BBM_LOG_MAX_ENTRIES *
				    sizeof(struct md_bb_entry));
	if (!super->bb.entries) {
		free(super);
		super = NULL;
	}

	return super;
}
/*
 * find and allocate hba and OROM/EFI based on valid fd of RAID component device
 */
static int find_intel_hba_capability(int fd, struct intel_super *super, char *devname)
{
	struct sys_dev *hba_name;
	int rv = 0;

	if (is_fd_valid(fd) && test_partition(fd)) {
		pr_err("imsm: %s is a partition, cannot be used in IMSM\n",
		       devname);
		return 1;
	}
	if (!is_fd_valid(fd) || check_no_platform()) {
		super->orom = NULL;
		super->hba = NULL;
		return 0;
	}
	hba_name = find_disk_attached_hba(fd, NULL);
	if (!hba_name) {
		if (devname)
			pr_err("%s is not attached to Intel(R) RAID controller.\n",
			       devname);
		return 1;
	}
	rv = attach_hba_to_super(super, hba_name);
	if (rv == 2) {
		if (devname) {
			struct intel_hba *hba = super->hba;

			pr_err("%s is attached to Intel(R) %s %s (%s),\n"
			       "    but the container is assigned to Intel(R) %s %s (",
			       devname,
			       get_sys_dev_type(hba_name->type),
			       hba_name->type == SYS_DEV_VMD || hba_name->type == SYS_DEV_SATA_VMD ?
				       "domain" : "RAID controller",
			       hba_name->pci_id ? : "Err!",
			       get_sys_dev_type(super->hba->type),
			       hba->type == SYS_DEV_VMD || hba_name->type == SYS_DEV_SATA_VMD ?
				       "domain" : "RAID controller");

			while (hba) {
				fprintf(stderr, "%s", hba->pci_id ? : "Err!");
				if (hba->next)
					fprintf(stderr, ", ");
				hba = hba->next;
			}
			fprintf(stderr, ").\n"
				"    Mixing devices attached to different controllers is not allowed.\n");
		}
		return 2;
	}
	super->orom = find_imsm_capability(hba_name);

	return 0;
}
/* find_missing - helper routine for load_super_imsm_all that identifies
 * disks that have disappeared from the system. This routine relies on
 * the mpb being up to date, which it is at load time.
 */
static int find_missing(struct intel_super *super)
{
	int i;
	struct imsm_super *mpb = super->anchor;
	struct dl *dl;
	struct imsm_disk *disk;

	for (i = 0; i < mpb->num_disks; i++) {
		disk = __get_imsm_disk(mpb, i);
		dl = serial_to_dl(disk->serial, super);
		if (dl)
			continue;

		dl = xmalloc(sizeof(*dl));
		dl->major = 0;
		dl->minor = 0;
		dl->fd = -1;
		dl->devname = xstrdup("missing");
		dl->index = i;
		serialcpy(dl->serial, disk->serial);
		dl->disk = *disk;
		dl->e = NULL;
		dl->next = super->missing;
		super->missing = dl;
	}

	return 0;
}
static struct intel_disk *disk_list_get(__u8 *serial, struct intel_disk *disk_list)
{
	struct intel_disk *idisk = disk_list;

	while (idisk) {
		if (serialcmp(idisk->disk.serial, serial) == 0)
			return idisk;
		idisk = idisk->next;
	}

	return NULL;
}
static int __prep_thunderdome(struct intel_super **table, int tbl_size,
			      struct intel_super *super,
			      struct intel_disk **disk_list)
{
	struct imsm_disk *d = &super->disks->disk;
	struct imsm_super *mpb = super->anchor;
	int i, j;

	for (i = 0; i < tbl_size; i++) {
		struct imsm_super *tbl_mpb = table[i]->anchor;
		struct imsm_disk *tbl_d = &table[i]->disks->disk;

		if (tbl_mpb->family_num == mpb->family_num) {
			if (tbl_mpb->check_sum == mpb->check_sum) {
				dprintf("mpb from %d:%d matches %d:%d\n",
					super->disks->major,
					super->disks->minor,
					table[i]->disks->major,
					table[i]->disks->minor);
				break;
			}

			if (((is_configured(d) && !is_configured(tbl_d)) ||
			     is_configured(d) == is_configured(tbl_d)) &&
			    tbl_mpb->generation_num < mpb->generation_num) {
				/* current version of the mpb is a
				 * better candidate than the one in
				 * super_table, but copy over "cross
				 * generational" status
				 */
				struct intel_disk *idisk;

				dprintf("mpb from %d:%d replaces %d:%d\n",
					super->disks->major,
					super->disks->minor,
					table[i]->disks->major,
					table[i]->disks->minor);

				idisk = disk_list_get(tbl_d->serial, *disk_list);
				if (idisk && is_failed(&idisk->disk))
					tbl_d->status |= FAILED_DISK;
				break;
			} else {
				struct intel_disk *idisk;
				struct imsm_disk *disk;

				/* tbl_mpb is more up to date, but copy
				 * over cross generational status before
				 * returning
				 */
				disk = __serial_to_disk(d->serial, mpb, NULL);
				if (disk && is_failed(disk))
					d->status |= FAILED_DISK;

				idisk = disk_list_get(d->serial, *disk_list);
				if (idisk) {
					idisk->owner = i;
					if (disk && is_configured(disk))
						idisk->disk.status |= CONFIGURED_DISK;
				}

				dprintf("mpb from %d:%d prefer %d:%d\n",
					super->disks->major,
					super->disks->minor,
					table[i]->disks->major,
					table[i]->disks->minor);

				return tbl_size;
			}
		}
	}

	if (i >= tbl_size)
		table[tbl_size++] = super;
	else
		table[i] = super;

	/* update/extend the merged list of imsm_disk records */
	for (j = 0; j < mpb->num_disks; j++) {
		struct imsm_disk *disk = __get_imsm_disk(mpb, j);
		struct intel_disk *idisk;

		idisk = disk_list_get(disk->serial, *disk_list);
		if (idisk) {
			idisk->disk.status |= disk->status;
			if (is_configured(&idisk->disk) ||
			    is_failed(&idisk->disk))
				idisk->disk.status &= ~(SPARE_DISK);
		} else {
			idisk = xcalloc(1, sizeof(*idisk));
			idisk->owner = IMSM_UNKNOWN_OWNER;
			idisk->disk = *disk;
			idisk->next = *disk_list;
			*disk_list = idisk;
		}

		if (serialcmp(idisk->disk.serial, d->serial) == 0)
			idisk->owner = i;
	}

	return tbl_size;
}
static struct intel_super *
validate_members(struct intel_super *super, struct intel_disk *disk_list,
		 unsigned short owner)
{
	struct imsm_super *mpb = super->anchor;
	int ok_count = 0;
	int i;

	for (i = 0; i < mpb->num_disks; i++) {
		struct imsm_disk *disk = __get_imsm_disk(mpb, i);
		struct intel_disk *idisk;

		idisk = disk_list_get(disk->serial, disk_list);
		if (idisk) {
			if (idisk->owner == owner ||
			    idisk->owner == IMSM_UNKNOWN_OWNER)
				ok_count++;
			else
				dprintf("'%.16s' owner %d != %d\n",
					disk->serial, idisk->owner,
					owner);
		} else {
			dprintf("unknown disk %x [%d]: %.16s\n",
				__le32_to_cpu(mpb->family_num), i,
				disk->serial);
			break;
		}
	}

	if (ok_count == mpb->num_disks)
		return super;
	return NULL;
}
static void show_conflicts(__u32 family_num, struct intel_super *super_list)
{
	struct intel_super *s;

	for (s = super_list; s; s = s->next) {
		if (family_num != s->anchor->family_num)
			continue;
		pr_err("Conflict, offlining family %#x on '%s'\n",
		       __le32_to_cpu(family_num), s->disks->devname);
	}
}
static struct intel_super *
imsm_thunderdome(struct intel_super **super_list, int len)
{
	struct intel_super *super_table[len];
	struct intel_disk *disk_list = NULL;
	struct intel_super *champion, *spare;
	struct intel_super *s, **del;
	int tbl_size = 0;
	int conflict = 0;
	int i;

	memset(super_table, 0, sizeof(super_table));
	for (s = *super_list; s; s = s->next)
		tbl_size = __prep_thunderdome(super_table, tbl_size, s, &disk_list);

	for (i = 0; i < tbl_size; i++) {
		struct imsm_disk *d;
		struct intel_disk *idisk;

		s = super_table[i];
		d = &s->disks->disk;

		/* 'd' must appear in merged disk list for its
		 * configuration to be valid
		 */
		idisk = disk_list_get(d->serial, disk_list);
		if (idisk && idisk->owner == i)
			s = validate_members(s, disk_list, i);
		else
			s = NULL;

		if (!s)
			dprintf("marking family: %#x from %d:%d offline\n",
				super_table[i]->anchor->family_num,
				super_table[i]->disks->major,
				super_table[i]->disks->minor);
		super_table[i] = s;
	}

	/* This is where the mdadm implementation differs from the Windows
	 * driver which has no strict concept of a container. We can only
	 * assemble one family from a container, so when returning a prodigal
	 * array member to this system the code will not be able to disambiguate
	 * the container contents that should be assembled ("foreign" versus
	 * "local"). It requires user intervention to set the orig_family_num
	 * to a new value to establish a new container. The Windows driver in
	 * this situation fixes up the volume name in place and manages the
	 * foreign array as an independent entity.
	 */
	s = NULL;
	spare = NULL;
	for (i = 0; i < tbl_size; i++) {
		struct intel_super *tbl_ent = super_table[i];
		int is_spare = 0;

		if (!tbl_ent)
			continue;

		if (tbl_ent->anchor->num_raid_devs == 0) {
			spare = tbl_ent;
			is_spare = 1;
		}

		if (s && !is_spare) {
			show_conflicts(tbl_ent->anchor->family_num, *super_list);
			conflict++;
		} else if (!s && !is_spare)
			s = tbl_ent;
	}

	if (!s)
		s = spare;
	if (!s) {
		champion = NULL;
		goto out;
	}
	champion = s;

	if (conflict)
		pr_err("Chose family %#x on '%s', assemble conflicts to new container with '--update=uuid'\n",
		       __le32_to_cpu(s->anchor->family_num), s->disks->devname);

	/* collect all dl's onto 'champion', and update them to
	 * champion's version of the status
	 */
	for (s = *super_list; s; s = s->next) {
		struct imsm_super *mpb = champion->anchor;
		struct dl *dl = s->disks;

		if (s == champion)
			continue;

		mpb->attributes |= s->anchor->attributes & MPB_ATTRIB_2TB_DISK;

		for (i = 0; i < mpb->num_disks; i++) {
			struct imsm_disk *disk;

			disk = __serial_to_disk(dl->serial, mpb, &dl->index);
			if (disk) {
				dl->disk = *disk;
				/* only set index on disks that are a member of
				 * a populated container, i.e. one with
				 * raid_devs
				 */
				if (is_failed(&dl->disk))
					dl->index = -2;
				else if (is_spare(&dl->disk))
					dl->index = -1;
				break;
			}
		}

		if (i >= mpb->num_disks) {
			struct intel_disk *idisk;

			/* set the spare state on disks missing from the anchor */
			idisk = disk_list_get(dl->serial, disk_list);
			if (idisk && is_spare(&idisk->disk) &&
			    !is_failed(&idisk->disk) && !is_configured(&idisk->disk))
				dl->index = -1;
			else {
				dl->index = -2;
				continue;
			}
		}

		dl->next = champion->disks;
		champion->disks = dl;
		s->disks = NULL;
	}

	/* delete 'champion' from super_list */
	for (del = super_list; *del; ) {
		if (*del == champion) {
			*del = (*del)->next;
			break;
		} else
			del = &(*del)->next;
	}
	champion->next = NULL;

 out:
	while (disk_list) {
		struct intel_disk *idisk = disk_list;

		disk_list = disk_list->next;
		free(idisk);
	}

	return champion;
}
static int
get_sra_super_block(int fd, struct intel_super **super_list, char *devname, int *max, int keep_fd);
static int get_super_block(struct intel_super **super_list, char *devnm, char *devname,
			   int major, int minor, int keep_fd);
static int
get_devlist_super_block(struct md_list *devlist, struct intel_super **super_list,
			int *max, int keep_fd);
static int load_super_imsm_all(struct supertype *st, int fd, void **sbp,
			       char *devname, struct md_list *devlist,
			       int keep_fd)
{
	struct intel_super *super_list = NULL;
	struct intel_super *super = NULL;
	int err = 0;
	int i = 0;

	if (is_fd_valid(fd))
		/* 'fd' is an opened container */
		err = get_sra_super_block(fd, &super_list, devname, &i, keep_fd);
	else
		/* get super block from devlist devices */
		err = get_devlist_super_block(devlist, &super_list, &i, keep_fd);
	if (err)
		goto error;
	/* all mpbs enter, maybe one leaves */
	super = imsm_thunderdome(&super_list, i);
	if (!super) {
		err = 1;
		goto error;
	}

	if (find_missing(super) != 0) {
		free_imsm(super);
		err = 2;
		goto error;
	}

	/* load migration record */
	err = load_imsm_migr_rec(super);
	if (err == -1) {
		/* migration is in progress,
		 * but migr_rec cannot be loaded,
		 */
		err = 4;
		goto error;
	}

	/* Check migration compatibility */
	if (err == 0 && check_mpb_migr_compatibility(super) != 0) {
		pr_err("Unsupported migration detected");
		if (devname)
			fprintf(stderr, " on %s\n", devname);
		else
			fprintf(stderr, " (IMSM).\n");

		err = 5;
		goto error;
	}

	err = 0;

 error:
	while (super_list) {
		struct intel_super *s = super_list;

		super_list = super_list->next;
		free_imsm(s);
	}

	if (err)
		return err;

	*sbp = super;
	if (is_fd_valid(fd))
		strcpy(st->container_devnm, fd2devnm(fd));
	else
		st->container_devnm[0] = 0;
	if (err == 0 && st->ss == NULL) {
		st->ss = &super_imsm;
		st->minor_version = 0;
		st->max_devs = IMSM_MAX_DEVICES;
	}
	return 0;
}
static int
get_devlist_super_block(struct md_list *devlist, struct intel_super **super_list,
			int *max, int keep_fd)
{
	struct md_list *tmpdev;
	int err = 0;
	int i = 0;
	int lmax = 0;

	for (i = 0, tmpdev = devlist; tmpdev; tmpdev = tmpdev->next) {
		if (tmpdev->used != 1)
			continue;
		if (tmpdev->container == 1) {
			int fd = dev_open(tmpdev->devname, O_RDONLY|O_EXCL);

			if (!is_fd_valid(fd)) {
				pr_err("cannot open device %s: %s\n",
				       tmpdev->devname, strerror(errno));
				err = 8;
				break;
			}
			err = get_sra_super_block(fd, super_list,
						  tmpdev->devname, &lmax,
						  keep_fd);
			i += lmax;
			close(fd);
			if (err) {
				err = 7;
				break;
			}
		} else {
			int major = major(tmpdev->st_rdev);
			int minor = minor(tmpdev->st_rdev);

			err = get_super_block(super_list,
					      NULL, tmpdev->devname,
					      major, minor, keep_fd);
			i++;
			if (err)
				break;
		}
	}

	*max = i;
	return err;
}
static int get_super_block(struct intel_super **super_list, char *devnm, char *devname,
			   int major, int minor, int keep_fd)
{
	struct intel_super *s;
	char nm[32];
	int dfd = -1;
	int err = 0;
	int retry;

	s = alloc_super();
	if (!s) {
		err = 1;
		goto error;
	}

	sprintf(nm, "%d:%d", major, minor);
	dfd = dev_open(nm, O_RDWR);
	if (!is_fd_valid(dfd)) {
		err = 2;
		goto error;
	}

	if (!get_dev_sector_size(dfd, NULL, &s->sector_size)) {
		err = 2;
		goto error;
	}

	find_intel_hba_capability(dfd, s, devname);
	err = load_and_parse_mpb(dfd, s, NULL, keep_fd);

	/* retry the load if we might have raced against mdmon */
	if (err == 3 && devnm && mdmon_running(devnm))
		for (retry = 0; retry < 3; retry++) {
			sleep_for(0, MSEC_TO_NSEC(3), true);
			err = load_and_parse_mpb(dfd, s, NULL, keep_fd);
			if (err != 3)
				break;
		}
 error:
	if (!err) {
		s->next = *super_list;
		*super_list = s;
	} else {
		if (s)
			free_imsm(s);
		close_fd(&dfd);
	}
	if (!keep_fd)
		close_fd(&dfd);
	return err;
}
static int
get_sra_super_block(int fd, struct intel_super **super_list, char *devname, int *max, int keep_fd)
{
	struct mdinfo *sra;
	struct mdinfo *sd;
	int err = 0;
	int i = 0;
	char *devnm;

	sra = sysfs_read(fd, NULL, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
	if (!sra)
		return 1;

	if (sra->array.major_version != -1 ||
	    sra->array.minor_version != -2 ||
	    strcmp(sra->text_version, "imsm") != 0) {
		err = 1;
		goto error;
	}
	/* load all mpbs */
	devnm = fd2devnm(fd);
	for (sd = sra->devs, i = 0; sd; sd = sd->next, i++) {
		if (get_super_block(super_list, devnm, devname,
				    sd->disk.major, sd->disk.minor, keep_fd) != 0) {
			err = 7;
			goto error;
		}
	}
 error:
	sysfs_free(sra);
	*max = i;
	return err;
}
static int load_container_imsm(struct supertype *st, int fd, char *devname)
{
	return load_super_imsm_all(st, fd, &st->sb, devname, NULL, 1);
}
static int load_super_imsm(struct supertype *st, int fd, char *devname)
{
	struct intel_super *super;
	int rv;
	int retry;

	if (test_partition(fd))
		/* IMSM not allowed on partitions */
		return 1;

	free_super_imsm(st);

	super = alloc_super();
	if (!super)
		return 1;

	if (!get_dev_sector_size(fd, NULL, &super->sector_size)) {
		free_imsm(super);
		return 1;
	}
	/* Load hba and capabilities if they exist.
	 * But do not preclude loading metadata in case capabilities or hba are
	 * non-compliant and ignore_hw_compat is set.
	 */
	rv = find_intel_hba_capability(fd, super, devname);
	/* no orom/efi or non-intel hba of the disk */
	if (rv != 0 && st->ignore_hw_compat == 0) {
		if (devname)
			pr_err("No OROM/EFI properties for %s\n", devname);
		free_imsm(super);
		return 2;
	}
	rv = load_and_parse_mpb(fd, super, devname, 0);

	/* retry the load if we might have raced against mdmon */
	if (rv == 3) {
		struct mdstat_ent *mdstat = NULL;
		char *name = fd2kname(fd);

		if (name)
			mdstat = mdstat_by_component(name);

		if (mdstat && mdmon_running(mdstat->devnm) && getpid() != mdmon_pid(mdstat->devnm)) {
			for (retry = 0; retry < 3; retry++) {
				sleep_for(0, MSEC_TO_NSEC(3), true);
				rv = load_and_parse_mpb(fd, super, devname, 0);
				if (rv != 3)
					break;
			}
		}

		free_mdstat(mdstat);
	}

	if (rv) {
		if (devname)
			pr_err("Failed to load all information sections on %s\n", devname);
		free_imsm(super);
		return rv;
	}

	st->sb = super;
	if (st->ss == NULL) {
		st->ss = &super_imsm;
		st->minor_version = 0;
		st->max_devs = IMSM_MAX_DEVICES;
	}

	/* load migration record */
	if (load_imsm_migr_rec(super) == 0) {
		/* Check for unsupported migration features */
		if (check_mpb_migr_compatibility(super) != 0) {
			pr_err("Unsupported migration detected");
			if (devname)
				fprintf(stderr, " on %s\n", devname);
			else
				fprintf(stderr, " (IMSM).\n");
			return 3;
		}
	}

	return 0;
}
static __u16 info_to_blocks_per_strip(mdu_array_info_t *info)
{
	if (info->level == 1)
		return 128;
	return info->chunk_size >> 9;
}

static unsigned long long info_to_blocks_per_member(mdu_array_info_t *info,
						    unsigned long long size)
{
	if (info->level == 1)
		return size * 2;
	return (size * 2) & ~(info_to_blocks_per_strip(info) - 1);
}
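/* Worked example of the helpers above (illustrative): for a chunk_size of
 * 131072 bytes (128K), blocks_per_strip = 131072 >> 9 = 256 sectors.  A
 * member size of 1000000 KB then becomes (1000000 * 2) & ~(256 - 1) =
 * 1999872 sectors, i.e. the member size is rounded down to a whole strip.
 */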
static void imsm_write_signature(struct imsm_super *mpb)
{
	/* It is safer to truncate the version than to leave the signature
	 * without NUL termination.
	 */
	snprintf((char *) mpb->sig, MAX_SIGNATURE_LENGTH, MPB_SIGNATURE MPB_VERSION_ATTRIBS);
}
static void imsm_update_version_info(struct intel_super *super)
{
	/* update the version and attributes */
	struct imsm_super *mpb = super->anchor;
	struct imsm_dev *dev;
	struct imsm_map *map;
	int i;

	mpb->attributes |= MPB_ATTRIB_CHECKSUM_VERIFY;

	for (i = 0; i < mpb->num_raid_devs; i++) {
		dev = get_imsm_dev(super, i);
		map = get_imsm_map(dev, MAP_0);

		if (__le32_to_cpu(dev->size_high) > 0)
			mpb->attributes |= MPB_ATTRIB_2TB;

		switch (get_imsm_raid_level(map)) {
		case IMSM_T_RAID0:
			mpb->attributes |= MPB_ATTRIB_RAID0;
			break;
		case IMSM_T_RAID1:
			mpb->attributes |= MPB_ATTRIB_RAID1;
			break;
		case IMSM_T_RAID5:
			mpb->attributes |= MPB_ATTRIB_RAID5;
			break;
		case IMSM_T_RAID10:
			mpb->attributes |= MPB_ATTRIB_RAID10;
			if (map->num_members > 4)
				mpb->attributes |= MPB_ATTRIB_RAID10_EXT;
			break;
		}
	}

	imsm_write_signature(mpb);
}
/**
 * imsm_is_name_allowed() - check imsm naming criteria.
 * @super: &intel_super pointer, not NULL.
 * @name: name to check.
 * @verbose: verbose level.
 *
 * Name must be no longer than &MAX_RAID_SERIAL_LEN and must be unique across volumes.
 *
 * Returns: &true if @name is allowed, &false otherwise.
 */
static bool imsm_is_name_allowed(struct intel_super *super, const char * const name,
				 const int verbose)
{
	struct imsm_super *mpb = super->anchor;
	int i;

	if (is_string_lq(name, MAX_RAID_SERIAL_LEN + 1) == false) {
		pr_vrb("imsm: Name \"%s\" is too long\n", name);
		return false;
	}

	if (name[0] == '.') {
		pr_vrb("imsm: Name \"%s\" has forbidden leading dot\n", name);
		return false;
	}

	if (is_name_posix_compatible(name) == false) {
		pr_vrb("imsm: Name \"%s\" doesn't follow POSIX portable file name character set\n",
		       name);
		return false;
	}

	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);

		if (strncmp((char *) dev->volume, name, MAX_RAID_SERIAL_LEN) == 0) {
			pr_vrb("imsm: Name \"%s\" already exists\n", name);
			return false;
		}
	}

	return true;
}
static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
				  struct shape *s, char *name,
				  char *homehost, int *uuid,
				  long long data_offset)
{
	/* We are creating a volume inside a pre-existing container,
	 * so st->sb is already set.
	 */
	struct intel_super *super = st->sb;
	unsigned int sector_size = super->sector_size;
	struct imsm_super *mpb = super->anchor;
	struct intel_dev *dv;
	struct imsm_dev *dev;
	struct imsm_vol *vol;
	struct imsm_map *map;
	int idx = mpb->num_raid_devs;
	int i;
	int namelen;
	unsigned long long array_blocks;
	size_t size_old, size_new;
	unsigned int data_disks;
	unsigned long long size_per_member;

	if (super->orom && mpb->num_raid_devs >= super->orom->vpa) {
		pr_err("This imsm-container already has the maximum of %d volumes\n", super->orom->vpa);
		return 0;
	}

	/* ensure the mpb is large enough for the new data */
	size_old = __le32_to_cpu(mpb->mpb_size);
	size_new = disks_to_mpb_size(info->nr_disks);
	if (size_new > size_old) {
		void *mpb_new;
		size_t size_round = ROUND_UP(size_new, sector_size);

		if (posix_memalign(&mpb_new, sector_size, size_round) != 0) {
			pr_err("could not allocate new mpb\n");
			return 0;
		}
		if (posix_memalign(&super->migr_rec_buf, MAX_SECTOR_SIZE,
				   MIGR_REC_BUF_SECTORS*
				   MAX_SECTOR_SIZE) != 0) {
			pr_err("could not allocate migr_rec buffer\n");
			free(mpb_new);
			return 0;
		}
		memcpy(mpb_new, mpb, size_old);
		free(mpb);
		mpb = mpb_new;
		super->anchor = mpb_new;
		mpb->mpb_size = __cpu_to_le32(size_new);
		memset(mpb_new + size_old, 0, size_round - size_old);
		super->len = size_round;
	}
	super->current_vol = idx;

	/* handle 'failed_disks' by either:
	 * a) create dummy disk entries in the table if this is the first
	 *    volume in the array.  We add them here as this is the only
	 *    opportunity to add them. add_to_super_imsm_volume()
	 *    handles the non-failed disks and continues incrementing
	 *    mpb->num_disks.
	 * b) validate that 'failed_disks' matches the current number
	 *    of missing disks if the container is populated
	 */
	if (super->current_vol == 0) {
		mpb->num_disks = 0;
		for (i = 0; i < info->failed_disks; i++) {
			struct imsm_disk *disk;

			mpb->num_disks++;
			disk = __get_imsm_disk(mpb, i);
			disk->status = CONFIGURED_DISK | FAILED_DISK;
			disk->scsi_id = __cpu_to_le32(~(__u32)0);
			snprintf((char *) disk->serial, MAX_RAID_SERIAL_LEN,
				 "missing:%d", (__u8)i);
		}
		find_missing(super);
	} else {
		int missing = 0;
		struct dl *d;

		for (d = super->missing; d; d = d->next)
			missing++;
		if (info->failed_disks > missing) {
			pr_err("unable to add 'missing' disk to container\n");
			return 0;
		}
	}

	if (imsm_is_name_allowed(super, name, 1) == false)
		return 0;

	dv = xmalloc(sizeof(*dv));
	dev = xcalloc(1, sizeof(*dev) + sizeof(__u32) * (info->raid_disks - 1));
	/*
	 * Explicitly allow truncating to not confuse gcc's
	 * -Werror=stringop-truncation
	 */
	namelen = min((int) strlen(name), MAX_RAID_SERIAL_LEN);
	memcpy(dev->volume, name, namelen);
	array_blocks = calc_array_size(info->level, info->raid_disks,
				       info->layout, info->chunk_size,
				       s->size * BLOCKS_PER_KB);
	data_disks = get_data_disks(info->level, info->layout,
				    info->raid_disks);
	array_blocks = round_size_to_mb(array_blocks, data_disks);
	size_per_member = array_blocks / data_disks;

	set_imsm_dev_size(dev, array_blocks);
	dev->status = (DEV_READ_COALESCING | DEV_WRITE_COALESCING);
	vol = &dev->vol;
	vol->migr_state = MIGR_STATE_NORMAL;
	set_migr_type(dev, MIGR_INIT);
	vol->dirty = !info->state;
	set_vol_curr_migr_unit(dev, 0);
	map = get_imsm_map(dev, MAP_0);
	set_pba_of_lba0(map, super->create_offset);
	map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info));
	map->failed_disk_num = ~0;
	if (info->level > IMSM_T_RAID0)
		map->map_state = (info->state ? IMSM_T_STATE_NORMAL
				  : IMSM_T_STATE_UNINITIALIZED);
	else
		map->map_state = info->failed_disks ? IMSM_T_STATE_FAILED :
						      IMSM_T_STATE_NORMAL;

	if (info->level == IMSM_T_RAID1 && info->raid_disks > 2) {
		free(dev);
		free(dv);
		pr_err("imsm does not support more than 2 disks in a raid1 volume\n");
		return 0;
	}
	map->num_members = info->raid_disks;

	update_imsm_raid_level(map, info->level);
	set_num_domains(map);

	size_per_member += NUM_BLOCKS_DIRTY_STRIPE_REGION;
	set_blocks_per_member(map, info_to_blocks_per_member(info,
							     size_per_member /
							     BLOCKS_PER_KB));

	update_num_data_stripes(map, array_blocks);
	for (i = 0; i < map->num_members; i++) {
		/* initialized in add_to_super */
		set_imsm_ord_tbl_ent(map, i, IMSM_ORD_REBUILD);
	}
	mpb->num_raid_devs++;
	mpb->num_raid_devs_created++;
	dev->my_vol_raid_dev_num = mpb->num_raid_devs_created;

	if (s->consistency_policy <= CONSISTENCY_POLICY_RESYNC) {
		dev->rwh_policy = RWH_MULTIPLE_OFF;
	} else if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
		dev->rwh_policy = RWH_MULTIPLE_DISTRIBUTED;
	} else {
		free(dev);
		free(dv);
		pr_err("imsm does not support consistency policy %s\n",
		       map_num_s(consistency_policies, s->consistency_policy));
		return 0;
	}

	dv->dev = dev;
	dv->index = super->current_vol;
	dv->next = super->devlist;
	super->devlist = dv;

	imsm_update_version_info(super);

	return 1;
}
static int init_super_imsm(struct supertype *st, mdu_array_info_t *info,
			   struct shape *s, char *name,
			   char *homehost, int *uuid,
			   unsigned long long data_offset)
{
	/* This is primarily called by Create when creating a new array.
	 * We will then get add_to_super called for each component, and then
	 * write_init_super called to write it out to each device.
	 * For IMSM, Create can create on fresh devices or on a pre-existing
	 * array.
	 * To create on a pre-existing array a different method will be called.
	 * This one is just for fresh drives.
	 */
	struct intel_super *super;
	struct imsm_super *mpb;
	size_t mpb_size;

	if (data_offset != INVALID_SECTORS) {
		pr_err("data-offset not supported by imsm\n");
		return 0;
	}

	if (st->sb)
		return init_super_imsm_volume(st, info, s, name, homehost, uuid,
					      data_offset);

	if (info)
		mpb_size = disks_to_mpb_size(info->nr_disks);
	else
		mpb_size = MAX_SECTOR_SIZE;

	super = alloc_super();
	if (super &&
	    posix_memalign(&super->buf, MAX_SECTOR_SIZE, mpb_size) != 0) {
		free_imsm(super);
		super = NULL;
	}
	if (!super) {
		pr_err("could not allocate superblock\n");
		return 0;
	}
	if (posix_memalign(&super->migr_rec_buf, MAX_SECTOR_SIZE,
			   MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE) != 0) {
		pr_err("could not allocate migr_rec buffer\n");
		free(super->buf);
		free_imsm(super);
		return 0;
	}
	memset(super->buf, 0, mpb_size);
	mpb = super->buf;
	mpb->mpb_size = __cpu_to_le32(mpb_size);
	st->sb = super;

	if (info == NULL) {
		/* zeroing superblock */
		return 0;
	}

	imsm_update_version_info(super);

	return 1;
}
static int drive_validate_sector_size(struct intel_super *super, struct dl *dl)
{
	unsigned int member_sector_size;

	if (!is_fd_valid(dl->fd)) {
		pr_err("Invalid file descriptor for %s\n", dl->devname);
		return 0;
	}

	if (!get_dev_sector_size(dl->fd, dl->devname, &member_sector_size))
		return 0;
	if (member_sector_size != super->sector_size)
		return 0;
	return 1;
}
static int add_to_super_imsm_volume(struct supertype *st, mdu_disk_info_t *dk,
				    int fd, char *devname)
{
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	struct imsm_disk *_disk;
	struct imsm_dev *dev;
	struct imsm_map *map;
	struct dl *dl, *df;
	int slot;
	int autolayout = 0;

	if (!is_fd_valid(fd))
		autolayout = 1;

	dev = get_imsm_dev(super, super->current_vol);
	map = get_imsm_map(dev, MAP_0);

	if (! (dk->state & (1<<MD_DISK_SYNC))) {
		pr_err("%s: Cannot add spare devices to IMSM volume\n",
		       devname);
		return 1;
	}

	for (dl = super->disks; dl; dl = dl->next) {
		if (autolayout) {
			if (dl->raiddisk == dk->raid_disk)
				break;
		} else if (dl->major == dk->major && dl->minor == dk->minor)
			break;
	}

	if (!dl) {
		pr_err("%s is not a member of the same container.\n",
		       devname);
		return 1;
	}

	if (!autolayout && super->current_vol > 0) {
		int _slot = get_disk_slot_in_dev(super, 0, dl->index);

		if (_slot != dk->raid_disk) {
			pr_err("Member %s is in %d slot for the first volume, but is in %d slot for a new volume.\n",
			       dl->devname, _slot, dk->raid_disk);
			pr_err("Raid members are in different order than for the first volume, aborting.\n");
			return 1;
		}
	}

	if (mpb->num_disks == 0)
		if (!get_dev_sector_size(dl->fd, dl->devname,
					 &super->sector_size))
			return 1;

	if (!drive_validate_sector_size(super, dl)) {
		pr_err("Combining drives of different sector size in one volume is not allowed\n");
		return 1;
	}

	/* add a pristine spare to the metadata */
	if (dl->index < 0) {
		dl->index = super->anchor->num_disks;
		super->anchor->num_disks++;
	}
	/* Check the device has not already been added */
	slot = get_imsm_disk_slot(map, dl->index);
	if (slot >= 0 &&
	    (get_imsm_ord_tbl_ent(dev, slot, MAP_X) & IMSM_ORD_REBUILD) == 0) {
		pr_err("%s has been included in this array twice\n",
		       devname);
		return 1;
	}
	set_imsm_ord_tbl_ent(map, dk->raid_disk, dl->index);
	dl->disk.status = CONFIGURED_DISK;

	/* update size of 'missing' disks to be at least as large as the
	 * largest active member (we only have dummy missing disks when
	 * creating the first volume)
	 */
	if (super->current_vol == 0) {
		for (df = super->missing; df; df = df->next) {
			if (total_blocks(&dl->disk) > total_blocks(&df->disk))
				set_total_blocks(&df->disk, total_blocks(&dl->disk));
			_disk = __get_imsm_disk(mpb, df->index);
			*_disk = df->disk;
		}
	}

	/* refresh unset/failed slots to point to valid 'missing' entries */
	for (df = super->missing; df; df = df->next)
		for (slot = 0; slot < mpb->num_disks; slot++) {
			__u32 ord = get_imsm_ord_tbl_ent(dev, slot, MAP_X);

			if ((ord & IMSM_ORD_REBUILD) == 0)
				continue;
			set_imsm_ord_tbl_ent(map, slot, df->index | IMSM_ORD_REBUILD);
			if (is_gen_migration(dev)) {
				struct imsm_map *map2 = get_imsm_map(dev,
								     MAP_1);
				int slot2 = get_imsm_disk_slot(map2, df->index);
				if (slot2 < map2->num_members && slot2 >= 0) {
					__u32 ord2 = get_imsm_ord_tbl_ent(dev,
									  slot2,
									  MAP_1);
					if ((unsigned)df->index ==
							       ord_to_idx(ord2))
						set_imsm_ord_tbl_ent(map2,
							slot2,
							df->index |
							IMSM_ORD_REBUILD);
				}
			}
			dprintf("set slot:%d to missing disk:%d\n", slot, df->index);
			break;
		}

	/* if we are creating the first raid device update the family number */
	if (super->current_vol == 0) {
		__u32 sum;
		struct imsm_dev *_dev = __get_imsm_dev(mpb, 0);

		_disk = __get_imsm_disk(mpb, dl->index);
		if (!_dev || !_disk) {
			pr_err("BUG mpb setup error\n");
			return 1;
		}
		*_dev = *dev;
		*_disk = dl->disk;
		sum = random32();
		sum += __gen_imsm_checksum(mpb);
		mpb->family_num = __cpu_to_le32(sum);
		mpb->orig_family_num = mpb->family_num;
		mpb->creation_time = __cpu_to_le64((__u64)time(NULL));
	}
	super->current_disk = dl;
	return 0;
}
/*******************************************************************************
 * Function:	mark_spare
 * Description:	Function marks disk as spare and restores disk serial
 *		in case it was previously marked as failed by takeover operation.
 * Parameters:
 *	disk	: imsm disk member
 * Returns:
 *	-1 : critical error
 *	 0 : disk is marked as spare but serial is not set
 *	 1 : success
 ******************************************************************************/
int mark_spare(struct dl *disk)
{
	__u8 serial[MAX_RAID_SERIAL_LEN];
	int ret_val = -1;

	if (!disk)
		return ret_val;

	ret_val = 0;
	if (!imsm_read_serial(disk->fd, NULL, serial, MAX_RAID_SERIAL_LEN)) {
		/* Restore disk serial number, because takeover marks disk
		 * as failed and adds to serial ':0' before it becomes
		 * a spare disk.
		 */
		serialcpy(disk->serial, serial);
		serialcpy(disk->disk.serial, serial);
		ret_val = 1;
	}
	disk->disk.status = SPARE_DISK;
	disk->index = -1;

	return ret_val;
}
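/* Illustrative only, with a hypothetical serial: takeover appends ":0" when
 * failing a disk, e.g. "Z1E4ABCD" becomes "Z1E4ABCD:0"; re-reading the
 * serial from the device above restores the original "Z1E4ABCD".
 */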
static int write_super_imsm_spare(struct intel_super *super, struct dl *d);

static int add_to_super_imsm(struct supertype *st, mdu_disk_info_t *dk,
			     int fd, char *devname,
			     unsigned long long data_offset)
{
	struct intel_super *super = st->sb;
	unsigned int member_sector_size;
	unsigned long long size;
	struct stat stb;
	struct dl *dd;
	__u32 id;
	int rv;

	/* If we are on a RAID enabled platform check that the disk is
	 * attached to the raid controller.
	 * We do not need to test disks attachment for container based additions,
	 * they shall be already tested when container was created/assembled.
	 */
	rv = find_intel_hba_capability(fd, super, devname);
	/* no orom/efi or non-intel hba of the disk */
	if (rv != 0) {
		dprintf("capability: %p fd: %d ret: %d\n", super->orom, fd, rv);
		return MDADM_STATUS_ERROR;
	}

	if (super->current_vol >= 0)
		return add_to_super_imsm_volume(st, dk, fd, devname);

	if (fstat(fd, &stb) != 0)
		return MDADM_STATUS_ERROR;

	dd = xcalloc(sizeof(*dd), 1);

	if (devname)
		dd->devname = xstrdup(devname);

	if (sysfs_disk_to_scsi_id(fd, &id) == 0)
		dd->disk.scsi_id = __cpu_to_le32(id);

	dd->major = major(stb.st_rdev);
	dd->minor = minor(stb.st_rdev);
	dd->action = DISK_ADD;
	dd->fd = fd;

	rv = imsm_read_serial(fd, devname, dd->serial, MAX_RAID_SERIAL_LEN);
	if (rv) {
		pr_err("failed to retrieve scsi serial, aborting\n");
		goto error;
	}

	if (super->hba && ((super->hba->type == SYS_DEV_NVME) ||
	    (super->hba->type == SYS_DEV_VMD))) {
		char pci_dev_path[PATH_MAX];
		char cntrl_path[PATH_MAX];

		if (!diskfd_to_devpath(fd, 2, pci_dev_path) ||
		    !diskfd_to_devpath(fd, 1, cntrl_path)) {
			pr_err("failed to get dev paths, aborting\n");
			goto error;
		}

		if (is_multipath_nvme(fd))
			pr_err("%s controller supports Multi-Path I/O, Intel (R) VROC does not support multipathing\n",
			       basename(cntrl_path));

		if (super->orom && devpath_to_vendor(pci_dev_path) != 0x8086 &&
		    !imsm_orom_has_tpv_support(super->orom)) {
			pr_err("\tPlatform configuration does not support non-Intel NVMe drives.\n"
			       "\tPlease refer to Intel(R) RSTe/VROC user guide.\n");
			goto error;
		}
	}

	if (!get_dev_size(fd, NULL, &size) || !get_dev_sector_size(fd, NULL, &member_sector_size))
		goto error;

	if (super->sector_size == 0)
		/* this a first device, so sector_size is not set yet */
		super->sector_size = member_sector_size;

	/* clear migr_rec when adding disk to container */
	memset(super->migr_rec_buf, 0, MIGR_REC_BUF_SECTORS * MAX_SECTOR_SIZE);
	if (lseek64(fd, (size - MIGR_REC_SECTOR_POSITION * member_sector_size), SEEK_SET) >= 0) {
		unsigned int nbytes = MIGR_REC_BUF_SECTORS * member_sector_size;

		if ((unsigned int)write(fd, super->migr_rec_buf, nbytes) != nbytes)
			perror("Write migr_rec failed");
	}

	size /= 512;
	serialcpy(dd->disk.serial, dd->serial);
	set_total_blocks(&dd->disk, size);

	if (__le32_to_cpu(dd->disk.total_blocks_hi) > 0) {
		struct imsm_super *mpb = super->anchor;

		mpb->attributes |= MPB_ATTRIB_2TB_DISK;
	}

	mark_spare(dd);

	if (st->update_tail) {
		dd->next = super->disk_mgmt_list;
		super->disk_mgmt_list = dd;
	} else {
		/* this is called outside of mdmon
		 * write initial spare metadata
		 * mdmon will overwrite it.
		 */
		dd->next = super->disks;
		super->disks = dd;
		write_super_imsm_spare(super, dd);
	}

	return MDADM_STATUS_SUCCESS;

error:
	__free_imsm_disk(dd, 0);
	return MDADM_STATUS_ERROR;
}
static int remove_from_super_imsm(struct supertype *st, mdu_disk_info_t *dk)
{
	struct intel_super *super = st->sb;
	struct dl *dd;

	/* remove from super works only in mdmon - for communication
	 * manager - monitor. Check if communication memory buffer
	 * is prepared.
	 */
	if (!st->update_tail) {
		pr_err("shall be used in mdmon context only\n");
		return 1;
	}
	dd = xcalloc(1, sizeof(*dd));
	dd->major = dk->major;
	dd->minor = dk->minor;
	dd->fd = -1;
	mark_spare(dd);
	dd->action = DISK_REMOVE;

	dd->next = super->disk_mgmt_list;
	super->disk_mgmt_list = dd;

	return 0;
}
static int store_imsm_mpb(int fd, struct imsm_super *mpb);

static union {
	char buf[MAX_SECTOR_SIZE];
	struct imsm_super anchor;
} spare_record __attribute__ ((aligned(MAX_SECTOR_SIZE)));
static int write_super_imsm_spare(struct intel_super *super, struct dl *d)
{
	struct imsm_super *spare = &spare_record.anchor;
	__u32 sum;

	spare->mpb_size = __cpu_to_le32(sizeof(struct imsm_super));
	spare->generation_num = __cpu_to_le32(1UL);
	spare->num_disks = 1;
	spare->num_raid_devs = 0;
	spare->pwr_cycle_count = __cpu_to_le32(1);

	imsm_write_signature(spare);

	spare->disk[0] = d->disk;
	if (__le32_to_cpu(d->disk.total_blocks_hi) > 0)
		spare->attributes |= MPB_ATTRIB_2TB_DISK;

	if (super->sector_size == 4096)
		convert_to_4k_imsm_disk(&spare->disk[0]);

	sum = __gen_imsm_checksum(spare);
	spare->family_num = __cpu_to_le32(sum);
	spare->orig_family_num = 0;
	sum = __gen_imsm_checksum(spare);
	spare->check_sum = __cpu_to_le32(sum);

	if (store_imsm_mpb(d->fd, spare)) {
		pr_err("failed for device %d:%d %s\n",
		       d->major, d->minor, strerror(errno));
		return 1;
	}

	return 0;
}
/* spare records have their own family number and do not have any defined raid
 * devices
 */
static int write_super_imsm_spares(struct intel_super *super, int doclose)
{
	struct dl *d;

	for (d = super->disks; d; d = d->next) {
		if (d->index != -1)
			continue;

		if (write_super_imsm_spare(super, d))
			return 1;

		if (doclose)
			close_fd(&d->fd);
	}

	return 0;
}
*st
, int doclose
)
6300 struct intel_super
*super
= st
->sb
;
6301 unsigned int sector_size
= super
->sector_size
;
6302 struct imsm_super
*mpb
= super
->anchor
;
6308 __u32 mpb_size
= sizeof(struct imsm_super
) - sizeof(struct imsm_disk
);
6310 int clear_migration_record
= 1;
6313 /* 'generation' is incremented everytime the metadata is written */
6314 generation
= __le32_to_cpu(mpb
->generation_num
);
6316 mpb
->generation_num
= __cpu_to_le32(generation
);
6318 /* fix up cases where previous mdadm releases failed to set
6321 if (mpb
->orig_family_num
== 0)
6322 mpb
->orig_family_num
= mpb
->family_num
;
6324 for (d
= super
->disks
; d
; d
= d
->next
) {
6328 mpb
->disk
[d
->index
] = d
->disk
;
6332 for (d
= super
->missing
; d
; d
= d
->next
) {
6333 mpb
->disk
[d
->index
] = d
->disk
;
6336 mpb
->num_disks
= num_disks
;
6337 mpb_size
+= sizeof(struct imsm_disk
) * mpb
->num_disks
;
6339 for (i
= 0; i
< mpb
->num_raid_devs
; i
++) {
6340 struct imsm_dev
*dev
= __get_imsm_dev(mpb
, i
);
6341 struct imsm_dev
*dev2
= get_imsm_dev(super
, i
);
6343 imsm_copy_dev(dev
, dev2
);
6344 mpb_size
+= sizeof_imsm_dev(dev
, 0);
6346 if (is_gen_migration(dev2
))
6347 clear_migration_record
= 0;
6350 bbm_log_size
= get_imsm_bbm_log_size(super
->bbm_log
);
6353 memcpy((void *)mpb
+ mpb_size
, super
->bbm_log
, bbm_log_size
);
6354 mpb
->attributes
|= MPB_ATTRIB_BBM
;
6356 mpb
->attributes
&= ~MPB_ATTRIB_BBM
;
6358 super
->anchor
->bbm_log_size
= __cpu_to_le32(bbm_log_size
);
6359 mpb_size
+= bbm_log_size
;
6360 mpb
->mpb_size
= __cpu_to_le32(mpb_size
);
6363 assert(super
->len
== 0 || mpb_size
<= super
->len
);
6366 /* recalculate checksum */
6367 sum
= __gen_imsm_checksum(mpb
);
6368 mpb
->check_sum
= __cpu_to_le32(sum
);
6370 if (super
->clean_migration_record_by_mdmon
) {
6371 clear_migration_record
= 1;
6372 super
->clean_migration_record_by_mdmon
= 0;
6374 if (clear_migration_record
)
6375 memset(super
->migr_rec_buf
, 0,
6376 MIGR_REC_BUF_SECTORS
*MAX_SECTOR_SIZE
);
6378 if (sector_size
== 4096)
6379 convert_to_4k(super
);
6381 /* write the mpb for disks that compose raid devices */
6382 for (d
= super
->disks
; d
; d
= d
->next
) {
6383 if (d
->index
< 0 || is_failed(&d
->disk
))
6386 if (clear_migration_record
) {
6387 unsigned long long dsize
;
6389 get_dev_size(d
->fd
, NULL
, &dsize
);
6390 if (lseek64(d
->fd
, dsize
- sector_size
,
6392 if ((unsigned int)write(d
->fd
,
6393 super
->migr_rec_buf
,
6394 MIGR_REC_BUF_SECTORS
*sector_size
) !=
6395 MIGR_REC_BUF_SECTORS
*sector_size
)
6396 perror("Write migr_rec failed");
6400 if (store_imsm_mpb(d
->fd
, mpb
))
6402 "failed for device %d:%d (fd: %d)%s\n",
6404 d
->fd
, strerror(errno
));
6411 return write_super_imsm_spares(super
, doclose
);
static int create_array(struct supertype *st, int dev_idx)
{
	size_t len;
	struct imsm_update_create_array *u;
	struct intel_super *super = st->sb;
	struct imsm_dev *dev = get_imsm_dev(super, dev_idx);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	struct disk_info *inf;
	struct imsm_disk *disk;
	int i;

	len = sizeof(*u) - sizeof(*dev) + sizeof_imsm_dev(dev, 0) +
		sizeof(*inf) * map->num_members;
	u = xmalloc(len);
	u->type = update_create_array;
	u->dev_idx = dev_idx;
	imsm_copy_dev(&u->dev, dev);
	inf = get_disk_info(u);
	for (i = 0; i < map->num_members; i++) {
		int idx = get_imsm_disk_idx(dev, i, MAP_X);

		disk = get_imsm_disk(super, idx);
		if (!disk)
			disk = get_imsm_missing(super, idx);
		serialcpy(inf[i].serial, disk->serial);
	}
	append_metadata_update(st, u, len);

	return 0;
}
static int mgmt_disk(struct supertype *st)
{
	struct intel_super *super = st->sb;
	size_t len;
	struct imsm_update_add_remove_disk *u;

	if (!super->disk_mgmt_list)
		return 0;

	len = sizeof(*u);
	u = xmalloc(len);
	u->type = update_add_remove_disk;
	append_metadata_update(st, u, len);

	return 0;
}
__u32 crc32c_le(__u32 crc, unsigned char const *p, size_t len);

static int write_ppl_header(unsigned long long ppl_sector, int fd, void *buf)
{
	struct ppl_header *ppl_hdr = buf;
	int ret;

	ppl_hdr->checksum = __cpu_to_le32(~crc32c_le(~0, buf, PPL_HEADER_SIZE));

	if (lseek64(fd, ppl_sector * 512, SEEK_SET) < 0) {
		ret = -errno;
		perror("Failed to seek to PPL header location");
		return ret;
	}

	if (write(fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
		ret = -errno;
		perror("Write PPL header failed");
		return ret;
	}

	fsync(fd);

	return 0;
}
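/* Sketch of the checksum convention used above (illustrative): the CRC is
 * computed over the whole PPL_HEADER_SIZE buffer with the checksum field
 * still zero, seeded with ~0 and bit-inverted on output.  A reader verifies
 * it with the mirror expression, as validate_ppl_imsm() does later:
 *
 *	crc = __le32_to_cpu(ppl_hdr->checksum);
 *	ppl_hdr->checksum = 0;
 *	if (crc != ~crc32c_le(~0, buf, PPL_HEADER_SIZE))
 *		... header is corrupt ...
 */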
static int write_init_ppl_imsm(struct supertype *st, struct mdinfo *info, int fd)
{
	struct intel_super *super = st->sb;
	void *buf;
	struct ppl_header *ppl_hdr;
	int ret;

	/* first clear entire ppl space */
	ret = zero_disk_range(fd, info->ppl_sector, info->ppl_size);
	if (ret)
		return ret;

	ret = posix_memalign(&buf, MAX_SECTOR_SIZE, PPL_HEADER_SIZE);
	if (ret) {
		pr_err("Failed to allocate PPL header buffer\n");
		return -ret;
	}

	memset(buf, 0, PPL_HEADER_SIZE);
	ppl_hdr = buf;
	memset(ppl_hdr->reserved, 0xff, PPL_HDR_RESERVED);
	ppl_hdr->signature = __cpu_to_le32(super->anchor->orig_family_num);

	if (info->mismatch_cnt) {
		/*
		 * We are overwriting an invalid ppl. Make one entry with wrong
		 * checksum to prevent the kernel from skipping resync.
		 */
		ppl_hdr->entries_count = __cpu_to_le32(1);
		ppl_hdr->entries[0].checksum = ~0;
	}

	ret = write_ppl_header(info->ppl_sector, fd, buf);

	free(buf);
	return ret;
}
static int is_rebuilding(struct imsm_dev *dev);

static int validate_ppl_imsm(struct supertype *st, struct mdinfo *info,
			     struct mdinfo *disk)
{
	struct intel_super *super = st->sb;
	struct dl *d;
	void *buf_orig, *buf, *buf_prev = NULL;
	__u32 crc;
	struct ppl_header *ppl_hdr = NULL;
	__u32 idx;
	unsigned int i;
	struct imsm_dev *dev;
	int ret = 0;
	unsigned long long ppl_offset = 0;
	unsigned long long prev_gen_num = 0;

	if (disk->disk.raid_disk < 0)
		return 0;

	dev = get_imsm_dev(super, info->container_member);
	idx = get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_0);
	d = get_imsm_dl_disk(super, idx);

	if (!d || d->index < 0 || is_failed(&d->disk))
		return 0;

	if (posix_memalign(&buf_orig, MAX_SECTOR_SIZE, PPL_HEADER_SIZE * 2)) {
		pr_err("Failed to allocate PPL header buffer\n");
		return -1;
	}
	buf = buf_orig;

	ret = 1;
	while (ppl_offset < MULTIPLE_PPL_AREA_SIZE_IMSM) {
		void *tmp;

		dprintf("Checking potential PPL at offset: %llu\n", ppl_offset);

		if (lseek64(d->fd, info->ppl_sector * 512 + ppl_offset,
			    SEEK_SET) < 0) {
			perror("Failed to seek to PPL header location");
			ret = -1;
			break;
		}

		if (read(d->fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
			perror("Read PPL header failed");
			ret = -1;
			break;
		}

		ppl_hdr = buf;

		crc = __le32_to_cpu(ppl_hdr->checksum);
		ppl_hdr->checksum = 0;

		if (crc != ~crc32c_le(~0, buf, PPL_HEADER_SIZE)) {
			dprintf("Wrong PPL header checksum on %s\n",
				d->devname);
			break;
		}

		if (prev_gen_num > __le64_to_cpu(ppl_hdr->generation)) {
			/* previous was newest, it was already checked */
			break;
		}

		if ((__le32_to_cpu(ppl_hdr->signature) !=
			      super->anchor->orig_family_num)) {
			dprintf("Wrong PPL header signature on %s\n",
				d->devname);
			ret = 1;
			break;
		}

		ret = 0;
		prev_gen_num = __le64_to_cpu(ppl_hdr->generation);

		ppl_offset += PPL_HEADER_SIZE;
		for (i = 0; i < __le32_to_cpu(ppl_hdr->entries_count); i++)
			ppl_offset +=
				   __le32_to_cpu(ppl_hdr->entries[i].pp_size);

		if (!buf_prev)
			buf_prev = buf + PPL_HEADER_SIZE;
		tmp = buf_prev;
		buf_prev = buf;
		buf = tmp;
	}

	if (buf_prev) {
		buf = buf_prev;
		ppl_hdr = buf_prev;
	}

	/*
	 * Update metadata to use multiple PPLs area (1MB).
	 * This is done once for all RAID members
	 */
	if (info->consistency_policy == CONSISTENCY_POLICY_PPL &&
	    info->ppl_size != (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9)) {
		char subarray[20];
		struct mdinfo *member_dev;

		sprintf(subarray, "%d", info->container_member);

		if (mdmon_running(st->container_devnm))
			st->update_tail = &st->updates;

		if (st->ss->update_subarray(st, subarray, UOPT_PPL, NULL)) {
			pr_err("Failed to update subarray %s\n",
			       subarray);
		} else {
			if (st->update_tail)
				flush_metadata_updates(st);
			else
				st->ss->sync_metadata(st);
			info->ppl_size = (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9);
			for (member_dev = info->devs; member_dev;
			     member_dev = member_dev->next)
				member_dev->ppl_size =
					(MULTIPLE_PPL_AREA_SIZE_IMSM >> 9);
		}
	}

	if (ret == 1) {
		struct imsm_map *map = get_imsm_map(dev, MAP_X);

		if (map->map_state == IMSM_T_STATE_UNINITIALIZED ||
		    (map->map_state == IMSM_T_STATE_NORMAL &&
		     !(dev->vol.dirty & RAIDVOL_DIRTY)) ||
		    (is_rebuilding(dev) &&
		     vol_curr_migr_unit(dev) == 0 &&
		     get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_1) != idx))
			ret = st->ss->write_init_ppl(st, info, d->fd);
		else
			info->mismatch_cnt++;
	} else if (ret == 0 &&
		   ppl_hdr->entries_count == 0 &&
		   is_rebuilding(dev) &&
		   info->resync_start == 0) {
		/*
		 * The header has no entries - add a single empty entry and
		 * rewrite the header to prevent the kernel from going into
		 * resync after an interrupted rebuild.
		 */
		ppl_hdr->entries_count = __cpu_to_le32(1);
		ret = write_ppl_header(info->ppl_sector, d->fd, buf);
	}

	free(buf_orig);

	return ret;
}
static int write_init_ppl_imsm_all(struct supertype *st, struct mdinfo *info)
{
	struct intel_super *super = st->sb;
	struct dl *d;
	int ret = 0;

	if (info->consistency_policy != CONSISTENCY_POLICY_PPL ||
	    info->array.level != 5)
		return 0;

	for (d = super->disks; d; d = d->next) {
		if (d->index < 0 || is_failed(&d->disk))
			continue;

		ret = st->ss->write_init_ppl(st, info, d->fd);
		if (ret)
			break;
	}

	return ret;
}
/*******************************************************************************
 * Function:	write_init_bitmap_imsm_vol
 * Description:	Writes a bitmap header and prepares the area for the bitmap.
 * Parameters:
 *	st	: supertype information
 *	vol_idx	: the volume index to use
 *
 * Returns:
 *	 0 : success
 *	-1 : fail
 ******************************************************************************/
static int write_init_bitmap_imsm_vol(struct supertype *st, int vol_idx)
{
	struct intel_super *super = st->sb;
	int prev_current_vol = super->current_vol;
	struct dl *d;
	int ret = 0;

	super->current_vol = vol_idx;
	for (d = super->disks; d; d = d->next) {
		if (d->index < 0 || is_failed(&d->disk))
			continue;
		ret = st->ss->write_bitmap(st, d->fd, NoUpdate);
		if (ret)
			break;
	}
	super->current_vol = prev_current_vol;
	return ret;
}
/*******************************************************************************
 * Function:	write_init_bitmap_imsm_all
 * Description:	Writes a bitmap header and prepares the area for the bitmap.
 *		Operation is executed for volumes with CONSISTENCY_POLICY_BITMAP.
 * Parameters:
 *	st	: supertype information
 *	info	: info about the volume where the bitmap should be written
 *	vol_idx	: the volume index to use
 *
 * Returns:
 *	 0 : success
 *	-1 : fail
 ******************************************************************************/
static int write_init_bitmap_imsm_all(struct supertype *st, struct mdinfo *info,
				      int vol_idx)
{
	int ret = 0;

	if (info && (info->consistency_policy == CONSISTENCY_POLICY_BITMAP))
		ret = write_init_bitmap_imsm_vol(st, vol_idx);

	return ret;
}
static int write_init_super_imsm(struct supertype *st)
{
	struct intel_super *super = st->sb;
	int current_vol = super->current_vol;
	int rv = 0;
	struct mdinfo info;

	getinfo_super_imsm(st, &info, NULL);

	/* we are done with current_vol reset it to point st at the container */
	super->current_vol = -1;

	if (st->update_tail) {
		/* queue the recently created array / added disk
		 * as a metadata update */

		/* determine if we are creating a volume or adding a disk */
		if (current_vol < 0) {
			/* in the mgmt (add/remove) disk case we are running
			 * in mdmon context, so don't close fd's
			 */
			rv = mgmt_disk(st);
		} else {
			/* adding the second volume to the array */
			rv = write_init_ppl_imsm_all(st, &info);
			if (!rv)
				rv = write_init_bitmap_imsm_all(st, &info, current_vol);
			if (!rv)
				rv = create_array(st, current_vol);
		}
	} else {
		struct dl *d;

		for (d = super->disks; d; d = d->next)
			Kill(d->devname, NULL, 0, -1, 1);
		if (current_vol >= 0) {
			rv = write_init_ppl_imsm_all(st, &info);
			if (!rv)
				rv = write_init_bitmap_imsm_all(st, &info, current_vol);
		}
		if (!rv)
			rv = write_super_imsm(st, 1);
	}

	return rv;
}
static int store_super_imsm(struct supertype *st, int fd)
{
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super ? super->anchor : NULL;

	if (!mpb)
		return 1;

	if (super->sector_size == 4096)
		convert_to_4k(super);
	return store_imsm_mpb(fd, mpb);
}
static int validate_geometry_imsm_container(struct supertype *st, int level,
					    int raiddisks,
					    unsigned long long data_offset,
					    char *dev,
					    unsigned long long *freesize,
					    int verbose)
{
	int fd;
	unsigned long long ldsize;
	struct intel_super *super = NULL;
	int rv = 0;

	if (!is_container(level))
		return 0;
	if (!dev)
		return 1;

	fd = dev_open(dev, O_RDONLY|O_EXCL);
	if (!is_fd_valid(fd)) {
		pr_vrb("imsm: Cannot open %s: %s\n", dev, strerror(errno));
		return 0;
	}
	if (!get_dev_size(fd, dev, &ldsize))
		goto exit;

	/* capabilities retrieve could be possible
	 * note that there is no fd for the disks in array.
	 */
	super = alloc_super();
	if (!super)
		goto exit;

	if (!get_dev_sector_size(fd, NULL, &super->sector_size))
		goto exit;

	rv = find_intel_hba_capability(fd, super, verbose > 0 ? dev : NULL);
	if (rv != 0) {
		char str[256];

		fd2devname(fd, str);
		dprintf("fd: %d %s orom: %p rv: %d raiddisk: %d\n",
			fd, str, super->orom, rv, raiddisks);
		/* no orom/efi or non-intel hba of the disk */
		rv = 0;
		goto exit;
	}

	if (super->orom) {
		if (raiddisks > super->orom->tds) {
			if (verbose)
				pr_err("%d exceeds maximum number of platform supported disks: %d\n",
				       raiddisks, super->orom->tds);
			goto exit;
		}
		if ((super->orom->attr & IMSM_OROM_ATTR_2TB_DISK) == 0 &&
		    (ldsize >> 9) >> 32 > 0) {
			if (verbose)
				pr_err("%s exceeds maximum platform supported size\n", dev);
			goto exit;
		}

		if (super->hba->type == SYS_DEV_VMD ||
		    super->hba->type == SYS_DEV_NVME) {
			if (!imsm_is_nvme_namespace_supported(fd, 1)) {
				if (verbose)
					pr_err("NVMe namespace %s is not supported by IMSM\n",
					       basename(dev));
				goto exit;
			}
		}
	}
	if (freesize)
		*freesize = avail_size_imsm(st, ldsize >> 9, data_offset);
	rv = 1;
exit:
	if (super)
		free_imsm(super);
	close(fd);

	return rv;
}
static unsigned long long find_size(struct extent *e, int *idx, int num_extents)
{
	const unsigned long long base_start = e[*idx].start;
	unsigned long long end = base_start + e[*idx].size;
	int i;

	if (base_start == end)
		return 0;

	*idx = *idx + 1;
	for (i = *idx; i < num_extents; i++) {
		/* extend overlapping extents */
		if (e[i].start >= base_start &&
		    e[i].start <= end) {
			if (e[i].start + e[i].size > end)
				end = e[i].start + e[i].size;
		} else if (e[i].start > end) {
			*idx = i;
			break;
		}
	}

	return end - base_start;
}
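/* Worked example (illustrative): for sorted extents
 * {start=0, size=1024}, {start=512, size=1024}, {start=4096, size=512}
 * and *idx == 0, the first two overlap, so end grows to 1536 and the scan
 * stops at the gap before 4096: find_size() returns 1536 and leaves *idx
 * pointing at the extent starting at 4096.
 */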
6929 /** merge_extents() - analyze extents and get free size.
6930 * @super: Intel metadata, not NULL.
6931 * @expanding: if set, we are expanding &super->current_vol.
6933 * Build a composite disk with all known extents and generate a size given the
6934 * "all disks in an array must share a common start offset" constraint.
6935 * If a volume is expanded, then return free space after the volume.
6937 * Return: Free space or 0 on failure.
static unsigned long long merge_extents(struct intel_super *super, const bool expanding)
{
	struct extent *e;
	struct dl *dl;
	int i, j, pos_vol_idx = -1;
	int extent_idx = 0;
	int sum_extents = 0;
	unsigned long long pos = 0;
	unsigned long long start = 0;
	unsigned long long free_size = 0;

	unsigned long pre_reservation = 0;
	unsigned long post_reservation = IMSM_RESERVED_SECTORS;
	unsigned long reservation_size;

	for (dl = super->disks; dl; dl = dl->next)
		if (dl->e)
			sum_extents += dl->extent_cnt;
	e = xcalloc(sum_extents, sizeof(struct extent));

	/* coalesce and sort all extents. also, check to see if we need to
	 * reserve space between member arrays
	 */
	j = 0;
	for (dl = super->disks; dl; dl = dl->next) {
		if (!dl->e)
			continue;
		for (i = 0; i < dl->extent_cnt; i++)
			e[j++] = dl->e[i];
	}
	qsort(e, sum_extents, sizeof(*e), cmp_extent);

	/* merge extents */
	i = 0;
	j = 0;
	while (i < sum_extents) {
		e[j].start = e[i].start;
		e[j].vol = e[i].vol;
		e[j].size = find_size(e, &i, sum_extents);
		j++;
		if (e[j-1].size == 0)
			break;
	}

	i = 0;
	do {
		unsigned long long esize = e[i].start - pos;

		if (expanding ? pos_vol_idx == super->current_vol : esize >= free_size) {
			free_size = esize;
			start = pos;
			extent_idx = i;
		}

		pos = e[i].start + e[i].size;
		pos_vol_idx = e[i].vol;

		i++;
	} while (e[i-1].size);

	if (free_size == 0) {
		dprintf("imsm: Cannot find free size.\n");
		free(e);
		return 0;
	}

	if (!expanding && extent_idx != 0)
		/*
		 * A volume other than the first one in the container is being
		 * created, so pre_reservation is needed.
		 */
		pre_reservation = IMSM_RESERVED_SECTORS;

	if (e[extent_idx].size == 0)
		/*
		 * extent_idx points to the metadata, post_reservation is already done.
		 */
		post_reservation = 0;

	free(e);

	reservation_size = pre_reservation + post_reservation;

	if (free_size < reservation_size) {
		dprintf("imsm: Reservation size is greater than free space.\n");
		return 0;
	}

	super->create_offset = start + pre_reservation;
	return free_size - reservation_size;
}
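
/*
 * Reservation arithmetic sketch (illustrative numbers, not taken from any
 * real layout): if the winning free run starts at sector 1000000 and spans
 * 10000000 sectors, and the volume being created is not the first one and
 * does not touch the metadata extent, then both reservations apply:
 * create_offset = 1000000 + IMSM_RESERVED_SECTORS and the returned usable
 * size is 10000000 - 2 * IMSM_RESERVED_SECTORS.
 */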
/**
 * is_raid_level_supported() - check if this count of drives and level is supported by platform.
 * @orom: hardware properties, could be NULL.
 * @level: requested raid level.
 * @raiddisks: requested disk count.
 *
 * IMSM UEFI/OROM does not provide information about the supported count of
 * raid disks for a particular level, so it is hardcoded.
 * It is recommended not to allow levels other than the supported ones;
 * IMSM code is not tested against other level implementations.
 *
 * Return: true if supported, false otherwise.
 */
static bool is_raid_level_supported(const struct imsm_orom *orom, int level, int raiddisks)
{
	int idx;

	for (idx = 0; imsm_level_ops[idx].name; idx++) {
		if (imsm_level_ops[idx].level == level)
			break;
	}

	if (!imsm_level_ops[idx].name)
		return false;

	if (!imsm_level_ops[idx].is_raiddisks_count_supported(raiddisks))
		return false;

	if (!orom)
		return true;

	if (imsm_level_ops[idx].is_level_supported(orom))
		return true;

	return false;
}
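
/*
 * Hypothetical caller sketch (not a path in this file): before creating a
 * 4-disk RAID 10 volume one would gate on
 *
 *	if (!is_raid_level_supported(super->orom, 10, 4))
 *		return 0;
 *
 * which walks the imsm_level_ops table, applies the per-level disk-count
 * callback, and only consults the OROM capability bits when an OROM is
 * present.
 */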
static int
active_arrays_by_format(char *name, char *hba, struct md_list **devlist,
			int dpa, int verbose)
{
	struct mdstat_ent *mdstat = mdstat_read(0, 0);
	struct mdstat_ent *memb;
	int count = 0;
	int num = 0;
	struct md_list *dv;
	int found;

	for (memb = mdstat; memb; memb = memb->next) {
		if (is_mdstat_ent_external(memb) && !is_subarray(memb->metadata_version + 9) &&
		    strcmp(&memb->metadata_version[9], name) == 0 && memb->members) {
			struct dev_member *dev = memb->members;
			int fd = -1;

			while (dev && !is_fd_valid(fd)) {
				char path[PATH_MAX];

				num = snprintf(path, PATH_MAX, "%s%s", "/dev/", dev->name);
				if (num > 0)
					fd = open(path, O_RDONLY, 0);
				if (num <= 0 || !is_fd_valid(fd)) {
					pr_vrb("Cannot open %s: %s\n",
					       dev->name, strerror(errno));
				}
				dev = dev->next;
			}
			found = 0;
			if (is_fd_valid(fd) && disk_attached_to_hba(fd, hba)) {
				struct mdstat_ent *vol;

				for (vol = mdstat; vol; vol = vol->next) {
					if (vol->active > 0 &&
					    is_container_member(vol, memb->devnm)) {
						found++;
						count++;
					}
				}
				if (*devlist && (found < dpa)) {
					dv = xcalloc(1, sizeof(*dv));
					dv->devname = xmalloc(strlen(memb->devnm) + strlen("/dev/") + 1);
					sprintf(dv->devname, "%s%s", "/dev/", memb->devnm);
					dv->found = found;
					dv->used = 0;
					dv->next = *devlist;
					*devlist = dv;
				}
			}
			if (is_fd_valid(fd))
				close(fd);
		}
	}
	free_mdstat(mdstat);
	return count;
}
static struct md_list *
get_loop_devices(void)
{
	int i;
	struct md_list *devlist = NULL;
	struct md_list *dv;

	for (i = 0; i < 12; i++) {
		dv = xcalloc(1, sizeof(*dv));
		dv->devname = xmalloc(40);
		sprintf(dv->devname, "/dev/loop%d", i);
		dv->next = devlist;
		devlist = dv;
	}
	return devlist;
}
static struct md_list *
get_devices(const char *hba_path)
{
	struct md_list *devlist = NULL;
	struct md_list *dv;
	struct dirent *ent;
	DIR *dir;

#if DEBUG_LOOP
	devlist = get_loop_devices();
	return devlist;
#endif
	/* scroll through /sys/dev/block looking for devices attached to
	 * this hba
	 */
	dir = opendir("/sys/dev/block");
	for (ent = dir ? readdir(dir) : NULL; ent; ent = readdir(dir)) {
		int fd;
		char buf[PATH_MAX];
		int major, minor;
		char *path = NULL;

		if (sscanf(ent->d_name, "%d:%d", &major, &minor) != 2)
			continue;
		path = devt_to_devpath(makedev(major, minor), 1, NULL);
		if (!path)
			continue;
		if (!is_path_attached_to_hba(path, hba_path)) {
			free(path);
			path = NULL;
			continue;
		}
		free(path);
		path = NULL;
		fd = dev_open(ent->d_name, O_RDONLY);
		if (is_fd_valid(fd)) {
			fd2devname(fd, buf);
			close(fd);
		} else {
			pr_err("cannot open device: %s\n",
			       ent->d_name);
			continue;
		}

		dv = xcalloc(1, sizeof(*dv));
		dv->devname = xstrdup(buf);
		dv->next = devlist;
		devlist = dv;
	}
	if (dir)
		closedir(dir);
	return devlist;
}
static int
count_volumes_list(struct md_list *devlist, char *homehost,
		   int verbose, int *found)
{
	struct md_list *tmpdev;
	int count = 0;
	int err = 0;
	struct supertype *st;

	/* first walk the list of devices to find a consistent set
	 * that match the criteria, if that is possible.
	 * We flag the ones we like with 'used'.
	 */
	*found = 0;
	st = match_metadata_desc_imsm("imsm");
	if (st == NULL) {
		pr_vrb("cannot allocate memory for imsm supertype\n");
		return 0;
	}

	for (tmpdev = devlist; tmpdev; tmpdev = tmpdev->next) {
		char *devname = tmpdev->devname;
		dev_t rdev;
		struct supertype *tst;
		int dfd;

		if (tmpdev->used > 1)
			continue;
		tst = dup_super(st);
		if (tst == NULL) {
			pr_vrb("cannot allocate memory for imsm supertype\n");
			goto err_1;
		}
		tmpdev->container = 0;
		dfd = dev_open(devname, O_RDONLY|O_EXCL);
		if (!is_fd_valid(dfd)) {
			dprintf("cannot open device %s: %s\n",
				devname, strerror(errno));
			tmpdev->used = 2;
		} else if (!fstat_is_blkdev(dfd, devname, &rdev)) {
			tmpdev->used = 2;
		} else if (must_be_container(dfd)) {
			struct supertype *cst;

			cst = super_by_fd(dfd, NULL);
			if (cst == NULL) {
				dprintf("cannot recognize container type %s\n",
					devname);
				tmpdev->used = 2;
			} else if (tst->ss != st->ss) {
				dprintf("non-imsm container - ignore it: %s\n",
					devname);
				tmpdev->used = 2;
			} else if (!tst->ss->load_container ||
				   tst->ss->load_container(tst, dfd, NULL))
				tmpdev->used = 2;
			else
				tmpdev->container = 1;
			if (cst)
				cst->ss->free_super(cst);
		} else {
			tmpdev->st_rdev = rdev;
			if (tst->ss->load_super(tst, dfd, NULL)) {
				dprintf("no RAID superblock on %s\n",
					devname);
				tmpdev->used = 2;
			} else if (tst->ss->compare_super == NULL) {
				dprintf("Cannot assemble %s metadata on %s\n",
					tst->ss->name, devname);
				tmpdev->used = 2;
			}
		}
		if (is_fd_valid(dfd))
			close(dfd);

		if (tmpdev->used == 2 || tmpdev->used == 4) {
			/* Ignore unrecognised devices during auto-assembly */
			goto loop;
		} else {
			struct mdinfo info;

			tst->ss->getinfo_super(tst, &info, NULL);

			if (st->minor_version == -1)
				st->minor_version = tst->minor_version;

			if (memcmp(info.uuid, uuid_zero,
				   sizeof(int[4])) == 0) {
				/* this is a floating spare. It cannot define
				 * an array unless there are no more arrays of
				 * this type to be found. It can be included
				 * in an array of this type though.
				 */
				tmpdev->used = 3;
				goto loop;
			}

			if (st->ss != tst->ss ||
			    st->minor_version != tst->minor_version ||
			    st->ss->compare_super(st, tst, 1) != 0) {
				/* Some mismatch. If exactly one array matches this host,
				 * we can resolve on that one.
				 * Or, if we are auto assembling, we just ignore the second
				 * for now.
				 */
				dprintf("superblock on %s doesn't match others - assembly aborted\n",
					devname);
				goto loop;
			}
			tmpdev->used = 1;
			*found = 1;
			dprintf("found: devname: %s\n", devname);
		}
loop:
		if (tst)
			tst->ss->free_super(tst);
	}
	if (*found != 0) {
		if ((err = load_super_imsm_all(st, -1, &st->sb, NULL, devlist, 0)) == 0) {
			struct mdinfo *iter, *head = st->ss->container_content(st, NULL);

			for (iter = head; iter; iter = iter->next) {
				dprintf("content->text_version: %s vol\n",
					iter->text_version);
				if (iter->array.state & (1<<MD_SB_BLOCK_VOLUME)) {
					/* do not assemble arrays with unsupported
					 * configurations
					 */
					dprintf("Cannot activate member %s.\n",
						iter->text_version);
				} else
					count++;
			}
			sysfs_free(head);
		} else {
			dprintf("No valid super block on device list: err: %d %p\n",
				err, st->sb);
		}
	} else {
		dprintf("no more devices to examine\n");
	}

	for (tmpdev = devlist; tmpdev; tmpdev = tmpdev->next) {
		if (tmpdev->used == 1 && tmpdev->found) {
			if (count) {
				if (count < tmpdev->found)
					count = 0;
				else
					count -= tmpdev->found;
			}
		}
		if (tmpdev->used == 1)
			tmpdev->used = 4;
	}
err_1:
	if (st)
		st->ss->free_super(st);
	return count;
}
static int __count_volumes(char *hba_path, int dpa, int verbose,
			   int cmp_hba_path)
{
	struct sys_dev *idev, *intel_devices = find_intel_devices();
	int count = 0;
	const struct orom_entry *entry;
	struct devid_list *dv, *devid_list;

	if (!hba_path)
		return 0;

	for (idev = intel_devices; idev; idev = idev->next) {
		if (strstr(idev->path, hba_path))
			break;
	}

	if (!idev || !idev->dev_id)
		return 0;

	entry = get_orom_entry_by_device_id(idev->dev_id);

	if (!entry || !entry->devid_list)
		return 0;

	devid_list = entry->devid_list;
	for (dv = devid_list; dv; dv = dv->next) {
		struct md_list *devlist;
		struct sys_dev *device = NULL;
		char *hpath;
		int found = 0;

		if (cmp_hba_path)
			device = device_by_id_and_path(dv->devid, hba_path);
		else
			device = device_by_id(dv->devid);

		if (device)
			hpath = device->path;
		else
			return 0;

		devlist = get_devices(hpath);
		/* if no intel devices return zero volumes */
		if (devlist == NULL)
			return 0;

		count += active_arrays_by_format("imsm", hpath, &devlist, dpa,
						 verbose);
		dprintf("path: %s active arrays: %d\n", hpath, count);
		if (devlist == NULL)
			return 0;
		do {
			found = 0;
			count += count_volumes_list(devlist,
						    NULL,
						    verbose,
						    &found);
			dprintf("found %d count: %d\n", found, count);
		} while (found);

		dprintf("path: %s total number of volumes: %d\n", hpath, count);

		while (devlist) {
			struct md_list *dv = devlist;

			devlist = devlist->next;
			free(dv->devname);
			free(dv);
		}
	}
	return count;
}
static int count_volumes(struct intel_hba *hba, int dpa, int verbose)
{
	if (!hba)
		return 0;
	if (hba->type == SYS_DEV_VMD) {
		struct sys_dev *dev;
		int count = 0;

		for (dev = find_intel_devices(); dev; dev = dev->next) {
			if (dev->type == SYS_DEV_VMD)
				count += __count_volumes(dev->path, dpa,
							 verbose, 1);
		}
		return count;
	}
	return __count_volumes(hba->path, dpa, verbose, 0);
}
static int imsm_default_chunk(const struct imsm_orom *orom)
{
	/* up to 512 if the platform supports it, otherwise the platform max.
	 * 128 if no platform detected
	 */
	int fs = max(7, orom ? fls(orom->sss) : 0);

	return min(512, (1 << fs));
}
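
/*
 * Worked example (illustrative): with no platform detected, orom == NULL,
 * so fs = max(7, 0) = 7 and the default chunk is min(512, 1 << 7) = 128 KiB.
 * When the OROM advertises larger strip sizes, fls(orom->sss) raises
 * 1 << fs accordingly, but the result always stays capped at 512 KiB by
 * the min().
 */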
static int
validate_geometry_imsm_orom(struct intel_super *super, int level, int layout,
			    int raiddisks, int *chunk, unsigned long long size, int verbose)
{
	/* check/set platform and metadata limits/defaults */
	if (super->orom && raiddisks > super->orom->dpa) {
		pr_vrb("platform supports a maximum of %d disks per array\n",
		       super->orom->dpa);
		return 0;
	}

	/* capabilities of OROM tested - copied from validate_geometry_imsm_volume */
	if (!is_raid_level_supported(super->orom, level, raiddisks)) {
		pr_vrb("platform does not support raid%d with %d disk%s\n",
		       level, raiddisks, raiddisks > 1 ? "s" : "");
		return 0;
	}

	if (*chunk == 0 || *chunk == UnSet)
		*chunk = imsm_default_chunk(super->orom);

	if (super->orom && !imsm_orom_has_chunk(super->orom, *chunk)) {
		pr_vrb("platform does not support a chunk size of: %d\n", *chunk);
		return 0;
	}

	if (layout != imsm_level_to_layout(level)) {
		if (level == 5)
			pr_vrb("imsm raid 5 only supports the left-asymmetric layout\n");
		else if (level == 10)
			pr_vrb("imsm raid 10 only supports the n2 layout\n");
		else
			pr_vrb("imsm unknown layout %#x for this raid level %d\n",
			       layout, level);
		return 0;
	}

	if (super->orom && (super->orom->attr & IMSM_OROM_ATTR_2TB) == 0 &&
	    (calc_array_size(level, raiddisks, layout, *chunk, size) >> 32) > 0) {
		pr_vrb("platform does not support a volume size over 2TB\n");
		return 0;
	}

	return 1;
}
/* validate_geometry_imsm_volume - lifted from validate_geometry_ddf_bvd
 * FIX ME add ahci details
 */
static int validate_geometry_imsm_volume(struct supertype *st, int level,
					 int layout, int raiddisks, int *chunk,
					 unsigned long long size,
					 unsigned long long data_offset,
					 char *dev,
					 unsigned long long *freesize,
					 int verbose)
{
	dev_t rdev;
	struct intel_super *super = st->sb;
	struct imsm_super *mpb;
	struct dl *dl;
	unsigned long long pos = 0;
	unsigned long long maxsize;
	struct extent *e;
	int i;

	/* We must have the container info already read in. */
	if (!super)
		return 0;

	mpb = super->anchor;

	if (!validate_geometry_imsm_orom(super, level, layout, raiddisks, chunk, size, verbose)) {
		pr_err("RAID geometry validation failed. Cannot proceed with the action(s).\n");
		return 0;
	}
	if (!dev) {
		/* General test:  make sure there is space for
		 * 'raiddisks' device extents of size 'size' at a given
		 * offset
		 */
		unsigned long long minsize = size;
		unsigned long long start_offset = MaxSector;
		int dcnt = 0;

		if (minsize == 0)
			minsize = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
		for (dl = super->disks; dl; dl = dl->next) {
			int found = 0;

			pos = 0;
			i = 0;
			e = get_extents(super, dl, 0);
			if (!e)
				continue;
			do {
				unsigned long long esize;

				esize = e[i].start - pos;
				if (esize >= minsize)
					found = 1;
				if (found && start_offset == MaxSector) {
					start_offset = pos;
					break;
				} else if (found && pos != start_offset) {
					found = 0;
					break;
				}
				pos = e[i].start + e[i].size;
				i++;
			} while (e[i-1].size);
			if (found)
				dcnt++;
			free(e);
		}
		if (dcnt < raiddisks) {
			if (verbose)
				pr_err("imsm: Not enough devices with space for this array (%d < %d)\n",
				       dcnt, raiddisks);
			return 0;
		}
		return 1;
	}

	/* This device must be a member of the set */
	if (!stat_is_blkdev(dev, &rdev))
		return 0;
	for (dl = super->disks; dl; dl = dl->next) {
		if (dl->major == (int)major(rdev) &&
		    dl->minor == (int)minor(rdev))
			break;
	}
	if (!dl) {
		if (verbose)
			pr_err("%s is not in the same imsm set\n", dev);
		return 0;
	} else if (super->orom && dl->index < 0 && mpb->num_raid_devs) {
		/* If a volume is present then the current creation attempt
		 * cannot incorporate new spares because the orom may not
		 * understand this configuration (all member disks must be
		 * members of each array in the container).
		 */
		pr_err("%s is a spare and a volume is already defined for this container\n", dev);
		pr_err("The option-rom requires all member disks to be a member of all volumes\n");
		return 0;
	} else if (super->orom && mpb->num_raid_devs > 0 &&
		   mpb->num_disks != raiddisks) {
		pr_err("The option-rom requires all member disks to be a member of all volumes\n");
		return 0;
	}

	/* retrieve the largest free space block */
	e = get_extents(super, dl, 0);
	maxsize = 0;
	i = 0;
	if (e) {
		do {
			unsigned long long esize;

			esize = e[i].start - pos;
			if (esize >= maxsize)
				maxsize = esize;
			pos = e[i].start + e[i].size;
			i++;
		} while (e[i-1].size);
		dl->e = e;
		dl->extent_cnt = i;
	} else {
		if (verbose)
			pr_err("unable to determine free space for: %s\n",
			       dev);
		return 0;
	}
	if (maxsize < size) {
		if (verbose)
			pr_err("%s not enough space (%llu < %llu)\n",
			       dev, maxsize, size);
		return 0;
	}

	maxsize = merge_extents(super, false);

	if (mpb->num_raid_devs > 0 && size && size != maxsize)
		pr_err("attempting to create a second volume with size less than remaining space.\n");

	if (maxsize < size || maxsize == 0) {
		if (verbose) {
			if (maxsize == 0)
				pr_err("no free space left on device. Aborting...\n");
			else
				pr_err("not enough space to create volume of given size (%llu < %llu). Aborting...\n",
				       maxsize, size);
		}
		return 0;
	}

	*freesize = maxsize;

	if (super->orom) {
		int count = count_volumes(super->hba,
					  super->orom->dpa, verbose);
		if (super->orom->vphba <= count) {
			pr_vrb("platform does not support more than %d raid volumes.\n",
			       super->orom->vphba);
			return 0;
		}
	}
	return 1;
}
/**
 * imsm_get_free_size() - get the biggest, common free space from members.
 * @super: &intel_super pointer, not NULL.
 * @raiddisks: number of raid disks.
 * @size: requested size, could be 0 (means max size).
 * @chunk: requested chunk size in KiB.
 * @freesize: pointer for returned size value.
 *
 * Return: &IMSM_STATUS_OK or &IMSM_STATUS_ERROR.
 *
 * @freesize is set to a meaningful value, this can be @size, or the
 * calculated max free size.
 * super->create_offset value is modified and set appropriately in
 * merge_extents() for further creation.
 */
static imsm_status_t imsm_get_free_size(struct intel_super *super,
					const int raiddisks,
					unsigned long long size,
					const int chunk,
					unsigned long long *freesize,
					bool expanding)
{
	struct imsm_super *mpb = super->anchor;
	struct dl *dl;
	int i;
	int cnt = 0;
	int used = 0;
	struct extent *e;
	unsigned long long maxsize;
	unsigned long long minsize = size;

	if (minsize == 0)
		minsize = chunk * 2;

	/* find the largest common start free region of the possible disks */
	for (dl = super->disks; dl; dl = dl->next) {
		dl->raiddisk = -1;

		if (dl->index >= 0)
			used++;

		/* don't activate new spares if we are orom constrained
		 * and there is already a volume active in the container
		 */
		if (super->orom && dl->index < 0 && mpb->num_raid_devs)
			continue;

		e = get_extents(super, dl, 0);
		if (!e)
			continue;
		for (i = 1; e[i-1].size; i++)
			;
		dl->e = e;
		dl->extent_cnt = i;
		cnt++;
	}

	maxsize = merge_extents(super, expanding);
	if (maxsize < minsize) {
		pr_err("imsm: Free space is %llu but must be equal or larger than %llu.\n",
		       maxsize, minsize);
		return IMSM_STATUS_ERROR;
	}

	if (cnt < raiddisks || (super->orom && used && used != raiddisks)) {
		pr_err("imsm: Not enough devices with space to create array.\n");
		return IMSM_STATUS_ERROR;
	}

	if (size == 0) {
		size = maxsize;
		if (chunk) {
			size /= 2 * chunk;
			size *= 2 * chunk;
		}
	}

	if (mpb->num_raid_devs > 0 && size && size != maxsize)
		pr_err("attempting to create a second volume with size less than remaining space.\n");

	*freesize = size;
	dprintf("imsm: imsm_get_free_size() returns : %llu\n", size);

	return IMSM_STATUS_OK;
}
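
/*
 * Rounding sketch (illustrative numbers): when @size is 0 the merged free
 * space is trimmed to a whole number of stripes. With a 128 KiB chunk the
 * rounding unit is 2 * 128 = 256 sectors, so maxsize = 1000100 sectors
 * becomes 1000100 / 256 * 256 = 999936 sectors in @freesize.
 */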
/**
 * autolayout_imsm() - automatically layout a new volume.
 * @super: &intel_super pointer, not NULL.
 * @raiddisks: number of raid disks.
 * @size: requested size, could be 0 (means max size).
 * @chunk: requested chunk.
 * @freesize: pointer for returned size value.
 *
 * We are being asked to automatically layout a new volume based on the current
 * contents of the container. If the parameters can be satisfied autolayout_imsm
 * will record the disks, start offset, and will return size of the volume to
 * be created. See imsm_get_free_size() for details.
 * add_to_super() and getinfo_super() detect when autolayout is in progress.
 * If first volume exists, slots are set consistently to it.
 *
 * Return: &IMSM_STATUS_OK on success, &IMSM_STATUS_ERROR otherwise.
 *
 * Disks are marked for creation via dl->raiddisk.
 */
static imsm_status_t autolayout_imsm(struct intel_super *super,
				     const int raiddisks,
				     unsigned long long size, const int chunk,
				     unsigned long long *freesize)
{
	int curr_slot = 0;
	struct dl *disk;
	int vol_cnt = super->anchor->num_raid_devs;
	imsm_status_t rv;

	rv = imsm_get_free_size(super, raiddisks, size, chunk, freesize, false);
	if (rv != IMSM_STATUS_OK)
		return IMSM_STATUS_ERROR;

	for (disk = super->disks; disk; disk = disk->next) {
		if (!disk->e)
			continue;

		if (curr_slot == raiddisks)
			break;

		if (vol_cnt == 0) {
			disk->raiddisk = curr_slot;
		} else {
			int _slot = get_disk_slot_in_dev(super, 0, disk->index);

			if (_slot == -1) {
				pr_err("Disk %s is not used in first volume, aborting\n",
				       disk->devname);
				return IMSM_STATUS_ERROR;
			}
			disk->raiddisk = _slot;
		}
		curr_slot++;
	}

	return IMSM_STATUS_OK;
}
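
/*
 * Usage sketch (hypothetical invocation): for a create request such as
 * "mdadm -C ... -l5 -n3" against an imsm container with no explicit size,
 * the caller below ends up doing roughly
 *
 *	rv = autolayout_imsm(super, 3, 0, *chunk, freesize);
 *
 * after which each chosen dl carries its future slot in dl->raiddisk and
 * *freesize holds the per-member size the create path should use.
 */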
static int validate_geometry_imsm(struct supertype *st, int level, int layout,
				  int raiddisks, int *chunk, unsigned long long size,
				  unsigned long long data_offset,
				  char *dev, unsigned long long *freesize,
				  int consistency_policy, int verbose)
{
	int fd, cfd;
	struct intel_super *super = st->sb;
	struct mdinfo *sra;
	int is_member = 0;

	/* if given unused devices create a container
	 * if given devices in a container create a member volume
	 */
	if (is_container(level))
		/* Must be a fresh device to add to a container */
		return validate_geometry_imsm_container(st, level, raiddisks,
							data_offset, dev,
							freesize, verbose);

	/*
	 * Size is given in sectors.
	 */
	if (size && (size < 2048)) {
		pr_err("Given size must be greater than 1M.\n");
		/* Depends on algorithm in Create.c :
		 * if container was given (dev == NULL) return -1,
		 * if block device was given (dev != NULL) return 0.
		 */
		return dev ? -1 : 0;
	}

	if (!dev) {
		/*
		 * Autolayout mode, st->sb must be set.
		 */
		if (!st->sb) {
			pr_vrb("superblock must be set for autolayout, aborting\n");
			return 0;
		}

		if (!validate_geometry_imsm_orom(st->sb, level, layout,
						 raiddisks, chunk, size,
						 verbose))
			return 0;

		if (super->orom && freesize) {
			imsm_status_t rv;
			int count = count_volumes(super->hba, super->orom->dpa, verbose);

			if (super->orom->vphba <= count) {
				pr_vrb("platform does not support more than %d raid volumes.\n",
				       super->orom->vphba);
				return 0;
			}

			rv = autolayout_imsm(super, raiddisks, size, *chunk, freesize);
			if (rv != IMSM_STATUS_OK)
				return 0;
		}
		return 1;
	}
	if (st->sb) {
		/* creating in a given container */
		return validate_geometry_imsm_volume(st, level, layout,
						     raiddisks, chunk, size,
						     data_offset,
						     dev, freesize, verbose);
	}

	/* This device needs to be a device in an 'imsm' container */
	fd = open(dev, O_RDONLY|O_EXCL, 0);

	if (is_fd_valid(fd)) {
		pr_vrb("Cannot create this array on device %s\n", dev);
		close(fd);
		return 0;
	}
	if (errno == EBUSY)
		fd = open(dev, O_RDONLY, 0);

	if (!is_fd_valid(fd)) {
		pr_vrb("Cannot open %s: %s\n", dev, strerror(errno));
		return 0;
	}

	/* Well, it is in use by someone, maybe an 'imsm' container. */
	cfd = open_container(fd);
	close(fd);

	if (!is_fd_valid(cfd)) {
		pr_vrb("Cannot use %s: It is busy\n", dev);
		return 0;
	}
	sra = sysfs_read(cfd, NULL, GET_VERSION);
	if (sra && sra->array.major_version == -1 &&
	    strcmp(sra->text_version, "imsm") == 0)
		is_member = 1;
	sysfs_free(sra);
	if (is_member) {
		/* This is a member of an imsm container.  Load the container
		 * and try to create a volume
		 */
		struct intel_super *super;

		if (load_super_imsm_all(st, cfd, (void **) &super, NULL, NULL, 1) == 0) {
			st->sb = super;
			strcpy(st->container_devnm, fd2devnm(cfd));
			close(cfd);
			return validate_geometry_imsm_volume(st, level, layout,
							     raiddisks, chunk,
							     size, data_offset, dev,
							     freesize, verbose);
		}
	}

	if (verbose)
		pr_err("failed container membership check\n");

	close(cfd);
	return 0;
}
static void default_geometry_imsm(struct supertype *st, int *level, int *layout, int *chunk)
{
	struct intel_super *super = st->sb;

	if (level && *level == UnSet)
		*level = LEVEL_CONTAINER;

	if (level && layout && *layout == UnSet)
		*layout = imsm_level_to_layout(*level);

	if (chunk && (*chunk == UnSet || *chunk == 0))
		*chunk = imsm_default_chunk(super->orom);
}
static void handle_missing(struct intel_super *super, struct imsm_dev *dev);

static int kill_subarray_imsm(struct supertype *st, char *subarray_id)
{
	/* remove the subarray currently referenced by subarray_id */
	__u8 i;
	struct intel_dev **dp;
	struct intel_super *super = st->sb;
	__u8 current_vol = strtoul(subarray_id, NULL, 10);
	struct imsm_super *mpb = super->anchor;

	if (mpb->num_raid_devs == 0)
		return 2;

	/* block deletions that would change the uuid of active subarrays
	 *
	 * FIXME when immutable ids are available, but note that we'll
	 * also need to fixup the invalidated/active subarray indexes in
	 * mdstat
	 */
	for (i = 0; i < mpb->num_raid_devs; i++) {
		char subarray[4];

		if (i < current_vol)
			continue;
		snprintf(subarray, sizeof(subarray), "%u", i);
		if (is_subarray_active(subarray, st->devnm)) {
			pr_err("deleting subarray-%d would change the UUID of active subarray-%d, aborting\n",
			       current_vol, i);
			return 2;
		}
	}

	if (st->update_tail) {
		struct imsm_update_kill_array *u = xmalloc(sizeof(*u));

		u->type = update_kill_array;
		u->dev_idx = current_vol;
		append_metadata_update(st, u, sizeof(*u));

		return 0;
	}

	for (dp = &super->devlist; *dp;)
		if ((*dp)->index == current_vol) {
			*dp = (*dp)->next;
		} else {
			handle_missing(super, (*dp)->dev);
			if ((*dp)->index > current_vol)
				(*dp)->index--;
			dp = &(*dp)->next;
		}

	/* no more raid devices, all active components are now spares,
	 * but of course failed are still failed
	 */
	if (--mpb->num_raid_devs == 0) {
		struct dl *d;

		for (d = super->disks; d; d = d->next)
			if (d->index > -2)
				mark_spare(d);
	}

	super->updates_pending++;

	return 0;
}
/**
 * get_rwh_policy_from_update() - Get the rwh policy for update option.
 * @update: Update option.
 */
static int get_rwh_policy_from_update(enum update_opt update)
{
	switch (update) {
	case UOPT_PPL:
		return RWH_MULTIPLE_DISTRIBUTED;
	case UOPT_NO_PPL:
		return RWH_MULTIPLE_OFF;
	case UOPT_BITMAP:
		return RWH_BITMAP;
	case UOPT_NO_BITMAP:
		return RWH_OFF;
	default:
		break;
	}
	return UOPT_UNDEFINED;
}
static int update_subarray_imsm(struct supertype *st, char *subarray,
				enum update_opt update, struct mddev_ident *ident)
{
	/* update the subarray currently referenced by ->current_vol */
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;

	if (update == UOPT_NAME) {
		char *name = ident->name;
		char *ep;
		int vol;

		if (imsm_is_name_allowed(super, name, 1) == false)
			return 2;

		vol = strtoul(subarray, &ep, 10);
		if (*ep != '\0' || vol >= super->anchor->num_raid_devs)
			return 2;

		if (st->update_tail) {
			struct imsm_update_rename_array *u = xmalloc(sizeof(*u));

			u->type = update_rename_array;
			u->dev_idx = vol;
			strncpy((char *) u->name, name, MAX_RAID_SERIAL_LEN);
			u->name[MAX_RAID_SERIAL_LEN-1] = '\0';
			append_metadata_update(st, u, sizeof(*u));
		} else {
			struct imsm_dev *dev;
			int i, namelen;

			dev = get_imsm_dev(super, vol);
			memset(dev->volume, '\0', MAX_RAID_SERIAL_LEN);
			namelen = min((int)strlen(name), MAX_RAID_SERIAL_LEN);
			memcpy(dev->volume, name, namelen);
			for (i = 0; i < mpb->num_raid_devs; i++) {
				dev = get_imsm_dev(super, i);
				handle_missing(super, dev);
			}
			super->updates_pending++;
		}
	} else if (get_rwh_policy_from_update(update) != UOPT_UNDEFINED) {
		int new_policy;
		char *ep;
		int vol = strtoul(subarray, &ep, 10);

		if (*ep != '\0' || vol >= super->anchor->num_raid_devs)
			return 2;

		new_policy = get_rwh_policy_from_update(update);

		if (st->update_tail) {
			struct imsm_update_rwh_policy *u = xmalloc(sizeof(*u));

			u->type = update_rwh_policy;
			u->dev_idx = vol;
			u->new_policy = new_policy;
			append_metadata_update(st, u, sizeof(*u));
		} else {
			struct imsm_dev *dev;

			dev = get_imsm_dev(super, vol);
			dev->rwh_policy = new_policy;
			super->updates_pending++;
			if (new_policy == RWH_BITMAP)
				return write_init_bitmap_imsm_vol(st, vol);
		}
	} else
		return 2;
	return 0;
}
static bool is_gen_migration(struct imsm_dev *dev)
{
	if (dev && dev->vol.migr_state &&
	    migr_type(dev) == MIGR_GEN_MIGR)
		return true;

	return false;
}

static int is_rebuilding(struct imsm_dev *dev)
{
	struct imsm_map *migr_map;

	if (!dev->vol.migr_state)
		return 0;

	if (migr_type(dev) != MIGR_REBUILD)
		return 0;

	migr_map = get_imsm_map(dev, MAP_1);

	if (migr_map->map_state == IMSM_T_STATE_DEGRADED)
		return 1;
	else
		return 0;
}

static int is_initializing(struct imsm_dev *dev)
{
	struct imsm_map *migr_map;

	if (!dev->vol.migr_state)
		return 0;

	if (migr_type(dev) != MIGR_INIT)
		return 0;

	migr_map = get_imsm_map(dev, MAP_1);

	if (migr_map->map_state == IMSM_T_STATE_UNINITIALIZED)
		return 1;
	else
		return 0;
}
static void update_recovery_start(struct intel_super *super,
				  struct imsm_dev *dev,
				  struct mdinfo *array)
{
	struct mdinfo *rebuild = NULL;
	struct mdinfo *d;
	__u32 units;

	if (!is_rebuilding(dev))
		return;

	/* Find the rebuild target, but punt on the dual rebuild case */
	for (d = array->devs; d; d = d->next)
		if (d->recovery_start == 0) {
			if (rebuild)
				return;
			rebuild = d;
		}

	if (!rebuild) {
		/* (?) none of the disks are marked with
		 * IMSM_ORD_REBUILD, so assume they are missing and the
		 * disk_ord_tbl was not correctly updated
		 */
		dprintf("failed to locate out-of-sync disk\n");
		return;
	}

	units = vol_curr_migr_unit(dev);
	rebuild->recovery_start = units * blocks_per_migr_unit(super, dev);
}
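
/*
 * Checkpoint math (follows directly from the code above, example numbers):
 * if vol_curr_migr_unit(dev) == 100 completed migration units and
 * blocks_per_migr_unit() reports 2048 sectors per unit, the rebuild target
 * resumes at recovery_start = 100 * 2048 = 204800 sectors.
 */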
static int recover_backup_imsm(struct supertype *st, struct mdinfo *info);

static struct mdinfo *container_content_imsm(struct supertype *st, char *subarray)
{
	/* Given a container loaded by load_super_imsm_all,
	 * extract information about all the arrays into
	 * an mdinfo tree.
	 * If 'subarray' is given, just extract info about that array.
	 *
	 * For each imsm_dev create an mdinfo, fill it in,
	 * then look for matching devices in super->disks
	 * and create appropriate device mdinfo.
	 */
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	struct mdinfo *rest = NULL;
	unsigned int i;
	int sb_errors = 0;
	struct dl *d;
	int spare_disks = 0;
	int current_vol = super->current_vol;

	/* do not assemble arrays when not all attributes are supported */
	if (imsm_check_attributes(mpb->attributes) == false) {
		sb_errors = 1;
		pr_err("Unsupported attributes in IMSM metadata. Arrays activation is blocked.\n");
	}

	/* count spare devices, not used in maps
	 */
	for (d = super->disks; d; d = d->next)
		if (d->index == -1)
			spare_disks++;

	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct imsm_dev *dev;
		struct imsm_map *map;
		struct imsm_map *map2;
		struct mdinfo *this;
		int slot;
		int chunk;
		char *ep;
		int level;
		int missing = 0;

		if (subarray &&
		    (i != strtoul(subarray, &ep, 10) || *ep != '\0'))
			continue;

		dev = get_imsm_dev(super, i);
		map = get_imsm_map(dev, MAP_0);
		map2 = get_imsm_map(dev, MAP_1);
		level = get_imsm_raid_level(map);

		/* do not publish arrays that are in the middle of an
		 * unsupported migration
		 */
		if (dev->vol.migr_state &&
		    (migr_type(dev) == MIGR_STATE_CHANGE)) {
			pr_err("cannot assemble volume '%.16s': unsupported migration in progress\n",
			       dev->volume);
			continue;
		}
		/* do not publish arrays that are not supported by controller's
		 * OROM/EFI
		 */

		this = xmalloc(sizeof(*this));
		this->next = rest;

		super->current_vol = i;
		getinfo_super_imsm_volume(st, this, NULL);

		chunk = __le16_to_cpu(map->blocks_per_strip) >> 1;
		/* mdadm does not support all metadata features- set the bit in all arrays state */
		if (!validate_geometry_imsm_orom(super,
						 level, /* RAID level */
						 imsm_level_to_layout(level),
						 map->num_members, /* raid disks */
						 &chunk, imsm_dev_size(dev),
						 1)) {
			pr_err("IMSM RAID geometry validation failed. Array %s activation is blocked.\n",
			       dev->volume);
			this->array.state |=
			  (1<<MD_SB_BLOCK_CONTAINER_RESHAPE) |
			  (1<<MD_SB_BLOCK_VOLUME);
		}

		/* if array has bad blocks, set suitable bit in all arrays state */
		if (sb_errors)
			this->array.state |=
			  (1<<MD_SB_BLOCK_CONTAINER_RESHAPE) |
			  (1<<MD_SB_BLOCK_VOLUME);

		for (slot = 0 ; slot < map->num_members; slot++) {
			unsigned long long recovery_start;
			struct mdinfo *info_d;
			struct dl *d;
			int idx;
			int skip;
			__u32 ord;

			skip = 0;
			idx = get_imsm_disk_idx(dev, slot, MAP_0);
			ord = get_imsm_ord_tbl_ent(dev, slot, MAP_X);
			for (d = super->disks; d; d = d->next)
				if (d->index == idx)
					break;

			recovery_start = MaxSector;
			if (d == NULL)
				skip = 1;
			if (d && is_failed(&d->disk))
				skip = 1;
			if (!skip && (ord & IMSM_ORD_REBUILD))
				recovery_start = 0;
			if (!(ord & IMSM_ORD_REBUILD))
				this->array.working_disks++;
			/*
			 * if we skip some disks the array will be assembled degraded;
			 * reset resync start to avoid a dirty-degraded
			 * situation when performing the initial sync
			 */
			if (skip)
				missing++;

			if (!(dev->vol.dirty & RAIDVOL_DIRTY)) {
				if ((!able_to_resync(level, missing) ||
				     recovery_start == 0))
					this->resync_start = MaxSector;
			}

			if (skip)
				continue;

			info_d = xcalloc(1, sizeof(*info_d));
			info_d->next = this->devs;
			this->devs = info_d;

			info_d->disk.number = d->index;
			info_d->disk.major = d->major;
			info_d->disk.minor = d->minor;
			info_d->disk.raid_disk = slot;
			info_d->recovery_start = recovery_start;
			if (map2) {
				if (slot < map2->num_members)
					info_d->disk.state = (1 << MD_DISK_ACTIVE);
				else
					this->array.spare_disks++;
			} else {
				if (slot < map->num_members)
					info_d->disk.state = (1 << MD_DISK_ACTIVE);
				else
					this->array.spare_disks++;
			}

			info_d->events = __le32_to_cpu(mpb->generation_num);
			info_d->data_offset = pba_of_lba0(map);
			info_d->component_size = calc_component_size(map, dev);

			if (map->raid_level == IMSM_T_RAID5) {
				info_d->ppl_sector = this->ppl_sector;
				info_d->ppl_size = this->ppl_size;
				if (this->consistency_policy == CONSISTENCY_POLICY_PPL &&
				    recovery_start == 0)
					this->resync_start = 0;
			}

			info_d->bb.supported = 1;
			get_volume_badblocks(super->bbm_log, ord_to_idx(ord),
					     info_d->data_offset,
					     info_d->component_size,
					     &info_d->bb);
		}
		/* now that the disk list is up-to-date fixup recovery_start */
		update_recovery_start(super, dev, this);
		this->array.spare_disks += spare_disks;

		/* check for reshape */
		if (this->reshape_active == 1)
			recover_backup_imsm(st, this);
		rest = this;
	}

	super->current_vol = current_vol;
	return rest;
}
static __u8 imsm_check_degraded(struct intel_super *super, struct imsm_dev *dev,
				int failed, int look_in_map)
{
	struct imsm_map *map;

	map = get_imsm_map(dev, look_in_map);

	if (!failed)
		return map->map_state == IMSM_T_STATE_UNINITIALIZED ?
			IMSM_T_STATE_UNINITIALIZED : IMSM_T_STATE_NORMAL;

	switch (get_imsm_raid_level(map)) {
	case 0:
		return IMSM_T_STATE_FAILED;
	case 1:
		if (failed < map->num_members)
			return IMSM_T_STATE_DEGRADED;
		else
			return IMSM_T_STATE_FAILED;
	case 10:
	{
		/**
		 * check to see if any mirrors have failed, otherwise we
		 * are degraded.  Even numbered slots are mirrored on
		 * slot+1
		 */
		int i;
		/* gcc -Os complains that this is unused */
		int insync = insync;

		for (i = 0; i < map->num_members; i++) {
			__u32 ord = get_imsm_ord_tbl_ent(dev, i, MAP_X);
			int idx = ord_to_idx(ord);
			struct imsm_disk *disk;

			/* reset the potential in-sync count on even-numbered
			 * slots.  num_copies is always 2 for imsm raid10
			 */
			if ((i & 1) == 0)
				insync = 2;

			disk = get_imsm_disk(super, idx);
			if (!disk || is_failed(disk) || ord & IMSM_ORD_REBUILD)
				insync--;

			/* no in-sync disks left in this mirror the
			 * array has failed
			 */
			if (insync == 0)
				return IMSM_T_STATE_FAILED;
		}

		return IMSM_T_STATE_DEGRADED;
	}
	case 5:
		if (failed < 2)
			return IMSM_T_STATE_DEGRADED;
		else
			return IMSM_T_STATE_FAILED;
	default:
		break;
	}

	return map->map_state;
}
static int imsm_count_failed(struct intel_super *super, struct imsm_dev *dev,
			     int look_in_map)
{
	int failed = 0;
	int i;
	struct imsm_disk *disk;
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	struct imsm_map *prev = get_imsm_map(dev, MAP_1);
	struct imsm_map *map_for_loop;
	__u32 ord;
	int idx;
	int idx_1;

	/* at the beginning of migration we set IMSM_ORD_REBUILD on
	 * disks that are being rebuilt.  New failures are recorded to
	 * map[0].  So we look through all the disks we started with and
	 * see if any failures are still present, or if any new ones
	 * have arrived
	 */
	map_for_loop = map;
	if (prev && (map->num_members < prev->num_members))
		map_for_loop = prev;

	for (i = 0; i < map_for_loop->num_members; i++) {
		idx_1 = -255;
		/* when MAP_X is passed both maps failures are counted
		 */
		if (prev &&
		    (look_in_map == MAP_1 || look_in_map == MAP_X) &&
		    i < prev->num_members) {
			ord = __le32_to_cpu(prev->disk_ord_tbl[i]);
			idx_1 = ord_to_idx(ord);

			disk = get_imsm_disk(super, idx_1);
			if (!disk || is_failed(disk) || ord & IMSM_ORD_REBUILD)
				failed++;
		}
		if ((look_in_map == MAP_0 || look_in_map == MAP_X) &&
		    i < map->num_members) {
			ord = __le32_to_cpu(map->disk_ord_tbl[i]);
			idx = ord_to_idx(ord);

			if (idx != idx_1) {
				disk = get_imsm_disk(super, idx);
				if (!disk || is_failed(disk) ||
				    ord & IMSM_ORD_REBUILD)
					failed++;
			}
		}
	}

	return failed;
}
*st
,
8496 struct metadata_update
*update
);
8497 static int imsm_open_new(struct supertype
*c
, struct active_array
*a
,
8500 struct intel_super
*super
= c
->sb
;
8501 struct imsm_super
*mpb
= super
->anchor
;
8502 struct imsm_update_prealloc_bb_mem
*u
;
8503 struct metadata_update mu
;
8505 if (inst
>= mpb
->num_raid_devs
) {
8506 pr_err("subarry index %d, out of range\n", inst
);
8510 dprintf("imsm: open_new %d\n", inst
);
8511 a
->info
.container_member
= inst
;
8513 u
= xmalloc(sizeof(*u
));
8514 u
->type
= update_prealloc_badblocks_mem
;
8515 mu
.len
= sizeof(*u
);
8517 imsm_prepare_update(c
, &mu
);
8519 append_metadata_update(c
, u
, sizeof(*u
));
static int is_resyncing(struct imsm_dev *dev)
{
	struct imsm_map *migr_map;

	if (!dev->vol.migr_state)
		return 0;

	if (migr_type(dev) == MIGR_INIT ||
	    migr_type(dev) == MIGR_REPAIR)
		return 1;

	if (migr_type(dev) == MIGR_GEN_MIGR)
		return 0;

	migr_map = get_imsm_map(dev, MAP_1);

	if (migr_map->map_state == IMSM_T_STATE_NORMAL &&
	    dev->vol.migr_type != MIGR_GEN_MIGR)
		return 1;
	else
		return 0;
}
/* return true if we recorded new information */
static int mark_failure(struct intel_super *super,
			struct imsm_dev *dev, struct imsm_disk *disk, int idx)
{
	__u32 ord;
	int slot;
	struct imsm_map *map;
	char buf[MAX_RAID_SERIAL_LEN+3];
	unsigned int len, shift = 0;

	/* new failures are always set in map[0] */
	map = get_imsm_map(dev, MAP_0);

	slot = get_imsm_disk_slot(map, idx);
	if (slot < 0)
		return 0;

	ord = __le32_to_cpu(map->disk_ord_tbl[slot]);
	if (is_failed(disk) && (ord & IMSM_ORD_REBUILD))
		return 0;

	memcpy(buf, disk->serial, MAX_RAID_SERIAL_LEN);
	buf[MAX_RAID_SERIAL_LEN] = '\000';
	strcat(buf, ":0");
	if ((len = strlen(buf)) >= MAX_RAID_SERIAL_LEN)
		shift = len - MAX_RAID_SERIAL_LEN + 1;
	memcpy(disk->serial, &buf[shift], len + 1 - shift);

	disk->status |= FAILED_DISK;
	set_imsm_ord_tbl_ent(map, slot, idx | IMSM_ORD_REBUILD);
	/* mark failures in second map if second map exists and this disk
	 * is in this slot.
	 * This is valid for migration, initialization and rebuild
	 */
	if (dev->vol.migr_state) {
		struct imsm_map *map2 = get_imsm_map(dev, MAP_1);
		int slot2 = get_imsm_disk_slot(map2, idx);

		if (slot2 < map2->num_members && slot2 >= 0)
			set_imsm_ord_tbl_ent(map2, slot2,
					     idx | IMSM_ORD_REBUILD);
	}
	if (map->failed_disk_num == 0xff ||
	    (!is_rebuilding(dev) && map->failed_disk_num > slot))
		map->failed_disk_num = slot;

	clear_disk_badblocks(super->bbm_log, ord_to_idx(ord));

	return 1;
}
static void mark_missing(struct intel_super *super,
			 struct imsm_dev *dev, struct imsm_disk *disk, int idx)
{
	mark_failure(super, dev, disk, idx);

	if (disk->scsi_id == __cpu_to_le32(~(__u32)0))
		return;

	disk->scsi_id = __cpu_to_le32(~(__u32)0);
	memmove(&disk->serial[0], &disk->serial[1], MAX_RAID_SERIAL_LEN - 1);
}
*super
, struct imsm_dev
*dev
)
8614 if (!super
->missing
)
8617 /* When orom adds replacement for missing disk it does
8618 * not remove entry of missing disk, but just updates map with
8619 * new added disk. So it is not enough just to test if there is
8620 * any missing disk, we have to look if there are any failed disks
8621 * in map to stop migration */
8623 dprintf("imsm: mark missing\n");
8624 /* end process for initialization and rebuild only
8626 if (is_gen_migration(dev
) == false) {
8627 int failed
= imsm_count_failed(super
, dev
, MAP_0
);
8631 struct imsm_map
*map
= get_imsm_map(dev
, MAP_0
);
8632 struct imsm_map
*map1
;
8633 int i
, ord
, ord_map1
;
8636 for (i
= 0; i
< map
->num_members
; i
++) {
8637 ord
= get_imsm_ord_tbl_ent(dev
, i
, MAP_0
);
8638 if (!(ord
& IMSM_ORD_REBUILD
))
8641 map1
= get_imsm_map(dev
, MAP_1
);
8645 ord_map1
= __le32_to_cpu(map1
->disk_ord_tbl
[i
]);
8646 if (ord_map1
& IMSM_ORD_REBUILD
)
8651 map_state
= imsm_check_degraded(super
, dev
,
8653 end_migration(dev
, super
, map_state
);
8657 for (dl
= super
->missing
; dl
; dl
= dl
->next
)
8658 mark_missing(super
, dev
, &dl
->disk
, dl
->index
);
8659 super
->updates_pending
++;
static unsigned long long imsm_set_array_size(struct imsm_dev *dev,
					      long long new_size)
{
	unsigned long long array_blocks;
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	int used_disks = imsm_num_data_members(map);

	if (used_disks == 0) {
		/* when problems occur
		 * return current array_blocks value
		 */
		array_blocks = imsm_dev_size(dev);

		return array_blocks;
	}

	/* set array size in metadata
	 */
	if (new_size <= 0)
		/* OLCE size change is caused by added disks
		 */
		array_blocks = per_dev_array_size(map) * used_disks;
	else
		/* Online Volume Size Change
		 * Using available free space
		 */
		array_blocks = new_size;

	array_blocks = round_size_to_mb(array_blocks, used_disks);
	set_imsm_dev_size(dev, array_blocks);

	return array_blocks;
}
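
/*
 * Sizing example (illustrative): a RAID 5 map with 3 data members and
 * new_size = 10000000 sectors is rounded so each member ends on a whole
 * MiB boundary: 3 * (10000000 / 3 / 2048) * 2048 = 9996288 sectors, which
 * is what set_imsm_dev_size() records.
 */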
static void imsm_set_disk(struct active_array *a, int n, int state);

static void imsm_progress_container_reshape(struct intel_super *super)
{
	/* if no device has a migr_state, but some device has a
	 * different number of members than the previous device, start
	 * changing the number of devices in this device to match
	 * previous.
	 */
	struct imsm_super *mpb = super->anchor;
	int prev_disks = -1;
	int i;
	int copy_map_size;

	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);
		struct imsm_map *map = get_imsm_map(dev, MAP_0);
		struct imsm_map *map2;
		int prev_num_members;

		if (dev->vol.migr_state)
			return;

		if (prev_disks == -1)
			prev_disks = map->num_members;
		if (prev_disks == map->num_members)
			continue;

		/* OK, this array needs to enter reshape mode.
		 * i.e it needs a migr_state
		 */

		copy_map_size = sizeof_imsm_map(map);
		prev_num_members = map->num_members;
		map->num_members = prev_disks;
		dev->vol.migr_state = MIGR_STATE_MIGRATING;
		set_vol_curr_migr_unit(dev, 0);
		set_migr_type(dev, MIGR_GEN_MIGR);
		for (i = prev_num_members;
		     i < map->num_members; i++)
			set_imsm_ord_tbl_ent(map, i, i);
		map2 = get_imsm_map(dev, MAP_1);
		/* Copy the current map */
		memcpy(map2, map, copy_map_size);
		map2->num_members = prev_num_members;

		imsm_set_array_size(dev, -1);
		super->clean_migration_record_by_mdmon = 1;
		super->updates_pending++;
	}
}
/* Handle dirty -> clean transitions, resync and reshape.  Degraded and rebuild
 * states are handled in imsm_set_disk() with one exception, when a
 * resync is stopped due to a new failure this routine will set the
 * 'degraded' state for the array.
 */
static int imsm_set_array_state(struct active_array *a, int consistent)
{
	int inst = a->info.container_member;
	struct intel_super *super = a->container->sb;
	struct imsm_dev *dev = get_imsm_dev(super, inst);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	int failed = imsm_count_failed(super, dev, MAP_0);
	__u8 map_state = imsm_check_degraded(super, dev, failed, MAP_0);
	__u32 blocks_per_unit;

	if (dev->vol.migr_state &&
	    dev->vol.migr_type == MIGR_GEN_MIGR) {
		/* array state change is blocked due to reshape action
		 * We might need to
		 * - abort the reshape (if last_checkpoint is 0 and action != reshape)
		 * - finish the reshape (if last_checkpoint is big and action != reshape)
		 * - update vol_curr_migr_unit
		 */
		if (a->curr_action == reshape) {
			/* still reshaping, maybe update vol_curr_migr_unit */
			goto mark_checkpoint;
		}
		if (a->last_checkpoint >= a->info.component_size) {
			unsigned long long array_blocks;
			int used_disks;
			struct mdinfo *mdi;

			used_disks = imsm_num_data_members(map);
			if (used_disks > 0) {
				array_blocks =
					per_dev_array_size(map) *
					used_disks;
				array_blocks =
					round_size_to_mb(array_blocks,
							 used_disks);
				a->info.custom_array_size = array_blocks;
				/* encourage manager to update array
				 * size
				 */

				a->check_reshape = 1;
			}
			/* finalize online capacity expansion/reshape */
			for (mdi = a->info.devs; mdi; mdi = mdi->next)
				imsm_set_disk(a,
					      mdi->disk.raid_disk,
					      mdi->curr_state);

			imsm_progress_container_reshape(super);
		}
	}

	/* before we activate this array handle any missing disks */
	if (consistent == 2)
		handle_missing(super, dev);

	if (consistent == 2 &&
	    (!is_resync_complete(&a->info) ||
	     map_state != IMSM_T_STATE_NORMAL ||
	     dev->vol.migr_state))
		consistent = 0;

	if (is_resync_complete(&a->info)) {
		/* complete initialization / resync,
		 * recovery and interrupted recovery is completed in
		 * ->set_disk
		 */
		if (is_resyncing(dev)) {
			dprintf("imsm: mark resync done\n");
			end_migration(dev, super, map_state);
			super->updates_pending++;
			a->last_checkpoint = 0;
		}
	} else if ((!is_resyncing(dev) && !failed) &&
		   (imsm_reshape_blocks_arrays_changes(super) == 0)) {
		/* mark the start of the init process if nothing is failed */
		dprintf("imsm: mark resync start\n");
		if (map->map_state == IMSM_T_STATE_UNINITIALIZED)
			migrate(dev, super, IMSM_T_STATE_NORMAL, MIGR_INIT);
		else
			migrate(dev, super, IMSM_T_STATE_NORMAL, MIGR_REPAIR);
		super->updates_pending++;
	}

	if (a->prev_action == idle)
		goto skip_mark_checkpoint;

mark_checkpoint:
	/* skip checkpointing for general migration,
	 * it is controlled in mdadm
	 */
	if (is_gen_migration(dev))
		goto skip_mark_checkpoint;

	/* check if we can update vol_curr_migr_unit from resync_start,
	 * recovery_start
	 */
	blocks_per_unit = blocks_per_migr_unit(super, dev);
	if (blocks_per_unit) {
		set_vol_curr_migr_unit(dev,
				       a->last_checkpoint / blocks_per_unit);
		dprintf("imsm: mark checkpoint (%llu)\n",
			vol_curr_migr_unit(dev));
		super->updates_pending++;
	}

skip_mark_checkpoint:
	/* mark dirty / clean */
	if (((dev->vol.dirty & RAIDVOL_DIRTY) && consistent) ||
	    (!(dev->vol.dirty & RAIDVOL_DIRTY) && !consistent)) {
		dprintf("imsm: mark '%s'\n", consistent ? "clean" : "dirty");
		if (consistent) {
			dev->vol.dirty = RAIDVOL_CLEAN;
		} else {
			dev->vol.dirty = RAIDVOL_DIRTY;
			if (dev->rwh_policy == RWH_DISTRIBUTED ||
			    dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
				dev->vol.dirty |= RAIDVOL_DSRECORD_VALID;
		}
		super->updates_pending++;
	}

	return consistent;
}
static int imsm_disk_slot_to_ord(struct active_array *a, int slot)
{
	int inst = a->info.container_member;
	struct intel_super *super = a->container->sb;
	struct imsm_dev *dev = get_imsm_dev(super, inst);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);

	if (slot > map->num_members) {
		pr_err("imsm: imsm_disk_slot_to_ord %d out of range 0..%d\n",
		       slot, map->num_members - 1);
		return -1;
	}

	if (slot < 0)
		return -1;

	return get_imsm_ord_tbl_ent(dev, slot, MAP_0);
}
static void imsm_set_disk(struct active_array *a, int n, int state)
{
	int inst = a->info.container_member;
	struct intel_super *super = a->container->sb;
	struct imsm_dev *dev = get_imsm_dev(super, inst);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	struct imsm_disk *disk;
	struct mdinfo *mdi;
	int recovery_not_finished = 0;
	int failed;
	int ord;
	__u8 map_state;
	int rebuild_done = 0;
	int i;

	ord = get_imsm_ord_tbl_ent(dev, n, MAP_X);
	if (ord < 0)
		return;

	dprintf("imsm: set_disk %d:%x\n", n, state);
	disk = get_imsm_disk(super, ord_to_idx(ord));

	/* check for new failures */
	if (disk && (state & DS_FAULTY)) {
		if (mark_failure(super, dev, disk, ord_to_idx(ord)))
			super->updates_pending++;
	}

	/* check if in_sync */
	if (state & DS_INSYNC && ord & IMSM_ORD_REBUILD && is_rebuilding(dev)) {
		struct imsm_map *migr_map = get_imsm_map(dev, MAP_1);

		set_imsm_ord_tbl_ent(migr_map, n, ord_to_idx(ord));
		rebuild_done = 1;
		super->updates_pending++;
	}

	failed = imsm_count_failed(super, dev, MAP_0);
	map_state = imsm_check_degraded(super, dev, failed, MAP_0);

	/* check if recovery complete, newly degraded, or failed */
	dprintf("imsm: Detected transition to state ");
	switch (map_state) {
	case IMSM_T_STATE_NORMAL: /* transition to normal state */
		dprintf("normal: ");
		if (is_rebuilding(dev)) {
			dprintf_cont("while rebuilding");
			/* check if recovery is really finished */
			for (mdi = a->info.devs; mdi; mdi = mdi->next)
				if (mdi->recovery_start != MaxSector) {
					recovery_not_finished = 1;
					break;
				}
			if (recovery_not_finished) {
				dprintf_cont("\n");
				dprintf("Rebuild has not finished yet, state not changed");
				if (a->last_checkpoint < mdi->recovery_start) {
					a->last_checkpoint = mdi->recovery_start;
					super->updates_pending++;
				}
				break;
			}

			end_migration(dev, super, map_state);
			map->failed_disk_num = ~0;
			super->updates_pending++;
			a->last_checkpoint = 0;
			break;
		}
		if (is_gen_migration(dev)) {
			dprintf_cont("while general migration");
			if (a->last_checkpoint >= a->info.component_size)
				end_migration(dev, super, map_state);
			else
				map->map_state = map_state;
			map->failed_disk_num = ~0;
			super->updates_pending++;
			break;
		}
		break;
	case IMSM_T_STATE_DEGRADED: /* transition to degraded state */
		dprintf_cont("degraded: ");
		if (map->map_state != map_state && !dev->vol.migr_state) {
			dprintf_cont("mark degraded");
			map->map_state = map_state;
			super->updates_pending++;
			a->last_checkpoint = 0;
			break;
		}
		if (is_rebuilding(dev)) {
			dprintf_cont("while rebuilding ");
			if (state & DS_FAULTY) {
				dprintf_cont("removing failed drive ");
				if (n == map->failed_disk_num) {
					dprintf_cont("end migration");
					end_migration(dev, super, map_state);
					a->last_checkpoint = 0;
				} else {
					dprintf_cont("fail detected during rebuild, changing map state");
					map->map_state = map_state;
				}
				super->updates_pending++;
			}

			if (!rebuild_done)
				break;

			/* check if recovery is really finished */
			for (mdi = a->info.devs; mdi; mdi = mdi->next)
				if (mdi->recovery_start != MaxSector) {
					recovery_not_finished = 1;
					break;
				}
			if (recovery_not_finished) {
				dprintf_cont("\n");
				dprintf_cont("Rebuild has not finished yet");
				if (a->last_checkpoint < mdi->recovery_start) {
					a->last_checkpoint =
						mdi->recovery_start;
					super->updates_pending++;
				}
				break;
			}

			dprintf_cont(" Rebuild done, still degraded");
			end_migration(dev, super, map_state);
			a->last_checkpoint = 0;
			super->updates_pending++;

			for (i = 0; i < map->num_members; i++) {
				int idx = get_imsm_ord_tbl_ent(dev, i, MAP_0);

				if (idx & IMSM_ORD_REBUILD)
					map->failed_disk_num = i;
			}
			super->updates_pending++;
			break;
		}
		if (is_gen_migration(dev)) {
			dprintf_cont("while general migration");
			if (a->last_checkpoint >= a->info.component_size)
				end_migration(dev, super, map_state);
			else {
				map->map_state = map_state;
				manage_second_map(super, dev);
			}
			super->updates_pending++;
			break;
		}
		if (is_initializing(dev)) {
			dprintf_cont("while initialization.");
			map->map_state = map_state;
			super->updates_pending++;
			break;
		}
		break;
	case IMSM_T_STATE_FAILED: /* transition to failed state */
		dprintf_cont("failed: ");
		if (is_gen_migration(dev)) {
			dprintf_cont("while general migration");
			map->map_state = map_state;
			super->updates_pending++;
			break;
		}
		if (map->map_state != map_state) {
			dprintf_cont("mark failed");
			end_migration(dev, super, map_state);
			super->updates_pending++;
			a->last_checkpoint = 0;
			break;
		}
		break;
	default:
		dprintf_cont("state %i\n", map_state);
		break;
	}
	dprintf_cont("\n");
}
static int store_imsm_mpb(int fd, struct imsm_super *mpb)
{
	void *buf = mpb;
	__u32 mpb_size = __le32_to_cpu(mpb->mpb_size);
	unsigned long long dsize;
	unsigned long long sectors;
	unsigned int sector_size;

	if (!get_dev_sector_size(fd, NULL, &sector_size))
		return 1;
	get_dev_size(fd, NULL, &dsize);

	if (mpb_size > sector_size) {
		/* -1 to account for anchor */
		sectors = mpb_sectors(mpb, sector_size) - 1;

		/* write the extended mpb to the sectors preceding the anchor */
		if (lseek64(fd, dsize - (sector_size * (2 + sectors)),
			    SEEK_SET) < 0)
			return 1;

		if ((unsigned long long)write(fd, buf + sector_size,
					      sector_size * sectors) != sector_size * sectors)
			return 1;
	}

	/* first block is stored on second to last sector of the disk */
	if (lseek64(fd, dsize - (sector_size * 2), SEEK_SET) < 0)
		return 1;

	if ((unsigned int)write(fd, buf, sector_size) != sector_size)
		return 1;

	return 0;
}
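
/*
 * On-disk layout sketch (illustrative, 512-byte sectors): for an mpb
 * spanning 3 sectors in total, sectors = mpb_sectors() - 1 = 2 extended
 * sectors are written starting at dsize - 4 * 512, immediately before the
 * anchor, and the anchor sector itself is written at dsize - 2 * 512, the
 * second-to-last sector of the disk. The very last sector stays untouched.
 */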
static void imsm_sync_metadata(struct supertype *container)
{
	struct intel_super *super = container->sb;

	dprintf("sync metadata: %d\n", super->updates_pending);
	if (!super->updates_pending)
		return;

	write_super_imsm(container, 0);

	super->updates_pending = 0;
}
static struct dl *imsm_readd(struct intel_super *super, int idx, struct active_array *a)
{
	struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
	int i = get_imsm_disk_idx(dev, idx, MAP_X);
	struct dl *dl;

	for (dl = super->disks; dl; dl = dl->next)
		if (dl->index == i)
			break;

	if (dl && is_failed(&dl->disk))
		dl = NULL;

	if (dl)
		dprintf("found %x:%x\n", dl->major, dl->minor);

	return dl;
}
static struct dl *imsm_add_spare(struct intel_super *super, int slot,
				 struct active_array *a, int activate_new,
				 struct mdinfo *additional_test_list)
{
	struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
	int idx = get_imsm_disk_idx(dev, slot, MAP_X);
	struct imsm_super *mpb = super->anchor;
	struct imsm_map *map;
	unsigned long long pos;
	struct mdinfo *d;
	struct extent *ex;
	int i, j;
	int found;
	__u32 array_start = 0;
	__u32 array_end = 0;
	struct dl *dl;
	struct mdinfo *test_list;

	for (dl = super->disks; dl; dl = dl->next) {
		/* If in this array, skip */
		for (d = a->info.devs; d; d = d->next)
			if (is_fd_valid(d->state_fd) &&
			    d->disk.major == dl->major &&
			    d->disk.minor == dl->minor) {
				dprintf("%x:%x already in array\n",
					dl->major, dl->minor);
				break;
			}
		if (d)
			continue;
		test_list = additional_test_list;
		while (test_list) {
			if (test_list->disk.major == dl->major &&
			    test_list->disk.minor == dl->minor) {
				dprintf("%x:%x already in additional test list\n",
					dl->major, dl->minor);
				break;
			}
			test_list = test_list->next;
		}
		if (test_list)
			continue;

		/* skip in use or failed drives */
		if (is_failed(&dl->disk) || idx == dl->index ||
		    dl->index == -2) {
			dprintf("%x:%x status (failed: %d index: %d)\n",
				dl->major, dl->minor, is_failed(&dl->disk), idx);
			continue;
		}

		/* skip pure spares when we are looking for partially
		 * assimilated drives
		 */
		if (dl->index == -1 && !activate_new)
			continue;

		if (!drive_validate_sector_size(super, dl))
			continue;

		/* Does this unused device have the requisite free space?
		 * It needs to be able to cover all member volumes
		 */
		ex = get_extents(super, dl, 1);
		if (!ex) {
			dprintf("cannot get extents\n");
			continue;
		}
		for (i = 0; i < mpb->num_raid_devs; i++) {
			dev = get_imsm_dev(super, i);
			map = get_imsm_map(dev, MAP_0);

			/* check if this disk is already a member of
			 * this array
			 */
			if (get_imsm_disk_slot(map, dl->index) >= 0)
				continue;

			found = 0;
			j = 0;
			pos = 0;
			array_start = pba_of_lba0(map);
			array_end = array_start +
				    per_dev_array_size(map) - 1;

			do {
				/* check that we can start at pba_of_lba0 with
				 * num_data_stripes*blocks_per_stripe of space
				 */
				if (array_start >= pos && array_end < ex[j].start) {
					found = 1;
					break;
				}
				pos = ex[j].start + ex[j].size;
				j++;
			} while (ex[j-1].size);

			if (!found)
				break;
		}

		free(ex);
		if (i < mpb->num_raid_devs) {
			dprintf("%x:%x does not have %u to %u available\n",
				dl->major, dl->minor, array_start, array_end);
			/* No room */
			continue;
		}
		return dl;
	}

	return dl;
}
static int imsm_rebuild_allowed(struct supertype *cont, int dev_idx, int failed)
{
	struct imsm_dev *dev2;
	struct imsm_map *map;
	struct dl *idisk;
	int slot;
	int idx;
	__u8 state;

	dev2 = get_imsm_dev(cont->sb, dev_idx);

	state = imsm_check_degraded(cont->sb, dev2, failed, MAP_0);
	if (state == IMSM_T_STATE_FAILED) {
		map = get_imsm_map(dev2, MAP_0);
		for (slot = 0; slot < map->num_members; slot++) {
			/*
			 * Check if failed disks are deleted from intel
			 * disk list or are marked to be deleted
			 */
			idx = get_imsm_disk_idx(dev2, slot, MAP_X);
			idisk = get_imsm_dl_disk(cont->sb, idx);
			/*
			 * Do not rebuild the array if failed disks
			 * from failed sub-array are not removed from
			 * container.
			 */
			if (idisk &&
			    is_failed(&idisk->disk) &&
			    (idisk->action != DISK_REMOVE))
				return 0;
		}
	}
	return 1;
}
static struct mdinfo *imsm_activate_spare(struct active_array *a,
					  struct metadata_update **updates)
{
	/* Find a device with unused free space and use it to replace a
	 * failed/vacant region in an array. We replace failed regions one
	 * array at a time. The result is that a new spare disk will be added
	 * to the first failed array and after the monitor has finished
	 * propagating failures the remainder will be consumed.
	 *
	 * FIXME add a capability for mdmon to request spares from another
	 * container.
	 */
	struct intel_super *super = a->container->sb;
	int inst = a->info.container_member;
	struct imsm_dev *dev = get_imsm_dev(super, inst);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	int failed = a->info.array.raid_disks;
	struct mdinfo *rv = NULL;
	struct mdinfo *d;
	struct mdinfo *di;
	struct metadata_update *mu;
	struct dl *dl;
	struct imsm_update_activate_spare *u;
	int num_spares = 0;
	int i;
	int allowed;

	for (d = a->info.devs; d; d = d->next) {
		if (!is_fd_valid(d->state_fd))
			continue;

		if (d->curr_state & DS_FAULTY)
			/* wait for Removal to happen */
			return NULL;
	}

	dprintf("imsm: activate spare: inst=%d failed=%d (%d) level=%d\n",
		inst, failed, a->info.array.raid_disks, a->info.array.level);

	if (imsm_reshape_blocks_arrays_changes(super))
		return NULL;

	/* Cannot activate another spare if rebuild is in progress already
	 */
	if (is_rebuilding(dev)) {
		dprintf("imsm: No spare activation allowed. Rebuild in progress already.\n");
		return NULL;
	}

	if (a->info.array.level == 4)
		/* No repair for taken-over array:
		 * imsm doesn't support raid4
		 */
		return NULL;

	if (imsm_check_degraded(super, dev, failed, MAP_0) !=
	    IMSM_T_STATE_DEGRADED)
		return NULL;

	if (get_imsm_map(dev, MAP_0)->map_state == IMSM_T_STATE_UNINITIALIZED) {
		dprintf("imsm: No spare activation allowed. Volume is not initialized.\n");
		return NULL;
	}

	/*
	 * If there are any failed disks, check the state of the other volumes.
	 * Block rebuild if another one is failed until the failed disks
	 * are removed from the container.
	 */
	if (failed) {
		dprintf("found failed disks in %.*s, check if there is another failed sub-array.\n",
			MAX_RAID_SERIAL_LEN, dev->volume);
		/* check if states of the other volumes allow for rebuild */
		for (i = 0; i < super->anchor->num_raid_devs; i++) {
			if (i != inst) {
				allowed = imsm_rebuild_allowed(a->container,
							       i, failed);
				if (!allowed)
					return NULL;
			}
		}
	}

	/* For each slot, if it is not working, find a spare */
	for (i = 0; i < a->info.array.raid_disks; i++) {
		for (d = a->info.devs; d; d = d->next)
			if (d->disk.raid_disk == i)
				break;
		dprintf("found %d: %p %x\n", i, d, d ? d->curr_state : 0);
		if (d && is_fd_valid(d->state_fd))
			continue;

		/*
		 * OK, this device needs recovery. Try to re-add the
		 * previous occupant of this slot, if this fails see if
		 * we can continue the assimilation of a spare that was
		 * partially assimilated, finally try to activate a new
		 * spare.
		 */
		dl = imsm_readd(super, i, a);
		if (!dl)
			dl = imsm_add_spare(super, i, a, 0, rv);
		if (!dl)
			dl = imsm_add_spare(super, i, a, 1, rv);
		if (!dl)
			continue;

		/* found a usable disk with enough space */
		di = xcalloc(1, sizeof(*di));

		/* dl->index will be -1 in the case we are activating a
		 * pristine spare. imsm_process_update() will create a
		 * new index in this case. Once a disk is found to be
		 * failed in all member arrays it is kicked from the
		 * metadata.
		 */
		di->disk.number = dl->index;

		/* (ab)use di->devs to store a pointer to the device
		 * we chose
		 */
		di->devs = (struct mdinfo *) dl;
		di->disk.raid_disk = i;
		di->disk.major = dl->major;
		di->disk.minor = dl->minor;
		di->recovery_start = 0;
		di->data_offset = pba_of_lba0(map);
		di->component_size = a->info.component_size;
		di->container_member = inst;
		di->bb.supported = 1;
		if (a->info.consistency_policy == CONSISTENCY_POLICY_PPL) {
			di->ppl_sector = get_ppl_sector(super, inst);
			di->ppl_size = MULTIPLE_PPL_AREA_SIZE_IMSM >> 9;
		}
		super->random = random32();
		di->next = rv;
		rv = di;
		num_spares++;
		dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
			i, di->data_offset);
	}

	if (!rv)
		/* No spares found */
		return rv;
	/* Now 'rv' has a list of devices to return.
	 * Create a metadata_update record to update the
	 * disk_ord_tbl for the array
	 */
	mu = xmalloc(sizeof(*mu));
	mu->buf = xcalloc(num_spares,
			  sizeof(struct imsm_update_activate_spare));
	mu->space_list = NULL;
	mu->len = sizeof(struct imsm_update_activate_spare) * num_spares;
	mu->next = *updates;
	u = (struct imsm_update_activate_spare *) mu->buf;

	for (di = rv; di; di = di->next) {
		u->type = update_activate_spare;
		u->dl = (struct dl *) di->devs;
		u->slot = di->disk.raid_disk;
		u++;
	}
	*updates = mu;

	return rv;
}
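/*
 * Note: imsm_activate_spare() only queues work. The imsm_update_activate_spare
 * records built above travel through *updates to mdmon, and the metadata is
 * actually rewritten later in imsm_process_update(), which also assigns a
 * real index to a pristine spare (dl->index == -1, as noted above).
 */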
static int disks_overlap(struct intel_super *super, int idx, struct imsm_update_create_array *u)
{
	struct imsm_dev *dev = get_imsm_dev(super, idx);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	struct imsm_map *new_map = get_imsm_map(&u->dev, MAP_0);
	struct disk_info *inf = get_disk_info(u);
	struct imsm_disk *disk;
	int i;
	int j;

	for (i = 0; i < map->num_members; i++) {
		disk = get_imsm_disk(super, get_imsm_disk_idx(dev, i, MAP_X));
		for (j = 0; j < new_map->num_members; j++)
			if (serialcmp(disk->serial, inf[j].serial) == 0)
				return 1;
	}

	return 0;
}
static struct dl *get_disk_super(struct intel_super *super, int major, int minor)
{
	struct dl *dl;

	for (dl = super->disks; dl; dl = dl->next)
		if (dl->major == major && dl->minor == minor)
			return dl;

	return NULL;
}
static int remove_disk_super(struct intel_super *super, int major, int minor)
{
	struct dl *prev = NULL;
	struct dl *dl;

	for (dl = super->disks; dl; dl = dl->next) {
		if (dl->major == major && dl->minor == minor) {
			/* unlink from the list */
			if (prev)
				prev->next = dl->next;
			else
				super->disks = dl->next;
			__free_imsm_disk(dl, 1);
			dprintf("removed %x:%x\n", major, minor);
			break;
		}
		prev = dl;
	}
	return 0;
}

static void imsm_delete(struct intel_super *super, struct dl **dlp, unsigned index);
static int add_remove_disk_update(struct intel_super *super)
{
	int check_degraded = 0;
	struct dl *disk;

	/* add/remove some spares to/from the metadata/container */
	while (super->disk_mgmt_list) {
		struct dl *disk_cfg;

		disk_cfg = super->disk_mgmt_list;
		super->disk_mgmt_list = disk_cfg->next;
		disk_cfg->next = NULL;

		if (disk_cfg->action == DISK_ADD) {
			disk_cfg->next = super->disks;
			super->disks = disk_cfg;
			check_degraded = 1;
			dprintf("added %x:%x\n",
				disk_cfg->major, disk_cfg->minor);
		} else if (disk_cfg->action == DISK_REMOVE) {
			dprintf("Disk remove action processed: %x.%x\n",
				disk_cfg->major, disk_cfg->minor);
			disk = get_disk_super(super,
					      disk_cfg->major,
					      disk_cfg->minor);
			if (disk) {
				/* store action status */
				disk->action = DISK_REMOVE;
				/* remove spare disks only */
				if (disk->index == -1) {
					remove_disk_super(super,
							  disk_cfg->major,
							  disk_cfg->minor);
				} else
					disk_cfg->fd = disk->fd;
			}
			/* release allocated disk structure */
			__free_imsm_disk(disk_cfg, 1);
		}
	}
	return check_degraded;
}
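/*
 * The return value above drives repair: imsm_process_update() sets
 * a->check_degraded on every active array when add_remove_disk_update()
 * reports that a disk was added, since a fresh disk may let a degraded
 * member array start rebuilding. DISK_REMOVE, by contrast, only drops
 * spares (index == -1) from the in-memory list.
 */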
static int apply_reshape_migration_update(struct imsm_update_reshape_migration *u,
					  struct intel_super *super,
					  void ***space_list)
{
	struct intel_dev *id;
	void **tofree = NULL;
	int ret_val = 0;

	dprintf("(enter)\n");
	if (u->subdev < 0 || u->subdev > 1) {
		dprintf("imsm: Error: Wrong subdev: %i\n", u->subdev);
		return ret_val;
	}
	if (space_list == NULL || *space_list == NULL) {
		dprintf("imsm: Error: Memory is not allocated\n");
		return ret_val;
	}

	for (id = super->devlist; id; id = id->next) {
		if (id->index == (unsigned)u->subdev) {
			struct imsm_dev *dev = get_imsm_dev(super, u->subdev);
			struct imsm_map *map;
			struct imsm_dev *new_dev =
				(struct imsm_dev *)*space_list;
			struct imsm_map *migr_map = get_imsm_map(dev, MAP_1);
			int to_state;
			struct dl *new_disk;

			if (new_dev == NULL)
				return ret_val;
			*space_list = **space_list;
			memcpy(new_dev, dev, sizeof_imsm_dev(dev, 0));
			map = get_imsm_map(new_dev, MAP_0);
			if (migr_map) {
				dprintf("imsm: Error: migration in progress");
				return ret_val;
			}

			to_state = map->map_state;
			if ((u->new_level == IMSM_T_RAID5) &&
			    (map->raid_level == IMSM_T_RAID0)) {
				map->num_members++;
				/* this should not happen */
				if (u->new_disks[0] < 0) {
					map->failed_disk_num =
						map->num_members - 1;
					to_state = IMSM_T_STATE_DEGRADED;
				} else
					to_state = IMSM_T_STATE_NORMAL;
			}
			migrate(new_dev, super, to_state, MIGR_GEN_MIGR);

			if (u->new_level > -1)
				update_imsm_raid_level(map, u->new_level);

			migr_map = get_imsm_map(new_dev, MAP_1);
			if ((u->new_level == IMSM_T_RAID5) &&
			    (migr_map->raid_level == IMSM_T_RAID0)) {
				int ord = map->num_members - 1;
				migr_map->num_members--;
				if (u->new_disks[0] < 0)
					ord |= IMSM_ORD_REBUILD;
				set_imsm_ord_tbl_ent(map,
						     map->num_members - 1,
						     ord);
			}
			id->dev = new_dev;
			tofree = (void **)dev;

			/* update chunk size
			 */
			if (u->new_chunksize > 0) {
				struct imsm_map *dest_map =
					get_imsm_map(dev, MAP_0);
				int used_disks =
					imsm_num_data_members(dest_map);

				if (used_disks == 0)
					return ret_val;

				map->blocks_per_strip =
					__cpu_to_le16(u->new_chunksize * 2);
				update_num_data_stripes(map, imsm_dev_size(dev));
			}

			/* ensure blocks_per_member has valid value
			 */
			set_blocks_per_member(map,
					      per_dev_array_size(map) +
					      NUM_BLOCKS_DIRTY_STRIPE_REGION);

			if (u->new_level != IMSM_T_RAID5 ||
			    migr_map->raid_level != IMSM_T_RAID0 ||
			    migr_map->raid_level == map->raid_level)
				continue;

			if (u->new_disks[0] >= 0) {
				new_disk = get_disk_super(super,
							  major(u->new_disks[0]),
							  minor(u->new_disks[0]));
				if (new_disk == NULL)
					goto error_disk_add;
				dprintf("imsm: new disk for reshape is: %i:%i (%p, index = %i)\n",
					major(u->new_disks[0]),
					minor(u->new_disks[0]),
					new_disk, new_disk->index);

				new_disk->index = map->num_members - 1;
				/* slot to fill in autolayout
				 */
				new_disk->raiddisk = new_disk->index;
				new_disk->disk.status |= CONFIGURED_DISK;
				new_disk->disk.status &= ~SPARE_DISK;
			} else
				goto error_disk_add;

			*tofree = *space_list;
			/* calculate new size
			 */
			imsm_set_array_size(new_dev, -1);
			ret_val = 1;
		}
	}

	if (tofree)
		*space_list = tofree;
	return ret_val;

error_disk_add:
	dprintf("Error: imsm: Cannot find disk.\n");
	return ret_val;
}
static int apply_size_change_update(struct imsm_update_size_change *u,
				    struct intel_super *super)
{
	struct intel_dev *id;
	int ret_val = 0;

	dprintf("(enter)\n");
	if (u->subdev < 0 || u->subdev > 1) {
		dprintf("imsm: Error: Wrong subdev: %i\n", u->subdev);
		return ret_val;
	}

	for (id = super->devlist; id; id = id->next) {
		if (id->index == (unsigned)u->subdev) {
			struct imsm_dev *dev = get_imsm_dev(super, u->subdev);
			struct imsm_map *map = get_imsm_map(dev, MAP_0);
			int used_disks = imsm_num_data_members(map);
			unsigned long long blocks_per_member;
			unsigned long long new_size_per_disk;

			if (used_disks == 0)
				return 0;

			/* calculate new size
			 */
			new_size_per_disk = u->new_size / used_disks;
			blocks_per_member = new_size_per_disk +
					    NUM_BLOCKS_DIRTY_STRIPE_REGION;

			imsm_set_array_size(dev, u->new_size);
			set_blocks_per_member(map, blocks_per_member);
			update_num_data_stripes(map, u->new_size);
			ret_val = 1;
			break;
		}
	}

	return ret_val;
}
static int prepare_spare_to_activate(struct supertype *st,
				     struct imsm_update_activate_spare *u)
{
	struct intel_super *super = st->sb;
	int prev_current_vol = super->current_vol;
	struct active_array *a;
	int ret = 1;

	for (a = st->arrays; a; a = a->next)
		/*
		 * Additional initialization (adding bitmap header, filling
		 * the bitmap area with '1's to force initial rebuild for a whole
		 * data-area) is required when adding the spare to the volume
		 * with write-intent bitmap.
		 */
		if (a->info.container_member == u->array &&
		    a->info.consistency_policy == CONSISTENCY_POLICY_BITMAP) {
			struct dl *dl;

			for (dl = super->disks; dl; dl = dl->next)
				if (dl == u->dl)
					break;
			if (!dl)
				continue;

			super->current_vol = u->array;
			if (st->ss->write_bitmap(st, dl->fd, NoUpdate))
				ret = 0;
			super->current_vol = prev_current_vol;
		}
	return ret;
}
static int apply_update_activate_spare(struct imsm_update_activate_spare *u,
				       struct intel_super *super,
				       struct active_array *active_array)
{
	struct imsm_super *mpb = super->anchor;
	struct imsm_dev *dev = get_imsm_dev(super, u->array);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	struct imsm_map *migr_map;
	struct active_array *a;
	struct imsm_disk *disk;
	__u8 to_state;
	struct dl **dlp;
	struct dl *dl;
	unsigned int found;
	int failed;
	int victim;
	int i;
	int second_map_created = 0;

	for (; u; u = u->next) {
		victim = get_imsm_disk_idx(dev, u->slot, MAP_X);

		for (dl = super->disks; dl; dl = dl->next)
			if (dl == u->dl)
				break;

		if (!dl) {
			pr_err("error: imsm_activate_spare passed an unknown disk (index: %d)\n",
			       u->dl->index);
			return 0;
		}

		/* count failures (excluding rebuilds and the victim)
		 * to determine map[0] state
		 */
		failed = 0;
		for (i = 0; i < map->num_members; i++) {
			if (i == u->slot)
				continue;
			disk = get_imsm_disk(super,
					     get_imsm_disk_idx(dev, i, MAP_X));
			if (!disk || is_failed(disk))
				failed++;
		}

		/* adding a pristine spare, assign a new index */
		if (dl->index < 0) {
			dl->index = super->anchor->num_disks;
			super->anchor->num_disks++;
		}
		disk = &dl->disk;
		disk->status |= CONFIGURED_DISK;
		disk->status &= ~SPARE_DISK;

		to_state = imsm_check_degraded(super, dev, failed, MAP_0);
		if (!second_map_created) {
			second_map_created = 1;
			map->map_state = IMSM_T_STATE_DEGRADED;
			migrate(dev, super, to_state, MIGR_REBUILD);
		} else
			map->map_state = to_state;
		migr_map = get_imsm_map(dev, MAP_1);
		set_imsm_ord_tbl_ent(map, u->slot, dl->index);
		set_imsm_ord_tbl_ent(migr_map, u->slot,
				     dl->index | IMSM_ORD_REBUILD);

		/* update the family_num to mark a new container
		 * generation, being careful to record the existing
		 * family_num in orig_family_num to clean up after
		 * earlier mdadm versions that neglected to set it.
		 */
		if (mpb->orig_family_num == 0)
			mpb->orig_family_num = mpb->family_num;
		mpb->family_num += super->random;

		/* count arrays using the victim in the metadata */
		found = 0;
		for (a = active_array; a; a = a->next) {
			int dev_idx = a->info.container_member;

			if (get_disk_slot_in_dev(super, dev_idx, victim) >= 0)
				found++;
		}

		/* delete the victim if it is no longer being
		 * utilized anywhere
		 */
		if (!found) {
			/* We know that 'manager' isn't touching anything,
			 * so it is safe to delete
			 */
			for (dlp = &super->disks; *dlp; dlp = &(*dlp)->next)
				if ((*dlp)->index == victim)
					break;

			/* victim may be on the missing list */
			if (!*dlp)
				for (dlp = &super->missing; *dlp;
				     dlp = &(*dlp)->next)
					if ((*dlp)->index == victim)
						break;
			imsm_delete(super, dlp, victim);
		}
	}

	return 1;
}
static int apply_reshape_container_disks_update(struct imsm_update_reshape *u,
						struct intel_super *super,
						void ***space_list)
{
	struct dl *new_disk;
	struct intel_dev *id;
	int i;
	int delta_disks = u->new_raid_disks - u->old_raid_disks;
	int disk_count = u->old_raid_disks;
	void **tofree = NULL;
	int devices_to_reshape = 1;
	struct imsm_super *mpb = super->anchor;
	int ret_val = 0;
	unsigned int dev_id;

	dprintf("(enter)\n");

	/* enable spares to use in array */
	for (i = 0; i < delta_disks; i++) {
		new_disk = get_disk_super(super,
					  major(u->new_disks[i]),
					  minor(u->new_disks[i]));
		if (new_disk == NULL ||
		    (new_disk->index >= 0 &&
		     new_disk->index < u->old_raid_disks))
			goto update_reshape_exit;
		dprintf("imsm: new disk for reshape is: %i:%i (%p, index = %i)\n",
			major(u->new_disks[i]), minor(u->new_disks[i]),
			new_disk, new_disk->index);
		new_disk->index = disk_count++;
		/* slot to fill in autolayout
		 */
		new_disk->raiddisk = new_disk->index;
		new_disk->disk.status |= CONFIGURED_DISK;
		new_disk->disk.status &= ~SPARE_DISK;
	}

	dprintf("imsm: volume set mpb->num_raid_devs = %i\n",
		mpb->num_raid_devs);
	/* manage changes in volume
	 */
	for (dev_id = 0; dev_id < mpb->num_raid_devs; dev_id++) {
		void **sp = *space_list;
		struct imsm_dev *newdev;
		struct imsm_map *newmap, *oldmap;

		for (id = super->devlist; id; id = id->next) {
			if (id->index == dev_id)
				break;
		}
		if (id == NULL)
			break;
		if (!sp)
			continue;
		*space_list = *sp;
		newdev = (void *)sp;
		/* Copy the dev, but not (all of) the map */
		memcpy(newdev, id->dev, sizeof(*newdev));
		oldmap = get_imsm_map(id->dev, MAP_0);
		newmap = get_imsm_map(newdev, MAP_0);
		/* Copy the current map */
		memcpy(newmap, oldmap, sizeof_imsm_map(oldmap));
		/* update one device only
		 */
		if (devices_to_reshape) {
			dprintf("imsm: modifying subdev: %i\n",
				id->index);
			devices_to_reshape--;
			newdev->vol.migr_state = MIGR_STATE_MIGRATING;
			set_vol_curr_migr_unit(newdev, 0);
			set_migr_type(newdev, MIGR_GEN_MIGR);
			newmap->num_members = u->new_raid_disks;
			for (i = 0; i < delta_disks; i++) {
				set_imsm_ord_tbl_ent(newmap,
						     u->old_raid_disks + i,
						     u->old_raid_disks + i);
			}
			/* New map is correct, now need to save old map
			 */
			newmap = get_imsm_map(newdev, MAP_1);
			memcpy(newmap, oldmap, sizeof_imsm_map(oldmap));

			imsm_set_array_size(newdev, -1);
		}

		sp = (void **)id->dev;
		id->dev = newdev;
		*sp = tofree;
		tofree = sp;
	}
	/* Clear migration record */
	memset(super->migr_rec, 0, sizeof(struct migr_record));

	*space_list = tofree;
	ret_val = 1;

update_reshape_exit:
	return ret_val;
}
static int apply_takeover_update(struct imsm_update_takeover *u,
				 struct intel_super *super,
				 void ***space_list)
{
	struct imsm_dev *dev = NULL;
	struct intel_dev *dv;
	struct imsm_dev *dev_new;
	struct imsm_map *map;
	struct dl *dm, *du;
	void **space;
	int i;

	for (dv = super->devlist; dv; dv = dv->next)
		if (dv->index == (unsigned int)u->subarray) {
			dev = dv->dev;
			break;
		}
	if (dev == NULL)
		return 0;

	map = get_imsm_map(dev, MAP_0);

	if (u->direction == R10_TO_R0) {
		/* Number of failed disks must be half of initial disk number */
		if (imsm_count_failed(super, dev, MAP_0) !=
		    (map->num_members / 2))
			return 0;

		/* iterate through devices to mark removed disks as spare */
		for (dm = super->disks; dm; dm = dm->next) {
			if (dm->disk.status & FAILED_DISK) {
				int idx = dm->index;
				/* update indexes on the disk list */
				/* FIXME this loop-with-the-loop looks wrong, I'm not convinced
				   the index values will end up being correct.... NB */
				for (du = super->disks; du; du = du->next)
					if (du->index > idx)
						du->index--;
				/* mark as spare disk */
				dm->disk.status = SPARE_DISK;
				dm->index = -1;
			}
		}
		map->num_members /= map->num_domains;
		map->map_state = IMSM_T_STATE_NORMAL;
		update_imsm_raid_level(map, IMSM_T_RAID0);
		set_num_domains(map);
		update_num_data_stripes(map, imsm_dev_size(dev));
		map->failed_disk_num = -1;
	}

	if (u->direction == R0_TO_R10) {
		/* update slots in current disk list */
		for (dm = super->disks; dm; dm = dm->next) {
			if (dm->index >= 0)
				dm->index *= 2;
		}
		/* create new *missing* disks */
		for (i = 0; i < map->num_members; i++) {
			space = *space_list;
			if (!space)
				continue;
			*space_list = *space;
			du = (void *)space;
			memcpy(du, super->disks, sizeof(*du));
			du->fd = -1;
			du->minor = 0;
			du->major = 0;
			du->index = (i * 2) + 1;
			sprintf((char *)du->disk.serial,
				" MISSING_%d", du->index);
			sprintf((char *)du->serial,
				"MISSING_%d", du->index);
			du->next = super->missing;
			super->missing = du;
		}
		/* create new dev and map */
		space = *space_list;
		if (!space)
			return 0;
		*space_list = *space;
		dev_new = (void *)space;
		memcpy(dev_new, dev, sizeof(*dev));
		/* update new map */
		map = get_imsm_map(dev_new, MAP_0);

		map->map_state = IMSM_T_STATE_DEGRADED;
		update_imsm_raid_level(map, IMSM_T_RAID10);
		set_num_domains(map);
		map->num_members = map->num_members * map->num_domains;
		update_num_data_stripes(map, imsm_dev_size(dev));

		/* replace dev<->dev_new */
		dv->dev = dev_new;
	}

	/* update disk order table */
	for (du = super->disks; du; du = du->next)
		if (du->index >= 0)
			set_imsm_ord_tbl_ent(map, du->index, du->index);
	for (du = super->missing; du; du = du->next)
		if (du->index >= 0) {
			set_imsm_ord_tbl_ent(map, du->index, du->index);
			mark_missing(super, dv->dev, &du->disk, du->index);
		}

	return 1;
}
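/*
 * Takeover sketch (IMSM represents RAID10 as mirrored domains, so
 * num_domains is 2 for a RAID10 map): R10_TO_R0 on a 4-disk RAID10 demands
 * exactly 2 failed disks and halves num_members to 2, while R0_TO_R10
 * doubles the member count and fabricates "MISSING_%d" placeholder disks
 * at the odd slots ((i * 2) + 1) created above.
 */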
static void imsm_process_update(struct supertype *st,
				struct metadata_update *update)
{
	/*
	 * crack open the metadata_update envelope to find the update record
	 * update can be one of:
	 *	update_reshape_container_disks - all the arrays in the container
	 *	are being reshaped to have more devices. We need to mark
	 *	the arrays for general migration and convert selected spares
	 *	into active devices.
	 *	update_activate_spare - a spare device has replaced a failed
	 *	device in an array, update the disk_ord_tbl. If this disk is
	 *	present in all member arrays then also clear the SPARE_DISK
	 *	flag.
	 *	update_create_array
	 *	update_kill_array
	 *	update_rename_array
	 *	update_add_remove_disk
	 */
	struct intel_super *super = st->sb;
	struct imsm_super *mpb;
	enum imsm_update_type type = *(enum imsm_update_type *) update->buf;

	/* update requires a larger buf but the allocation failed */
	if (super->next_len && !super->next_buf) {
		super->next_len = 0;
		return;
	}

	if (super->next_buf) {
		memcpy(super->next_buf, super->buf, super->len);
		free(super->buf);
		super->len = super->next_len;
		super->buf = super->next_buf;

		super->next_len = 0;
		super->next_buf = NULL;
	}

	mpb = super->anchor;

	switch (type) {
	case update_general_migration_checkpoint: {
		struct intel_dev *id;
		struct imsm_update_general_migration_checkpoint *u =
			(void *)update->buf;

		dprintf("called for update_general_migration_checkpoint\n");

		/* find device under general migration */
		for (id = super->devlist; id; id = id->next) {
			if (is_gen_migration(id->dev)) {
				set_vol_curr_migr_unit(id->dev,
						       u->curr_migr_unit);
				super->updates_pending++;
			}
		}
		break;
	}
	case update_takeover: {
		struct imsm_update_takeover *u = (void *)update->buf;
		if (apply_takeover_update(u, super, &update->space_list)) {
			imsm_update_version_info(super);
			super->updates_pending++;
		}
		break;
	}
	case update_reshape_container_disks: {
		struct imsm_update_reshape *u = (void *)update->buf;
		if (apply_reshape_container_disks_update(
			    u, super, &update->space_list))
			super->updates_pending++;
		break;
	}
	case update_reshape_migration: {
		struct imsm_update_reshape_migration *u = (void *)update->buf;
		if (apply_reshape_migration_update(
			    u, super, &update->space_list))
			super->updates_pending++;
		break;
	}
	case update_size_change: {
		struct imsm_update_size_change *u = (void *)update->buf;
		if (apply_size_change_update(u, super))
			super->updates_pending++;
		break;
	}
	case update_activate_spare: {
		struct imsm_update_activate_spare *u = (void *) update->buf;

		if (prepare_spare_to_activate(st, u) &&
		    apply_update_activate_spare(u, super, st->arrays))
			super->updates_pending++;
		break;
	}
	case update_create_array: {
		/* someone wants to create a new array, we need to be aware of
		 * a few races/collisions:
		 * 1/ 'Create' called by two separate instances of mdadm
		 * 2/ 'Create' versus 'activate_spare': mdadm has chosen
		 *    devices that have since been assimilated via
		 *    activate_spare.
		 * In the event this update cannot be carried out mdadm will
		 * (FIX ME) notice that its update did not take hold.
		 */
		struct imsm_update_create_array *u = (void *) update->buf;
		struct intel_dev *dv;
		struct imsm_dev *dev;
		struct imsm_map *map, *new_map;
		unsigned long long start, end;
		unsigned long long new_start, new_end;
		struct disk_info *inf;
		struct dl *dl;
		int i;

		/* handle racing creates: first come first serve */
		if (u->dev_idx < mpb->num_raid_devs) {
			dprintf("subarray %d already defined\n", u->dev_idx);
			goto create_error;
		}

		/* check update is next in sequence */
		if (u->dev_idx != mpb->num_raid_devs) {
			dprintf("cannot create array %d expected index %d\n",
				u->dev_idx, mpb->num_raid_devs);
			goto create_error;
		}

		new_map = get_imsm_map(&u->dev, MAP_0);
		new_start = pba_of_lba0(new_map);
		new_end = new_start + per_dev_array_size(new_map);
		inf = get_disk_info(u);

		/* handle activate_spare versus create race:
		 * check to make sure that overlapping arrays do not include
		 * overlapping disks
		 */
		for (i = 0; i < mpb->num_raid_devs; i++) {
			dev = get_imsm_dev(super, i);
			map = get_imsm_map(dev, MAP_0);
			start = pba_of_lba0(map);
			end = start + per_dev_array_size(map);
			if ((new_start >= start && new_start <= end) ||
			    (start >= new_start && start <= new_end))
				/* overlap */;
			else
				continue;

			if (disks_overlap(super, i, u)) {
				dprintf("arrays overlap\n");
				goto create_error;
			}
		}

		/* check that prepare update was successful */
		if (!update->space) {
			dprintf("prepare update failed\n");
			goto create_error;
		}

		/* check that all disks are still active before committing
		 * changes. FIXME: could we instead handle this by creating a
		 * degraded array? That's probably not what the user expects,
		 * so better to drop this update on the floor.
		 */
		for (i = 0; i < new_map->num_members; i++) {
			dl = serial_to_dl(inf[i].serial, super);
			if (!dl) {
				dprintf("disk disappeared\n");
				goto create_error;
			}
		}

		super->updates_pending++;

		/* convert spares to members and fixup ord_tbl */
		for (i = 0; i < new_map->num_members; i++) {
			dl = serial_to_dl(inf[i].serial, super);
			if (dl->index == -1) {
				dl->index = mpb->num_disks;
				mpb->num_disks++;
				dl->disk.status |= CONFIGURED_DISK;
				dl->disk.status &= ~SPARE_DISK;
			}
			set_imsm_ord_tbl_ent(new_map, i, dl->index);
		}

		dv = update->space;
		dev = dv->dev;
		update->space = NULL;
		imsm_copy_dev(dev, &u->dev);
		dv->index = u->dev_idx;
		dv->next = super->devlist;
		super->devlist = dv;
		mpb->num_raid_devs++;

		imsm_update_version_info(super);
		break;

create_error:
		/* mdmon knows how to release update->space, but not
		 * ((struct intel_dev *) update->space)->dev
		 */
		if (update->space) {
			dv = update->space;
			free(dv->dev);
		}
		break;
	}
	case update_kill_array: {
		struct imsm_update_kill_array *u = (void *) update->buf;
		int victim = u->dev_idx;
		struct active_array *a;
		struct intel_dev **dp;

		/* sanity check that we are not affecting the uuid of
		 * active arrays, or deleting an active array
		 *
		 * FIXME when immutable ids are available, but note that
		 * we'll also need to fixup the invalidated/active
		 * subarray indexes in mdstat
		 */
		for (a = st->arrays; a; a = a->next)
			if (a->info.container_member >= victim)
				break;
		/* by definition if mdmon is running at least one array
		 * is active in the container, so checking
		 * mpb->num_raid_devs is just extra paranoia
		 */
		if (a || mpb->num_raid_devs == 1 || victim >= super->anchor->num_raid_devs) {
			dprintf("failed to delete subarray-%d\n", victim);
			break;
		}

		for (dp = &super->devlist; *dp;)
			if ((*dp)->index == (unsigned)super->current_vol) {
				*dp = (*dp)->next;
			} else {
				if ((*dp)->index > (unsigned)victim)
					(*dp)->index--;
				dp = &(*dp)->next;
			}
		mpb->num_raid_devs--;
		super->updates_pending++;
		break;
	}
	case update_rename_array: {
		struct imsm_update_rename_array *u = (void *) update->buf;
		char name[MAX_RAID_SERIAL_LEN+1];
		int target = u->dev_idx;
		struct active_array *a;
		struct imsm_dev *dev;

		/* sanity check that we are not affecting the uuid of
		 * an active array
		 */
		memset(name, 0, sizeof(name));
		snprintf(name, MAX_RAID_SERIAL_LEN, "%s", (char *) u->name);
		name[MAX_RAID_SERIAL_LEN] = '\0';
		for (a = st->arrays; a; a = a->next)
			if (a->info.container_member == target)
				break;
		dev = get_imsm_dev(super, u->dev_idx);

		if (a || !dev || imsm_is_name_allowed(super, name, 0) == false) {
			dprintf("failed to rename subarray-%d\n", target);
			break;
		}

		memcpy(dev->volume, name, MAX_RAID_SERIAL_LEN);
		super->updates_pending++;
		break;
	}
	case update_add_remove_disk: {
		/* we may be able to repair some arrays if disks are
		 * being added, check the status of add_remove_disk
		 * if disks have been added.
		 */
		if (add_remove_disk_update(super)) {
			struct active_array *a;

			super->updates_pending++;
			for (a = st->arrays; a; a = a->next)
				a->check_degraded = 1;
		}
		break;
	}
	case update_prealloc_badblocks_mem:
		break;
	case update_rwh_policy: {
		struct imsm_update_rwh_policy *u = (void *)update->buf;
		int target = u->dev_idx;
		struct imsm_dev *dev = get_imsm_dev(super, target);

		if (dev->rwh_policy != u->new_policy) {
			dev->rwh_policy = u->new_policy;
			super->updates_pending++;
		}
		break;
	}
	default:
		pr_err("error: unsupported process update type: (type: %d)\n", type);
	}
}
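/*
 * imsm_process_update() runs in the mdmon monitor thread, which must not
 * allocate memory. Every case above therefore works only with update->buf,
 * update->space and update->space_list, all of which are sized and
 * allocated ahead of time by imsm_prepare_update() below.
 */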
static struct mdinfo *get_spares_for_grow(struct supertype *st);

static int imsm_prepare_update(struct supertype *st,
			       struct metadata_update *update)
{
	/*
	 * Allocate space to hold new disk entries, raid-device entries or a new
	 * mpb if necessary. The manager synchronously waits for updates to
	 * complete in the monitor, so new mpb buffers allocated here can be
	 * integrated by the monitor thread without worrying about live pointers
	 * in the manager thread.
	 */
	enum imsm_update_type type;
	struct intel_super *super = st->sb;
	unsigned int sector_size = super->sector_size;
	struct imsm_super *mpb = super->anchor;
	size_t buf_len;
	size_t len = 0;

	if (update->len < (int)sizeof(type))
		return 0;

	type = *(enum imsm_update_type *) update->buf;

	switch (type) {
	case update_general_migration_checkpoint:
		if (update->len < (int)sizeof(struct imsm_update_general_migration_checkpoint))
			return 0;
		dprintf("called for update_general_migration_checkpoint\n");
		break;
	case update_takeover: {
		struct imsm_update_takeover *u = (void *)update->buf;
		if (update->len < (int)sizeof(*u))
			return 0;
		if (u->direction == R0_TO_R10) {
			void **tail = (void **)&update->space_list;
			struct imsm_dev *dev = get_imsm_dev(super, u->subarray);
			struct imsm_map *map = get_imsm_map(dev, MAP_0);
			int num_members = map->num_members;
			void *space;
			int size, i;
			/* allocate memory for added disks */
			for (i = 0; i < num_members; i++) {
				size = sizeof(struct dl);
				space = xmalloc(size);
				*tail = space;
				tail = space;
				*tail = NULL;
			}
			/* allocate memory for new device */
			size = sizeof_imsm_dev(super->devlist->dev, 0) +
			       (num_members * sizeof(__u32));
			space = xmalloc(size);
			*tail = space;
			tail = space;
			*tail = NULL;
			len = disks_to_mpb_size(num_members * 2);
		}
		break;
	}
	case update_reshape_container_disks: {
		/* Every raid device in the container is about to
		 * gain some more devices, and we will enter a
		 * reconfiguration.
		 * So each 'imsm_map' will be bigger, and the imsm_vol
		 * will now hold 2 of them.
		 * Thus we need new 'struct imsm_dev' allocations sized
		 * as sizeof_imsm_dev but with more devices in both maps.
		 */
		struct imsm_update_reshape *u = (void *)update->buf;
		struct intel_dev *dl;
		void **space_tail = (void**)&update->space_list;

		if (update->len < (int)sizeof(*u))
			return 0;

		dprintf("for update_reshape\n");

		for (dl = super->devlist; dl; dl = dl->next) {
			int size = sizeof_imsm_dev(dl->dev, 1);
			void *s;

			if (u->new_raid_disks > u->old_raid_disks)
				size += sizeof(__u32)*2*
					(u->new_raid_disks - u->old_raid_disks);
			s = xmalloc(size);
			*space_tail = s;
			space_tail = s;
			*space_tail = NULL;
		}

		len = disks_to_mpb_size(u->new_raid_disks);
		dprintf("New anchor length is %llu\n", (unsigned long long)len);
		break;
	}
	case update_reshape_migration: {
		/* for migration level 0->5 we need to add disks
		 * so the same as for container operation we will copy
		 * device to the bigger location.
		 * in memory prepared device and new disk area are prepared
		 * for usage in process update
		 */
		struct imsm_update_reshape_migration *u = (void *)update->buf;
		struct intel_dev *id;
		void **space_tail = (void **)&update->space_list;
		int size;
		void *s;
		int current_level = -1;

		if (update->len < (int)sizeof(*u))
			return 0;

		dprintf("for update_reshape\n");

		/* add space for bigger array in update
		 */
		for (id = super->devlist; id; id = id->next) {
			if (id->index == (unsigned)u->subdev) {
				size = sizeof_imsm_dev(id->dev, 1);
				if (u->new_raid_disks > u->old_raid_disks)
					size += sizeof(__u32)*2*
						(u->new_raid_disks - u->old_raid_disks);
				s = xmalloc(size);
				*space_tail = s;
				space_tail = s;
				*space_tail = NULL;
				break;
			}
		}
		if (update->space_list == NULL)
			break;

		/* add space for disk in update
		 */
		size = sizeof(struct dl);
		s = xmalloc(size);
		*space_tail = s;
		space_tail = s;
		*space_tail = NULL;

		/* add spare device to update
		 */
		for (id = super->devlist; id; id = id->next)
			if (id->index == (unsigned)u->subdev) {
				struct imsm_dev *dev;
				struct imsm_map *map;

				dev = get_imsm_dev(super, u->subdev);
				map = get_imsm_map(dev, MAP_0);
				current_level = map->raid_level;
				break;
			}
		if (u->new_level == 5 && u->new_level != current_level) {
			struct mdinfo *spares;

			spares = get_spares_for_grow(st);
			if (spares) {
				struct dl *dl;
				struct mdinfo *dev;

				dev = spares->devs;
				if (dev) {
					u->new_disks[0] =
						makedev(dev->disk.major,
							dev->disk.minor);
					dl = get_disk_super(super,
							    dev->disk.major,
							    dev->disk.minor);
					dl->index = u->old_raid_disks;
					dev = dev->next;
				}
				sysfs_free(spares);
			}
		}
		len = disks_to_mpb_size(u->new_raid_disks);
		dprintf("New anchor length is %llu\n", (unsigned long long)len);
		break;
	}
	case update_size_change: {
		if (update->len < (int)sizeof(struct imsm_update_size_change))
			return 0;
		break;
	}
	case update_activate_spare: {
		if (update->len < (int)sizeof(struct imsm_update_activate_spare))
			return 0;
		break;
	}
	case update_create_array: {
		struct imsm_update_create_array *u = (void *) update->buf;
		struct intel_dev *dv;
		struct imsm_dev *dev = &u->dev;
		struct imsm_map *map = get_imsm_map(dev, MAP_0);
		struct dl *dl;
		struct disk_info *inf;
		int i;
		int activate = 0;

		if (update->len < (int)sizeof(*u))
			return 0;

		inf = get_disk_info(u);
		len = sizeof_imsm_dev(dev, 1);
		/* allocate a new super->devlist entry */
		dv = xmalloc(sizeof(*dv));
		dv->dev = xmalloc(len);
		update->space = dv;

		/* count how many spares will be converted to members */
		for (i = 0; i < map->num_members; i++) {
			dl = serial_to_dl(inf[i].serial, super);
			if (!dl) {
				/* hmm, maybe it failed? Nothing we can do
				 * about it here
				 */
				continue;
			}
			if (count_memberships(dl, super) == 0)
				activate++;
		}
		len += activate * sizeof(struct imsm_disk);
		break;
	}
	case update_kill_array: {
		if (update->len < (int)sizeof(struct imsm_update_kill_array))
			return 0;
		break;
	}
	case update_rename_array: {
		if (update->len < (int)sizeof(struct imsm_update_rename_array))
			return 0;
		break;
	}
	case update_add_remove_disk:
		/* no update->len needed */
		break;
	case update_prealloc_badblocks_mem:
		super->extra_space += sizeof(struct bbm_log) -
			get_imsm_bbm_log_size(super->bbm_log);
		break;
	case update_rwh_policy: {
		if (update->len < (int)sizeof(struct imsm_update_rwh_policy))
			return 0;
		break;
	}
	default:
		return 0;
	}

	/* check if we need a larger metadata buffer */
	if (super->next_buf)
		buf_len = super->next_len;
	else
		buf_len = super->len;

	if (__le32_to_cpu(mpb->mpb_size) + super->extra_space + len > buf_len) {
		/* ok we need a larger buf than what is currently allocated
		 * if this allocation fails process_update will notice that
		 * ->next_len is set and ->next_buf is NULL
		 */
		buf_len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) +
				   super->extra_space + len, sector_size);
		if (super->next_buf)
			free(super->next_buf);

		super->next_len = buf_len;
		if (posix_memalign(&super->next_buf, sector_size, buf_len) == 0)
			memset(super->next_buf, 0, buf_len);
		else
			super->next_buf = NULL;
	}
	return 1;
}
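/*
 * Anchor growth example (illustrative numbers): with mpb_size == 3072,
 * extra_space == 0, len == 2048 and 512-byte sectors, 3072 + 2048 no longer
 * fits a 4096-byte buffer, so buf_len becomes ROUND_UP(5120, 512) == 5120
 * and the larger anchor is staged in super->next_buf for
 * imsm_process_update() to adopt.
 */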
/* must be called while manager is quiesced */
static void imsm_delete(struct intel_super *super, struct dl **dlp, unsigned index)
{
	struct imsm_super *mpb = super->anchor;
	struct dl *iter;
	struct imsm_dev *dev;
	struct imsm_map *map;
	unsigned int i, j, num_members;
	__u32 ord, ord_map0;
	struct bbm_log *log = super->bbm_log;

	dprintf("deleting device[%d] from imsm_super\n", index);

	/* shift all indexes down one */
	for (iter = super->disks; iter; iter = iter->next)
		if (iter->index > (int)index)
			iter->index--;
	for (iter = super->missing; iter; iter = iter->next)
		if (iter->index > (int)index)
			iter->index--;

	for (i = 0; i < mpb->num_raid_devs; i++) {
		dev = get_imsm_dev(super, i);
		map = get_imsm_map(dev, MAP_0);
		num_members = map->num_members;
		for (j = 0; j < num_members; j++) {
			/* update ord entries being careful not to propagate
			 * ord-flags to the first map
			 */
			ord = get_imsm_ord_tbl_ent(dev, j, MAP_X);
			ord_map0 = get_imsm_ord_tbl_ent(dev, j, MAP_0);

			if (ord_to_idx(ord) <= index)
				continue;

			map = get_imsm_map(dev, MAP_0);
			set_imsm_ord_tbl_ent(map, j, ord_map0 - 1);
			map = get_imsm_map(dev, MAP_1);
			if (map)
				set_imsm_ord_tbl_ent(map, j, ord - 1);
		}
	}

	for (i = 0; i < log->entry_count; i++) {
		struct bbm_log_entry *entry = &log->marked_block_entries[i];

		if (entry->disk_ordinal <= index)
			continue;
		entry->disk_ordinal--;
	}

	mpb->num_disks--;
	super->updates_pending++;
	if (*dlp) {
		struct dl *dl = *dlp;

		*dlp = (*dlp)->next;
		__free_imsm_disk(dl, 1);
	}
}
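/*
 * Deletion example: removing index 2 leaves disks 0 and 1 alone, renumbers
 * 3..n down by one, and decrements every ord_tbl entry and BBM
 * disk_ordinal that pointed above 2. MAP_0 entries are rewritten from
 * ord_map0 so that rebuild/failed ord-flags never leak from MAP_1 into the
 * first map.
 */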
static int imsm_get_allowed_degradation(int level, int raid_disks,
					struct intel_super *super,
					struct imsm_dev *dev)
{
	switch (level) {
	case 10: {
		int ret_val = 0;
		struct imsm_map *map;
		int i;

		ret_val = raid_disks/2;
		/* check map that all disk pairs are not failed
		 */
		map = get_imsm_map(dev, MAP_0);
		for (i = 0; i < ret_val; i++) {
			int degradation = 0;
			if (get_imsm_disk(super, i) == NULL)
				degradation++;
			if (get_imsm_disk(super, i + 1) == NULL)
				degradation++;
			if (degradation == 2)
				return 0;
		}
		map = get_imsm_map(dev, MAP_1);
		/* if there is no second map
		 * result can be returned
		 */
		if (map == NULL)
			return ret_val;
		/* check degradation in second map
		 */
		for (i = 0; i < ret_val; i++) {
			int degradation = 0;
			if (get_imsm_disk(super, i) == NULL)
				degradation++;
			if (get_imsm_disk(super, i + 1) == NULL)
				degradation++;
			if (degradation == 2)
				return 0;
		}
		return ret_val;
	}
	default:
		return 0;
	}
}
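/*
 * The raid_disks/2 bound above allows losing one disk from every mirror
 * pair, but never both: the loop walks neighbouring ordinals (i, i + 1)
 * and returns 0 as soon as any pair counts two missing disks, repeating
 * the check for the migration map when one is present.
 */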
/*******************************************************************************
 * Function:	validate_container_imsm
 * Description: This routine validates container after assembly,
 *		e.g. if devices in container are under the same controller.
 *
 * Parameters:
 *	info	: linked list with info about devices used in array
 ******************************************************************************/
int validate_container_imsm(struct mdinfo *info)
{
	if (check_no_platform())
		return 0;

	struct sys_dev *idev;
	struct sys_dev *hba = NULL;
	struct sys_dev *intel_devices = find_intel_devices();
	char *dev_path = devt_to_devpath(makedev(info->disk.major,
						 info->disk.minor), 1, NULL);

	for (idev = intel_devices; idev; idev = idev->next) {
		if (dev_path && strstr(dev_path, idev->path)) {
			hba = idev;
			break;
		}
	}

	if (!hba) {
		pr_err("WARNING - Cannot detect HBA for device %s!\n",
		       devid2kname(makedev(info->disk.major, info->disk.minor)));
		return 1;
	}

	const struct imsm_orom *orom = get_orom_by_device_id(hba->dev_id);
	struct mdinfo *dev;

	for (dev = info->next; dev; dev = dev->next) {
		dev_path = devt_to_devpath(makedev(dev->disk.major,
						   dev->disk.minor), 1, NULL);

		struct sys_dev *hba2 = NULL;
		for (idev = intel_devices; idev; idev = idev->next) {
			if (dev_path && strstr(dev_path, idev->path)) {
				hba2 = idev;
				break;
			}
		}

		const struct imsm_orom *orom2 = hba2 == NULL ? NULL :
						get_orom_by_device_id(hba2->dev_id);

		if (hba2 && hba->type != hba2->type) {
			pr_err("WARNING - HBAs of devices do not match %s != %s\n",
			       get_sys_dev_type(hba->type), get_sys_dev_type(hba2->type));
			return 1;
		}

		if (orom != orom2) {
			pr_err("WARNING - IMSM container assembled with disks under different HBAs!\n"
			       "       This operation is not supported and can lead to data loss.\n");
			return 1;
		}
	}

	if (!orom) {
		pr_err("WARNING - IMSM container assembled with disks under HBAs without IMSM platform support!\n"
		       "       This operation is not supported and can lead to data loss.\n");
		return 1;
	}

	return 0;
}
/*******************************************************************************
 * Function:	imsm_record_badblock
 * Description: This routine stores new bad block record in BBM log
 *
 * Parameters:
 *	a		: array containing a bad block
 *	slot		: disk number containing a bad block
 *	sector		: bad block sector
 *	length		: bad block sectors range
 ******************************************************************************/
static int imsm_record_badblock(struct active_array *a, int slot,
				unsigned long long sector, int length)
{
	struct intel_super *super = a->container->sb;
	int ord;
	int ret;

	ord = imsm_disk_slot_to_ord(a, slot);
	if (ord < 0)
		return 0;

	ret = record_new_badblock(super->bbm_log, ord_to_idx(ord), sector,
				  length);
	if (ret)
		super->updates_pending++;

	return ret;
}
/*******************************************************************************
 * Function:	imsm_clear_badblock
 * Description: This routine clears bad block record from BBM log
 *
 * Parameters:
 *	a		: array containing a bad block
 *	slot		: disk number containing a bad block
 *	sector		: bad block sector
 *	length		: bad block sectors range
 ******************************************************************************/
static int imsm_clear_badblock(struct active_array *a, int slot,
			       unsigned long long sector, int length)
{
	struct intel_super *super = a->container->sb;
	int ord;
	int ret;

	ord = imsm_disk_slot_to_ord(a, slot);
	if (ord < 0)
		return 0;

	ret = clear_badblock(super->bbm_log, ord_to_idx(ord), sector, length);
	if (ret)
		super->updates_pending++;

	return ret;
}
/*******************************************************************************
 * Function:	imsm_get_badblocks
 * Description: This routine gets the list of bad blocks for an array
 *
 * Parameters:
 *	slot		: disk number
 *	bb		: structure containing bad blocks
 ******************************************************************************/
static struct md_bb *imsm_get_badblocks(struct active_array *a, int slot)
{
	int inst = a->info.container_member;
	struct intel_super *super = a->container->sb;
	struct imsm_dev *dev = get_imsm_dev(super, inst);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	int ord;

	ord = imsm_disk_slot_to_ord(a, slot);
	if (ord < 0)
		return NULL;

	get_volume_badblocks(super->bbm_log, ord_to_idx(ord), pba_of_lba0(map),
			     per_dev_array_size(map), &super->bb);

	return &super->bb;
}
/*******************************************************************************
 * Function:	examine_badblocks_imsm
 * Description: Prints list of bad blocks on a disk to the standard output
 *
 * Parameters:
 *	st		: metadata handler
 *	fd		: open file descriptor for device
 *	devname		: device name
 ******************************************************************************/
static int examine_badblocks_imsm(struct supertype *st, int fd, char *devname)
{
	struct intel_super *super = st->sb;
	struct bbm_log *log = super->bbm_log;
	struct dl *d = NULL;
	int match = 0;
	unsigned int i;

	for (d = super->disks; d; d = d->next) {
		if (strcmp(d->devname, devname) == 0)
			break;
	}

	if ((d == NULL) || (d->index < 0)) { /* serial mismatch probably */
		pr_err("%s doesn't appear to be part of a raid array\n",
		       devname);
		return 1;
	}

	if (log != NULL) {
		struct bbm_log_entry *entry = &log->marked_block_entries[0];

		for (i = 0; i < log->entry_count; i++) {
			if (entry[i].disk_ordinal == d->index) {
				unsigned long long sector = __le48_to_cpu(
					&entry[i].defective_block_start);
				int cnt = entry[i].marked_count + 1;

				if (!match) {
					printf("Bad-blocks on %s:\n", devname);
					match = 1;
				}
				printf("%20llu for %d sectors\n", sector, cnt);
			}
		}
	}

	if (!match)
		printf("No bad-blocks list configured on %s\n", devname);

	return 0;
}
/*******************************************************************************
 * Function:	init_migr_record_imsm
 * Description:	Function initializes the imsm migration record
 * Parameters:
 *	super	: imsm internal array info
 *	dev	: device under migration
 *	info	: general array info to find the smallest device
 ******************************************************************************/
void init_migr_record_imsm(struct supertype *st, struct imsm_dev *dev,
			   struct mdinfo *info)
{
	struct intel_super *super = st->sb;
	struct migr_record *migr_rec = super->migr_rec;
	int new_data_disks;
	unsigned long long dsize, dev_sectors;
	long long unsigned min_dev_sectors = -1LLU;
	struct imsm_map *map_dest = get_imsm_map(dev, MAP_0);
	struct imsm_map *map_src = get_imsm_map(dev, MAP_1);
	unsigned long long num_migr_units;
	unsigned long long array_blocks;
	struct dl *dl_disk = NULL;

	memset(migr_rec, 0, sizeof(struct migr_record));
	migr_rec->family_num = __cpu_to_le32(super->anchor->family_num);

	/* only ascending reshape supported now */
	migr_rec->ascending_migr = __cpu_to_le32(1);

	migr_rec->dest_depth_per_unit = GEN_MIGR_AREA_SIZE /
		max(map_dest->blocks_per_strip, map_src->blocks_per_strip);
	migr_rec->dest_depth_per_unit *=
		max(map_dest->blocks_per_strip, map_src->blocks_per_strip);
	new_data_disks = imsm_num_data_members(map_dest);
	migr_rec->blocks_per_unit =
		__cpu_to_le32(migr_rec->dest_depth_per_unit * new_data_disks);
	migr_rec->dest_depth_per_unit =
		__cpu_to_le32(migr_rec->dest_depth_per_unit);
	array_blocks = info->component_size * new_data_disks;
	num_migr_units =
		array_blocks / __le32_to_cpu(migr_rec->blocks_per_unit);

	if (array_blocks % __le32_to_cpu(migr_rec->blocks_per_unit))
		num_migr_units++;
	set_num_migr_units(migr_rec, num_migr_units);

	migr_rec->post_migr_vol_cap = dev->size_low;
	migr_rec->post_migr_vol_cap_hi = dev->size_high;

	/* Find the smallest dev */
	for (dl_disk = super->disks; dl_disk; dl_disk = dl_disk->next) {
		/* ignore spares in container */
		if (dl_disk->index < 0)
			continue;
		get_dev_size(dl_disk->fd, NULL, &dsize);
		dev_sectors = dsize / 512;
		if (dev_sectors < min_dev_sectors)
			min_dev_sectors = dev_sectors;
	}

	set_migr_chkp_area_pba(migr_rec, min_dev_sectors -
			       RAID_DISK_RESERVED_BLOCKS_IMSM_HI);

	write_imsm_migr_rec(st);
}
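/*
 * Unit sizing sketch: dest_depth_per_unit is GEN_MIGR_AREA_SIZE rounded
 * down to a multiple of the larger of the two strip sizes, one migration
 * unit covers blocks_per_unit = dest_depth_per_unit * new_data_disks
 * blocks of user data, and num_migr_units is the rounded-up quotient
 * array_blocks / blocks_per_unit.
 */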
/*******************************************************************************
 * Function:	save_backup_imsm
 * Description:	Function saves critical data stripes to Migration Copy Area
 *		and updates the current migration unit status.
 *		Use restore_stripes() to form a destination stripe,
 *		and to write it to the Copy Area.
 * Parameters:
 *	st		: supertype information
 *	dev		: imsm device that backup is saved for
 *	info		: general array info
 *	buf		: input buffer
 *	length		: length of data to backup (blocks_per_unit)
 ******************************************************************************/
int save_backup_imsm(struct supertype *st,
		     struct imsm_dev *dev,
		     struct mdinfo *info,
		     void *buf,
		     int length)
{
	int rv = -1;
	struct intel_super *super = st->sb;
	int i;
	struct imsm_map *map_dest = get_imsm_map(dev, MAP_0);
	int new_disks = map_dest->num_members;
	int dest_layout = 0;
	int dest_chunk, targets[new_disks];
	unsigned long long start, target_offsets[new_disks];
	int data_disks = imsm_num_data_members(map_dest);

	for (i = 0; i < new_disks; i++) {
		struct dl *dl_disk = get_imsm_dl_disk(super, i);

		if (dl_disk && is_fd_valid(dl_disk->fd))
			targets[i] = dl_disk->fd;
		else
			goto abort;
	}

	start = info->reshape_progress * 512;
	for (i = 0; i < new_disks; i++) {
		target_offsets[i] = migr_chkp_area_pba(super->migr_rec) * 512;
		/* move back copy area address, it will be moved forward
		 * in restore_stripes() using start input variable
		 */
		target_offsets[i] -= start/data_disks;
	}

	dest_layout = imsm_level_to_layout(map_dest->raid_level);
	dest_chunk = __le16_to_cpu(map_dest->blocks_per_strip) * 512;

	if (restore_stripes(targets, /* list of dest devices */
			    target_offsets, /* migration record offsets */
			    new_disks,
			    dest_chunk,
			    map_dest->raid_level,
			    dest_layout,
			    -1,    /* source backup file descriptor */
			    0,     /* input buf offset
				    * always 0 buf is already offset */
			    start,
			    length,
			    buf) != 0) {
		pr_err("Error restoring stripes\n");
		goto abort;
	}

	rv = 0;

abort:
	return rv;
}
/*******************************************************************************
 * Function:	save_checkpoint_imsm
 * Description:	Function called for current unit status update
 *		in the migration record. It writes it to disk.
 * Parameters:
 *	super	: imsm internal array info
 *	info	: general array info
 * Returns:
 *	2: failure, means no valid migration record
 *	   (no general migration in progress)
 ******************************************************************************/
int save_checkpoint_imsm(struct supertype *st, struct mdinfo *info, int state)
{
	struct intel_super *super = st->sb;
	unsigned long long blocks_per_unit;
	unsigned long long curr_migr_unit;

	if (load_imsm_migr_rec(super) != 0) {
		dprintf("imsm: ERROR: Cannot read migration record for checkpoint save.\n");
		return 1;
	}

	blocks_per_unit = __le32_to_cpu(super->migr_rec->blocks_per_unit);
	if (blocks_per_unit == 0) {
		dprintf("imsm: no migration in progress.\n");
		return 2;
	}
	curr_migr_unit = info->reshape_progress / blocks_per_unit;
	/* check if array is aligned to copy area
	 * if it is not aligned, add one to current migration unit value
	 * this can happen on array reshape finish only
	 */
	if (info->reshape_progress % blocks_per_unit)
		curr_migr_unit++;

	set_current_migr_unit(super->migr_rec, curr_migr_unit);
	super->migr_rec->rec_status = __cpu_to_le32(state);
	set_migr_dest_1st_member_lba(super->migr_rec,
				     super->migr_rec->dest_depth_per_unit * curr_migr_unit);

	if (write_imsm_migr_rec(st) < 0) {
		dprintf("imsm: Cannot write migration record outside backup area\n");
		return 1;
	}

	return 0;
}
/*******************************************************************************
 * Function:	recover_backup_imsm
 * Description:	Function recovers critical data from the Migration Copy Area
 *		while assembling an array.
 * Parameters:
 *	super	: imsm internal array info
 *	info	: general array info
 * Returns:
 *	0 : success (or there is no data to recover)
 *	1 : fail
 ******************************************************************************/
int recover_backup_imsm(struct supertype *st, struct mdinfo *info)
{
	struct intel_super *super = st->sb;
	struct migr_record *migr_rec = super->migr_rec;
	struct imsm_map *map_dest;
	struct intel_dev *id = NULL;
	unsigned long long read_offset;
	unsigned long long write_offset;
	unsigned unit_len;
	int new_disks, err;
	char *buf = NULL;
	int retval = 1;
	unsigned int sector_size = super->sector_size;
	unsigned long long curr_migr_unit = current_migr_unit(migr_rec);
	unsigned long long num_migr_units = get_num_migr_units(migr_rec);
	char buffer[SYSFS_MAX_BUF_SIZE];
	int skipped_disks = 0;
	struct dl *dl_disk;

	err = sysfs_get_str(info, NULL, "array_state", (char *)buffer, sizeof(buffer));
	if (err < 1)
		return 1;

	/* recover data only during assembly */
	if (strncmp(buffer, "inactive", 8) != 0)
		return 0;
	/* no data to recover */
	if (__le32_to_cpu(migr_rec->rec_status) == UNIT_SRC_NORMAL)
		return 0;
	if (curr_migr_unit >= num_migr_units)
		return 0;

	/* find device during reshape */
	for (id = super->devlist; id; id = id->next)
		if (is_gen_migration(id->dev))
			break;
	if (id == NULL)
		return 1;

	map_dest = get_imsm_map(id->dev, MAP_0);
	new_disks = map_dest->num_members;

	read_offset = migr_chkp_area_pba(migr_rec) * 512;

	write_offset = (migr_dest_1st_member_lba(migr_rec) +
			pba_of_lba0(map_dest)) * 512;

	unit_len = __le32_to_cpu(migr_rec->dest_depth_per_unit) * 512;
	if (posix_memalign((void **)&buf, sector_size, unit_len) != 0)
		goto abort;

	for (dl_disk = super->disks; dl_disk; dl_disk = dl_disk->next) {
		if (dl_disk->index < 0)
			continue;

		if (!is_fd_valid(dl_disk->fd)) {
			skipped_disks++;
			continue;
		}
		if (lseek64(dl_disk->fd, read_offset, SEEK_SET) < 0) {
			pr_err("Cannot seek to block: %s\n",
			       strerror(errno));
			skipped_disks++;
			continue;
		}
		if (read(dl_disk->fd, buf, unit_len) != (ssize_t)unit_len) {
			pr_err("Cannot read copy area block: %s\n",
			       strerror(errno));
			skipped_disks++;
			continue;
		}
		if (lseek64(dl_disk->fd, write_offset, SEEK_SET) < 0) {
			pr_err("Cannot seek to block: %s\n",
			       strerror(errno));
			skipped_disks++;
			continue;
		}
		if (write(dl_disk->fd, buf, unit_len) != (ssize_t)unit_len) {
			pr_err("Cannot restore block: %s\n",
			       strerror(errno));
			skipped_disks++;
			continue;
		}
	}

	if (skipped_disks > imsm_get_allowed_degradation(info->new_level,
							 new_disks,
							 super,
							 id->dev)) {
		pr_err("Cannot restore data from backup. Too many failed disks\n");
		goto abort;
	}

	if (save_checkpoint_imsm(st, info, UNIT_SRC_NORMAL)) {
		/* ignore error == 2, this can mean end of reshape here
		 */
		dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_NORMAL) during restart\n");
	}

	retval = 0;

abort:
	free(buf);
	return retval;
}
/**
 * test_and_add_drive_controller_policy_imsm() - add disk controller to policies list.
 * @type: Policy type to search on list.
 * @pols: List of currently recorded policies.
 * @disk_fd: File descriptor of the device to check.
 * @hba: The hba the disk is attached to, could be NULL if verification is disabled.
 * @verbose: verbose flag.
 *
 * IMSM cares about drive physical placement. If @hba is not set, it adds an unknown policy.
 * If there is no controller policy on @pols, we are free to add the first one. If there is
 * a policy, then the new one must be the same - no controller mixing allowed.
 */
static mdadm_status_t
test_and_add_drive_controller_policy_imsm(const char * const type, dev_policy_t **pols, int disk_fd,
					  struct sys_dev *hba, const int verbose)
{
	const char *controller_policy = get_sys_dev_type(SYS_DEV_UNKNOWN);
	struct dev_policy *pol = pol_find(*pols, (char *)type);
	char devname[MAX_RAID_SERIAL_LEN];

	if (hba)
		controller_policy = get_sys_dev_type(hba->type);

	if (!pol) {
		pol_add(pols, (char *)type, (char *)controller_policy, "imsm");
		return MDADM_STATUS_SUCCESS;
	}

	if (strcmp(pol->value, controller_policy) == 0)
		return MDADM_STATUS_SUCCESS;

	fd2devname(disk_fd, devname);
	pr_vrb("Intel(R) raid controller \"%s\" found for %s, but \"%s\" was detected earlier\n",
	       controller_policy, devname, pol->value);
	pr_vrb("Disks under different controllers cannot be used, aborting\n");

	return MDADM_STATUS_ERROR;
}
/**
 * test_and_add_drive_encryption_policy_imsm() - add disk encryption to policies list.
 * @type: policy type to search in the list.
 * @pols: list of currently recorded policies.
 * @disk_fd: file descriptor of the device to check.
 * @hba: The hba to which the drive is attached, could be NULL if verification is disabled.
 * @verbose: verbose flag.
 *
 * IMSM cares about drive encryption state. It is not allowed to mix disks with different
 * encryption states within one md device.
 * If there is no encryption policy on @pols, we are free to add the first one.
 * If there is a policy, then the new one must be the same.
 */
static mdadm_status_t
test_and_add_drive_encryption_policy_imsm(const char * const type, dev_policy_t **pols, int disk_fd,
					  struct sys_dev *hba, const int verbose)
{
	struct dev_policy *expected_policy = pol_find(*pols, (char *)type);
	struct encryption_information information = {0};
	char *encryption_state = "Unknown";
	int status = MDADM_STATUS_SUCCESS;
	bool encryption_checked = true;
	char devname[PATH_MAX];

	if (!hba)
		return MDADM_STATUS_SUCCESS;

	switch (hba->type) {
	case SYS_DEV_NVME:
	case SYS_DEV_VMD:
		status = get_nvme_opal_encryption_information(disk_fd, &information, verbose);
		break;
	case SYS_DEV_SATA:
	case SYS_DEV_SATA_VMD:
		status = get_ata_encryption_information(disk_fd, &information, verbose);
		break;
	default:
		encryption_checked = false;
	}

	if (status) {
		fd2devname(disk_fd, devname);
		pr_vrb("Failed to read encryption information of device %s\n", devname);
		return MDADM_STATUS_ERROR;
	}

	if (encryption_checked) {
		if (information.status == ENC_STATUS_LOCKED) {
			fd2devname(disk_fd, devname);
			pr_vrb("Device %s is in Locked state, cannot use. Aborting.\n", devname);
			return MDADM_STATUS_ERROR;
		}
		encryption_state = (char *)get_encryption_status_string(information.status);
	}

	if (expected_policy) {
		if (strcmp(expected_policy->value, encryption_state) == 0)
			return MDADM_STATUS_SUCCESS;

		fd2devname(disk_fd, devname);
		pr_vrb("Encryption status \"%s\" detected for disk %s, but \"%s\" status was detected earlier.\n",
		       encryption_state, devname, expected_policy->value);
		pr_vrb("Disks with different encryption status cannot be used.\n");
		return MDADM_STATUS_ERROR;
	}

	pol_add(pols, (char *)type, encryption_state, "imsm");

	return MDADM_STATUS_SUCCESS;
}
struct imsm_drive_policy {
	char *type;
	mdadm_status_t (*test_and_add_drive_policy)(const char * const type,
						    struct dev_policy **pols, int disk_fd,
						    struct sys_dev *hba, const int verbose);
};

struct imsm_drive_policy imsm_policies[] = {
	{"controller", test_and_add_drive_controller_policy_imsm},
	{"encryption", test_and_add_drive_encryption_policy_imsm}
};

mdadm_status_t test_and_add_drive_policies_imsm(struct dev_policy **pols, int disk_fd,
						const int verbose)
{
	struct imsm_drive_policy *imsm_pol;
	struct sys_dev *hba = NULL;
	char path[PATH_MAX];
	mdadm_status_t ret;
	unsigned int i;

	/* If imsm platform verification is disabled, do not search for hba. */
	if (check_no_platform() != 1) {
		if (!diskfd_to_devpath(disk_fd, 1, path)) {
			pr_vrb("IMSM: Failed to retrieve device path by file descriptor.\n");
			return MDADM_STATUS_ERROR;
		}

		hba = find_disk_attached_hba(disk_fd, path);
		if (!hba) {
			pr_vrb("IMSM: Failed to find hba for %s\n", path);
			return MDADM_STATUS_ERROR;
		}
	}

	for (i = 0; i < ARRAY_SIZE(imsm_policies); i++) {
		imsm_pol = &imsm_policies[i];

		ret = imsm_pol->test_and_add_drive_policy(imsm_pol->type, pols, disk_fd, hba,
							  verbose);
		if (ret != MDADM_STATUS_SUCCESS)
			/* Inherit error code */
			return ret;
	}

	return MDADM_STATUS_SUCCESS;
}
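/*
 * Extending the checks only needs another imsm_policies[] entry, e.g. a
 * hypothetical
 *	{"sector_size", test_and_add_drive_sector_size_policy_imsm}
 * whose callback follows the same contract: record the first observed
 * value in *pols and return MDADM_STATUS_ERROR on any later mismatch.
 */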
/**
 * get_spare_criteria_imsm() - set spare criteria.
 * @st: supertype.
 * @mddev_path: path to md device devnode, it must be a container.
 * @c: spare_criteria struct to fill, not NULL.
 *
 * If the superblock is not loaded, use mddev_path to load_container; it must
 * be given in this case. Fills size and sector size according to superblock.
 */
mdadm_status_t get_spare_criteria_imsm(struct supertype *st, char *mddev_path,
				       struct spare_criteria *c)
{
	mdadm_status_t ret = MDADM_STATUS_ERROR;
	bool free_superblock = false;
	unsigned long long size = 0;
	struct intel_super *super;
	struct extent *e;
	struct dl *dl;
	int i;

	/* If no superblock and no mddev_path, we cannot load superblock. */
	assert(st->sb || mddev_path);

	if (mddev_path) {
		int fd = open(mddev_path, O_RDONLY);
		mdadm_status_t rv;

		if (!is_fd_valid(fd))
			return MDADM_STATUS_ERROR;

		if (!st->sb) {
			if (load_container_imsm(st, fd, st->devnm)) {
				close(fd);
				return MDADM_STATUS_ERROR;
			}
			free_superblock = true;
		}

		rv = mddev_test_and_add_drive_policies(st, &c->pols, fd, 0);
		close(fd);

		if (rv != MDADM_STATUS_SUCCESS)
			goto out;
	}

	super = st->sb;

	/* find first active disk in array */
	dl = super->disks;
	while (dl && (is_failed(&dl->disk) || dl->index == -1))
		dl = dl->next;

	if (!dl)
		goto out;

	/* find last lba used by subarrays */
	e = get_extents(super, dl, 0);
	if (!e)
		goto out;

	for (i = 0; e[i].size; i++)
		continue;
	if (i > 0)
		size = e[i - 1].start + e[i - 1].size;
	free(e);

	/* add the amount of space needed for metadata */
	size += imsm_min_reserved_sectors(super);

	c->min_size = size * 512;
	c->sector_size = super->sector_size;
	c->criteria_set = true;
	ret = MDADM_STATUS_SUCCESS;

out:
	if (free_superblock)
		free_super_imsm(st);

	if (ret != MDADM_STATUS_SUCCESS)
		c->criteria_set = false;

	return ret;
}
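
/* Worked example (illustrative numbers, not from real metadata): if the
 * last extent on the first active disk ends at start + size = 1953525168
 * sectors and imsm_min_reserved_sectors() returns 4096, the criteria become
 *
 *	c->min_size    = (1953525168 + 4096) * 512;   // bytes
 *	c->sector_size = super->sector_size;          // 512 or 4096
 *
 * i.e. a usable spare must cover the last LBA used by any subarray plus the
 * IMSM metadata reservation.
 */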
static char *imsm_find_array_devnm_by_subdev(int subdev, char *container)
{
	static char devnm[32];
	char subdev_name[20];
	struct mdstat_ent *mdstat;

	sprintf(subdev_name, "%d", subdev);
	mdstat = mdstat_by_subdev(subdev_name, container);
	if (!mdstat)
		return NULL;

	strcpy(devnm, mdstat->devnm);
	free_mdstat(mdstat);

	return devnm;
}
static int imsm_reshape_is_allowed_on_container(struct supertype *st,
						struct geo_params *geo,
						int *old_raid_disks,
						int direction)
{
	/* currently we only support increasing the number of devices
	 * for a container.  This increases the number of devices for each
	 * member array.  They must all be RAID0 or RAID5.
	 */
	int ret_val = 0;
	struct mdinfo *info, *member;
	int devices_that_can_grow = 0;

	dprintf("imsm: imsm_reshape_is_allowed_on_container(ENTER): st->devnm = (%s)\n", st->devnm);

	if (geo->size > 0 ||
	    geo->level != UnSet ||
	    geo->layout != UnSet ||
	    geo->chunksize != 0 ||
	    geo->raid_disks == UnSet) {
		dprintf("imsm: Container operation is allowed for raid disks number change only.\n");
		return ret_val;
	}

	if (direction == ROLLBACK_METADATA_CHANGES) {
		dprintf("imsm: Metadata changes rollback is not supported for container operation.\n");
		return ret_val;
	}

	info = container_content_imsm(st, NULL);
	for (member = info; member; member = member->next) {
		char *result;

		dprintf("imsm: checking device_num: %i\n",
			member->container_member);

		if (geo->raid_disks <= member->array.raid_disks) {
			/* we work on container for Online Capacity Expansion
			 * only so raid_disks has to grow
			 */
			dprintf("imsm: for container operation raid disks increase is required\n");
			break;
		}

		if (info->array.level != 0 && info->array.level != 5) {
			/* we cannot use this container with other raid level
			 */
			dprintf("imsm: for container operation wrong raid level (%i) detected\n",
				info->array.level);
			break;
		} else {
			/* check for platform support
			 * for this raid level configuration
			 */
			struct intel_super *super = st->sb;

			if (!is_raid_level_supported(super->orom,
						     member->array.level,
						     geo->raid_disks)) {
				dprintf("platform does not support raid%d with %d disk%s\n",
					info->array.level,
					geo->raid_disks,
					geo->raid_disks > 1 ? "s" : "");
				break;
			}
			/* check if component size is aligned to chunk size
			 */
			if (info->component_size %
			    (info->array.chunk_size / 512)) {
				dprintf("Component size is not aligned to chunk size\n");
				break;
			}
		}

		if (*old_raid_disks &&
		    info->array.raid_disks != *old_raid_disks)
			break;
		*old_raid_disks = info->array.raid_disks;

		/* All raid5 and raid0 volumes in container
		 * have to be ready for Online Capacity Expansion
		 * so they need to be assembled.  We have already
		 * checked that no recovery etc is happening.
		 */
		result = imsm_find_array_devnm_by_subdev(member->container_member,
							 st->container_devnm);
		if (result == NULL) {
			dprintf("imsm: cannot find array\n");
			break;
		}
		devices_that_can_grow++;
	}
	sysfs_free(info);
	if (!member && devices_that_can_grow)
		ret_val = 1;

	if (ret_val)
		dprintf("Container operation allowed\n");
	else
		dprintf("Error: %i\n", ret_val);

	return ret_val;
}
/* Function: get_spares_for_grow
 * Description: Allocates memory and creates a list of the spare devices
 *		available in the container. Checks if the spare drive size is
 *		acceptable.
 * Parameters: Pointer to the supertype structure
 * Returns: Pointer to the list of spare devices (mdinfo structure) on success,
 *	    NULL on failure
 */
static struct mdinfo *get_spares_for_grow(struct supertype *st)
{
	struct spare_criteria sc = {0};
	struct mdinfo *spares;

	get_spare_criteria_imsm(st, NULL, &sc);
	spares = container_choose_spares(st, &sc, NULL, NULL, NULL, 0);

	dev_policy_free(sc.pols);

	return spares;
}
/******************************************************************************
 * function: imsm_create_metadata_update_for_reshape
 * Function creates update for whole IMSM container.
 *
 ******************************************************************************/
static int imsm_create_metadata_update_for_reshape(
	struct supertype *st,
	struct geo_params *geo,
	int old_raid_disks,
	struct imsm_update_reshape **updatep)
{
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	int update_memory_size;
	struct imsm_update_reshape *u;
	struct mdinfo *spares;
	int i;
	int delta_disks;
	struct mdinfo *dev;

	dprintf("(enter) raid_disks = %i\n", geo->raid_disks);

	delta_disks = geo->raid_disks - old_raid_disks;

	/* size of all update data without anchor */
	update_memory_size = sizeof(struct imsm_update_reshape);

	/* now add space for spare disks that we need to add. */
	update_memory_size += sizeof(u->new_disks[0]) * (delta_disks - 1);

	u = xcalloc(1, update_memory_size);
	u->type = update_reshape_container_disks;
	u->old_raid_disks = old_raid_disks;
	u->new_raid_disks = geo->raid_disks;

	/* now get spare disks list
	 */
	spares = get_spares_for_grow(st);

	if (spares == NULL || delta_disks > spares->array.spare_disks) {
		pr_err("imsm: ERROR: Cannot get spare devices for %s.\n", geo->dev_name);
		i = -1;
		goto abort;
	}

	/* we have got spares
	 * update disk list in imsm_disk list table in anchor
	 */
	dprintf("imsm: %i spares are available.\n\n",
		spares->array.spare_disks);

	dev = spares->devs;
	for (i = 0; i < delta_disks; i++) {
		struct dl *dl;

		if (dev == NULL)
			break;
		u->new_disks[i] = makedev(dev->disk.major,
					  dev->disk.minor);
		dl = get_disk_super(super, dev->disk.major, dev->disk.minor);
		dl->index = mpb->num_disks;
		mpb->num_disks++;
		dev = dev->next;
	}

abort:
	/* free spares
	 */
	sysfs_free(spares);

	dprintf("imsm: reshape update preparation :");
	if (i == delta_disks) {
		dprintf_cont(" OK\n");
		*updatep = u;
		return update_memory_size;
	}
	free(u);
	dprintf_cont(" Error\n");

	return 0;
}
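
/* Sizing note (editor's comment): struct imsm_update_reshape ends with a
 * new_disks[] array that already holds one slot, which is why only
 * (delta_disks - 1) extra entries are allocated above. For example, growing
 * a container from 4 to 6 disks gives delta_disks = 2, so
 *
 *	update_memory_size = sizeof(struct imsm_update_reshape)
 *			   + sizeof(u->new_disks[0]) * 1;
 *
 * and new_disks[0] and new_disks[1] are both valid after xcalloc().
 */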
/******************************************************************************
 * function: imsm_create_metadata_update_for_size_change()
 * Creates update for IMSM array for array size change.
 *
 ******************************************************************************/
static int imsm_create_metadata_update_for_size_change(
	struct supertype *st,
	struct geo_params *geo,
	struct imsm_update_size_change **updatep)
{
	struct intel_super *super = st->sb;
	int update_memory_size;
	struct imsm_update_size_change *u;

	dprintf("(enter) New size = %llu\n", geo->size);

	/* size of all update data without anchor */
	update_memory_size = sizeof(struct imsm_update_size_change);

	u = xcalloc(1, update_memory_size);
	u->type = update_size_change;
	u->subdev = super->current_vol;
	u->new_size = geo->size;

	dprintf("imsm: reshape update preparation : OK\n");
	*updatep = u;

	return update_memory_size;
}
/******************************************************************************
 * function: imsm_create_metadata_update_for_migration()
 * Creates update for IMSM array.
 *
 ******************************************************************************/
static int imsm_create_metadata_update_for_migration(
	struct supertype *st,
	struct geo_params *geo,
	struct imsm_update_reshape_migration **updatep)
{
	struct intel_super *super = st->sb;
	int update_memory_size;
	int current_chunk_size;
	struct imsm_update_reshape_migration *u;
	struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	int previous_level = -1;

	dprintf("(enter) New Level = %i\n", geo->level);

	/* size of all update data without anchor */
	update_memory_size = sizeof(struct imsm_update_reshape_migration);

	u = xcalloc(1, update_memory_size);
	u->type = update_reshape_migration;
	u->subdev = super->current_vol;
	u->new_level = geo->level;
	u->new_layout = geo->layout;
	u->new_raid_disks = u->old_raid_disks = geo->raid_disks;
	u->new_disks[0] = -1;
	u->new_chunksize = -1;

	current_chunk_size = __le16_to_cpu(map->blocks_per_strip) / 2;

	if (geo->chunksize != current_chunk_size) {
		u->new_chunksize = geo->chunksize / 1024;
		dprintf("imsm: chunk size change from %i to %i\n",
			current_chunk_size, u->new_chunksize);
	}
	previous_level = map->raid_level;

	if (geo->level == 5 && previous_level == 0) {
		struct mdinfo *spares = NULL;

		u->new_raid_disks++;
		spares = get_spares_for_grow(st);
		if (spares == NULL || spares->array.spare_disks < 1) {
			free(u);
			sysfs_free(spares);
			update_memory_size = 0;
			pr_err("cannot get spare device for requested migration\n");
			return 0;
		}
		sysfs_free(spares);
	}
	dprintf("imsm: reshape update preparation : OK\n");
	*updatep = u;

	return update_memory_size;
}
static void imsm_update_metadata_locally(struct supertype *st,
					 void *buf, int len)
{
	struct metadata_update mu;

	mu.buf = buf;
	mu.len = len;
	mu.space = NULL;
	mu.space_list = NULL;
	mu.next = NULL;
	if (imsm_prepare_update(st, &mu))
		imsm_process_update(st, &mu);

	while (mu.space_list) {
		void **space = mu.space_list;
		mu.space_list = *space;
		free(space);
	}
}
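
/* Flow note (editor's comment): callers build one update payload and route
 * it through both paths, mirroring what mdmon would do:
 *
 *	imsm_update_metadata_locally(st, u, len);    // apply in this process
 *	if (st->update_tail)                         // mdmon owns the array
 *		append_metadata_update(st, u, len);  // queue it for mdmon
 *	else
 *		free(u);                             // nobody else needs it
 *
 * This keeps the metadata consistent whether or not a monitor is running.
 */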
/**
 * imsm_analyze_expand() - check expand properties and calculate new size.
 * @st: imsm supertype.
 * @geo: new geometry params.
 * @array: array info.
 * @direction: reshape direction.
 *
 * Obtain free space after the &array and verify if expand to requested size is
 * possible. If geo->size is set to %MAX_SIZE, assume that max free size is
 * available.
 *
 * On success %IMSM_STATUS_OK is returned, geo->size and geo->raid_disks are
 * updated.
 * On error, %IMSM_STATUS_ERROR is returned.
 */
static imsm_status_t imsm_analyze_expand(struct supertype *st,
					 struct geo_params *geo,
					 struct mdinfo *array,
					 int direction)
{
	struct intel_super *super = st->sb;
	struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
	struct imsm_map *map = get_imsm_map(dev, MAP_0);
	int data_disks = imsm_num_data_members(map);

	unsigned long long current_size;
	unsigned long long free_size;
	unsigned long long new_size;
	unsigned long long max_size;

	const int chunk_kib = geo->chunksize / 1024;
	imsm_status_t rv;

	if (direction == ROLLBACK_METADATA_CHANGES) {
		/**
		 * Accept size for rollback only.
		 */
		new_size = geo->size * 2;
		goto success;
	}

	if (data_disks == 0) {
		pr_err("imsm: Cannot retrieve data disks.\n");
		return IMSM_STATUS_ERROR;
	}
	current_size = array->custom_array_size / data_disks;

	rv = imsm_get_free_size(super, dev->vol.map->num_members, 0, chunk_kib,
				&free_size, true);
	if (rv != IMSM_STATUS_OK) {
		pr_err("imsm: Cannot find free space for expand.\n");
		return IMSM_STATUS_ERROR;
	}
	max_size = round_member_size_to_mb(free_size + current_size);

	if (geo->size == MAX_SIZE)
		new_size = max_size;
	else
		new_size = round_member_size_to_mb(geo->size * 2);

	if (new_size == 0) {
		pr_err("imsm: Rounded requested size is 0.\n");
		return IMSM_STATUS_ERROR;
	}

	if (new_size > max_size) {
		pr_err("imsm: Rounded requested size (%llu) is larger than free space available (%llu).\n",
		       new_size, max_size);
		return IMSM_STATUS_ERROR;
	}

	if (new_size == current_size) {
		pr_err("imsm: Rounded requested size (%llu) is same as current size (%llu).\n",
		       new_size, current_size);
		return IMSM_STATUS_ERROR;
	}

	if (new_size < current_size) {
		pr_err("imsm: Size reduction is not supported, rounded requested size (%llu) is smaller than current (%llu).\n",
		       new_size, current_size);
		return IMSM_STATUS_ERROR;
	}

success:
	dprintf("imsm: New size per member is %llu.\n", new_size);
	geo->size = data_disks * new_size;
	geo->raid_disks = dev->vol.map->num_members;
	return IMSM_STATUS_OK;
}
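
/* Unit note (editor's comment): the "* 2" conversions above turn a size
 * expressed in KiB into 512-byte sectors (1 KiB == 2 sectors). E.g. for a
 * requested per-member size of 10 GiB:
 *
 *	geo->size = 10485760;        // KiB
 *	new_size  = geo->size * 2;   // 20971520 sectors
 *
 * round_member_size_to_mb() then trims this to a whole-MiB boundary, and
 * the request is accepted only when current_size < new_size <= max_size.
 */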
/***************************************************************************
 * Function:	imsm_analyze_change
 * Description:	Analyzes the requested change for a single volume
 *		and validates whether the transition is supported.
 * Parameters:	Geometry parameters, supertype structure,
 *		metadata change direction (apply/rollback)
 * Returns:	Operation type code on success, -1 on failure
 ****************************************************************************/
enum imsm_reshape_type imsm_analyze_change(struct supertype *st,
					   struct geo_params *geo,
					   int direction, struct context *c)
{
	struct mdinfo info;
	int change = -1;
	int check_devs = 0;
	int chunk;
	/* imsm compatible layout value for array geometry verification */
	int imsm_layout = -1;
	int raid_disks = geo->raid_disks;
	imsm_status_t rv;

	getinfo_super_imsm_volume(st, &info, NULL);
	if (geo->level != info.array.level && geo->level >= IMSM_T_RAID0 &&
	    geo->level != UnSet) {
		switch (info.array.level) {
		case IMSM_T_RAID0:
			if (geo->level == IMSM_T_RAID5) {
				change = CH_MIGRATION;
				if (geo->layout != ALGORITHM_LEFT_ASYMMETRIC) {
					pr_err("Error. Requested Layout not supported (left-asymmetric layout is supported only)!\n");
					change = -1;
					goto analyse_change_exit;
				}
				imsm_layout = geo->layout;
				check_devs = 1;
				raid_disks += 1; /* parity disk added */
			} else if (geo->level == IMSM_T_RAID10) {
				if (geo->level == IMSM_T_RAID10 && geo->raid_disks > 2 &&
				    !c->force) {
					pr_err("Warning! VROC UEFI driver does not support RAID10 in requested layout.\n");
					pr_err("Array won't be suitable as boot device.\n");
					pr_err("Note: You can omit this check with \"--force\"\n");
					if (ask("Do you want to continue") < 1)
						return CH_ABORT;
				}
				change = CH_TAKEOVER;
				check_devs = 1;
				raid_disks *= 2; /* mirrors added */
				imsm_layout = 0x102; /* imsm supported layout */
			}
			break;
		case IMSM_T_RAID1:
		case IMSM_T_RAID10:
			if (geo->level == 0) {
				change = CH_TAKEOVER;
				check_devs = 1;
				raid_disks /= 2;
				imsm_layout = 0; /* imsm raid0 layout */
			}
			break;
		}
		if (change == -1) {
			pr_err("Error. Level Migration from %d to %d not supported!\n",
			       info.array.level, geo->level);
			goto analyse_change_exit;
		}
	} else
		geo->level = info.array.level;

	if (geo->layout != info.array.layout &&
	    (geo->layout != UnSet && geo->layout != -1)) {
		change = CH_MIGRATION;
		if (info.array.layout == 0 && info.array.level == IMSM_T_RAID5 &&
		    geo->layout == 5) {
			/* reshape 5 -> 4 */
		} else if (info.array.layout == 5 && info.array.level == IMSM_T_RAID5 &&
			   geo->layout == 0) {
			/* reshape 4 -> 5 */
		} else {
			pr_err("Error. Layout Migration from %d to %d not supported!\n",
			       info.array.layout, geo->layout);
			change = -1;
			goto analyse_change_exit;
		}
	} else {
		geo->layout = info.array.layout;
		if (imsm_layout == -1)
			imsm_layout = info.array.layout;
	}

	if (geo->chunksize > 0 && geo->chunksize != UnSet &&
	    geo->chunksize != info.array.chunk_size) {
		if (info.array.level == IMSM_T_RAID10) {
			pr_err("Error. Chunk size change for RAID 10 is not supported.\n");
			change = -1;
			goto analyse_change_exit;
		} else if (info.component_size % (geo->chunksize / 512)) {
			pr_err("New chunk size (%dK) does not evenly divide device size (%lluk). Aborting...\n",
			       geo->chunksize / 1024, info.component_size / 2);
			change = -1;
			goto analyse_change_exit;
		}
		change = CH_MIGRATION;
	} else {
		geo->chunksize = info.array.chunk_size;
	}

	if (geo->size > 0) {
		if (change != -1) {
			pr_err("Error. Size change should be the only one at a time.\n");
			change = -1;
			goto analyse_change_exit;
		}

		rv = imsm_analyze_expand(st, geo, &info, direction);
		if (rv != IMSM_STATUS_OK)
			goto analyse_change_exit;
		raid_disks = geo->raid_disks;
		change = CH_ARRAY_SIZE;
	}

	chunk = geo->chunksize / 1024;

	if (!validate_geometry_imsm(st,
				    geo->level,
				    imsm_layout,
				    raid_disks,
				    &chunk,
				    geo->size, INVALID_SECTORS,
				    0, 0, info.consistency_policy, 1))
		change = -1;

	if (check_devs) {
		struct intel_super *super = st->sb;
		struct imsm_super *mpb = super->anchor;

		if (mpb->num_raid_devs > 1) {
			pr_err("Error. Cannot perform operation on %s- for this operation "
			       "it MUST be single array in container\n", geo->dev_name);
			change = -1;
		}
	}

analyse_change_exit:
	if (direction == ROLLBACK_METADATA_CHANGES &&
	    (change == CH_MIGRATION || change == CH_TAKEOVER)) {
		dprintf("imsm: Metadata changes rollback is not supported for migration and takeover operations.\n");
		change = -1;
	}
	return change;
}
int imsm_takeover(struct supertype *st, struct geo_params *geo)
{
	struct intel_super *super = st->sb;
	struct imsm_update_takeover *u;

	u = xmalloc(sizeof(struct imsm_update_takeover));

	u->type = update_takeover;
	u->subarray = super->current_vol;

	/* 10->0 transition */
	if (geo->level == 0)
		u->direction = R10_TO_R0;

	/* 0->10 transition */
	if (geo->level == 10)
		u->direction = R0_TO_R10;

	/* update metadata locally */
	imsm_update_metadata_locally(st, u,
				     sizeof(struct imsm_update_takeover));
	/* and possibly remotely */
	if (st->update_tail)
		append_metadata_update(st, u,
				       sizeof(struct imsm_update_takeover));
	else
		free(u);

	return 0;
}
/* Flush size update if the size calculated by num_data_stripes is higher than
 * imsm_dev_size, to eliminate differences during reshape.
 * Mdmon will recalculate them correctly.
 * If the subarray index is not set, check the whole container.
 * Returns:
 *	0 - no error occurred
 *	1 - error detected
 */
static int imsm_fix_size_mismatch(struct supertype *st, int subarray_index)
{
	struct intel_super *super = st->sb;
	int tmp = super->current_vol;
	int ret_val = 1;
	int i;

	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		if (subarray_index >= 0 && i != subarray_index)
			continue;
		super->current_vol = i;
		struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
		struct imsm_map *map = get_imsm_map(dev, MAP_0);
		unsigned int disc_count = imsm_num_data_members(map);
		struct geo_params geo;
		struct imsm_update_size_change *update;
		unsigned long long calc_size = per_dev_array_size(map) * disc_count;
		unsigned long long d_size = imsm_dev_size(dev);
		int u_size;

		if (calc_size == d_size)
			continue;

		/* There is a difference, confirm that imsm_dev_size is
		 * smaller and push update.
		 */
		if (d_size > calc_size) {
			pr_err("imsm: dev size of subarray %d is incorrect\n",
			       i);
			goto exit;
		}
		memset(&geo, 0, sizeof(struct geo_params));
		geo.size = d_size;
		u_size = imsm_create_metadata_update_for_size_change(st, &geo,
								     &update);
		imsm_update_metadata_locally(st, update, u_size);
		if (st->update_tail) {
			append_metadata_update(st, update, u_size);
			flush_metadata_updates(st);
			st->update_tail = &st->updates;
		} else {
			imsm_sync_metadata(st);
			free(update);
		}
	}
	ret_val = 0;
exit:
	super->current_vol = tmp;
	return ret_val;
}
/**
 * shape_to_geo() - fill geo_params from shape.
 *
 * @shape: array details.
 * @geo: new geometry params.
 */
static void shape_to_geo(struct shape *shape, struct geo_params *geo)
{
	assert(shape);
	assert(geo);

	geo->dev_name = shape->dev;
	geo->size = shape->size;
	geo->level = shape->level;
	geo->layout = shape->layout;
	geo->chunksize = shape->chunk;
	geo->raid_disks = shape->raiddisks;
}
static int imsm_reshape_super(struct supertype *st, struct shape *shape, struct context *c)
{
	int ret_val = 1;
	struct geo_params geo = {0};

	dprintf("(enter)\n");

	shape_to_geo(shape, &geo);
	strcpy(geo.devnm, st->devnm);
	if (shape->delta_disks != UnSet)
		geo.raid_disks += shape->delta_disks;

	dprintf("for level      : %i\n", geo.level);
	dprintf("for raid_disks : %i\n", geo.raid_disks);

	if (strcmp(st->container_devnm, st->devnm) == 0) {
		/* On container level we can only increase number of devices. */
		dprintf("imsm: info: Container operation\n");
		int old_raid_disks = 0;

		if (imsm_reshape_is_allowed_on_container(
			    st, &geo, &old_raid_disks, shape->direction)) {
			struct imsm_update_reshape *u = NULL;
			int len;

			if (imsm_fix_size_mismatch(st, -1)) {
				dprintf("imsm: Cannot fix size mismatch\n");
				goto exit_imsm_reshape_super;
			}

			len = imsm_create_metadata_update_for_reshape(
				st, &geo, old_raid_disks, &u);

			if (len <= 0) {
				dprintf("imsm: Cannot prepare update\n");
				goto exit_imsm_reshape_super;
			}

			ret_val = 0;
			/* update metadata locally */
			imsm_update_metadata_locally(st, u, len);
			/* and possibly remotely */
			if (st->update_tail)
				append_metadata_update(st, u, len);
			else
				free(u);
		} else
			pr_err("(imsm) Operation is not allowed on this container\n");
	} else {
		/* On volume level we support following operations
		 * - takeover: raid10 -> raid0; raid0 -> raid10
		 * - chunk size migration
		 * - migration: raid5 -> raid0; raid0 -> raid5
		 */
		struct intel_super *super = st->sb;
		struct intel_dev *dev = super->devlist;
		int change;

		dprintf("imsm: info: Volume operation\n");
		/* find requested device */
		while (dev) {
			char *devnm =
				imsm_find_array_devnm_by_subdev(
					dev->index, st->container_devnm);
			if (devnm && strcmp(devnm, geo.devnm) == 0)
				break;
			dev = dev->next;
		}
		if (dev == NULL) {
			pr_err("Cannot find %s (%s) subarray\n",
			       geo.dev_name, geo.devnm);
			goto exit_imsm_reshape_super;
		}
		super->current_vol = dev->index;
		change = imsm_analyze_change(st, &geo, shape->direction, c);
		switch (change) {
		case CH_TAKEOVER:
			ret_val = imsm_takeover(st, &geo);
			break;
		case CH_MIGRATION: {
			struct imsm_update_reshape_migration *u = NULL;
			int len =
				imsm_create_metadata_update_for_migration(
					st, &geo, &u);
			if (len < 1) {
				dprintf("imsm: Cannot prepare update\n");
				break;
			}
			ret_val = 0;
			/* update metadata locally */
			imsm_update_metadata_locally(st, u, len);
			/* and possibly remotely */
			if (st->update_tail)
				append_metadata_update(st, u, len);
			else
				free(u);
			break;
		}
		case CH_ARRAY_SIZE: {
			struct imsm_update_size_change *u = NULL;
			int len =
				imsm_create_metadata_update_for_size_change(
					st, &geo, &u);
			if (len < 1) {
				dprintf("imsm: Cannot prepare update\n");
				break;
			}
			ret_val = 0;
			/* update metadata locally */
			imsm_update_metadata_locally(st, u, len);
			/* and possibly remotely */
			if (st->update_tail)
				append_metadata_update(st, u, len);
			else
				free(u);
			break;
		}
		default:
			ret_val = 1;
		}
	}

exit_imsm_reshape_super:
	dprintf("imsm: reshape_super Exit code = %i\n", ret_val);
	return ret_val;
}
#define COMPLETED_OK		0
#define COMPLETED_NONE		1
#define COMPLETED_DELAYED	2

static int read_completed(int fd, unsigned long long *val)
{
	int ret;
	char buf[SYSFS_MAX_BUF_SIZE];
	char *ep;

	ret = sysfs_fd_get_str(fd, buf, sizeof(buf));
	if (ret < 0)
		return ret;

	ret = COMPLETED_OK;
	if (str_is_none(buf) == true) {
		ret = COMPLETED_NONE;
	} else if (strncmp(buf, "delayed", 7) == 0) {
		ret = COMPLETED_DELAYED;
	} else {
		*val = strtoull(buf, &ep, 0);
		if (ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))
			ret = -1;
	}
	return ret;
}
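
/* Parsing note (editor's comment): md's sync_completed attribute holds
 * either a keyword or a "<done> / <total>" pair, so read_completed() maps
 * the observed forms onto the constants above, roughly:
 *
 *	"none\n"          -> COMPLETED_NONE     (no sync in progress)
 *	"delayed\n"       -> COMPLETED_DELAYED  (sync is queued)
 *	"4096 / 776704\n" -> COMPLETED_OK, *val = 4096
 *
 * The trailing-character check tolerates the space before the '/' while
 * rejecting anything strtoull() could not parse cleanly.
 */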
/*******************************************************************************
 * Function:	wait_for_reshape_imsm
 * Description:	Writes the new sync_max value and waits until
 *		the reshape process reaches the new position
 * Parameters:
 *	sra		: general array info
 *	ndata		: number of disks in new array's layout
 * Returns:
 *	 0 : success,
 *	 1 : there is no reshape in progress,
 *	-1 : fail
 ******************************************************************************/
int wait_for_reshape_imsm(struct mdinfo *sra, int ndata)
{
	int fd = sysfs_get_fd(sra, NULL, "sync_completed");
	int retry = 3;
	unsigned long long completed;
	/* to_complete : new sync_max position */
	unsigned long long to_complete = sra->reshape_progress;
	unsigned long long position_to_set = to_complete / ndata;

	if (!is_fd_valid(fd)) {
		dprintf("cannot open reshape_position\n");
		return 1;
	}

	do {
		if (sysfs_fd_get_ll(fd, &completed) < 0) {
			if (!retry) {
				dprintf("cannot read reshape_position (no reshape in progress)\n");
				close(fd);
				return 1;
			}
			retry--;
			sleep_for(0, MSEC_TO_NSEC(30), true);
		} else
			break;
	} while (retry);

	if (completed > position_to_set) {
		dprintf("wrong next position to set %llu (%llu)\n",
			to_complete, position_to_set);
		close(fd);
		return -1;
	}
	dprintf("Position set: %llu\n", position_to_set);
	if (sysfs_set_num(sra, NULL, "sync_max",
			  position_to_set) != 0) {
		dprintf("cannot set reshape position to %llu\n",
			position_to_set);
		close(fd);
		return -1;
	}

	do {
		int rc;
		char action[SYSFS_MAX_BUF_SIZE];
		int timeout = 3000;

		sysfs_wait(fd, &timeout);
		if (sysfs_get_str(sra, NULL, "sync_action",
				  action, sizeof(action)) > 0 &&
		    strncmp(action, "reshape", 7) != 0) {
			if (strncmp(action, "idle", 4) == 0)
				break;
			close(fd);
			return -1;
		}

		rc = read_completed(fd, &completed);
		if (rc < 0) {
			dprintf("cannot read reshape_position (in loop)\n");
			close(fd);
			return 1;
		} else if (rc == COMPLETED_NONE)
			break;
	} while (completed < position_to_set);

	close(fd);
	return 0;
}
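
/* Arithmetic note (editor's comment): sra->reshape_progress counts array
 * blocks across all data disks, while sync_max is a per-disk position,
 * hence position_to_set = to_complete / ndata. With ndata = 4 and
 * reshape_progress = 8192 this is equivalent to
 *
 *	echo 2048 > /sys/block/<dev>/md/sync_max
 *
 * followed by polling sync_completed until it reaches that per-disk offset.
 */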
/*******************************************************************************
 * Function:	check_degradation_change
 * Description:	Check that array hasn't become failed.
 * Parameters:
 *	info	: for sysfs access
 *	sources	: source disks descriptors
 *	degraded: previous degradation level
 * Returns:
 *	degradation level
 ******************************************************************************/
int check_degradation_change(struct mdinfo *info,
			     int *sources,
			     int degraded)
{
	unsigned long long new_degraded;
	int rv;

	rv = sysfs_get_ll(info, NULL, "degraded", &new_degraded);
	if (rv == -1 || (new_degraded != (unsigned long long)degraded)) {
		/* check each device to ensure it is still working */
		struct mdinfo *sd;

		new_degraded = 0;
		for (sd = info->devs; sd; sd = sd->next) {
			if (sd->disk.state & (1<<MD_DISK_FAULTY))
				continue;
			if (sd->disk.state & (1<<MD_DISK_SYNC)) {
				char sbuf[SYSFS_MAX_BUF_SIZE];
				int raid_disk = sd->disk.raid_disk;

				if (sysfs_get_str(info,
					sd, "state", sbuf, sizeof(sbuf)) < 0 ||
					strstr(sbuf, "faulty") ||
					strstr(sbuf, "in_sync") == NULL) {
					/* this device is dead */
					sd->disk.state = (1<<MD_DISK_FAULTY);
					if (raid_disk >= 0)
						close_fd(&sources[raid_disk]);
					new_degraded++;
				}
			}
		}
	}

	return new_degraded;
}
/*******************************************************************************
 * Function:	imsm_manage_reshape
 * Description:	Finds the array under reshape and manages the reshape
 *		process. It creates stripe backups (if required) and sets
 *		checkpoints.
 * Parameters:
 *	afd		: backup handle (native) - not used
 *	sra		: general array info
 *	reshape		: reshape parameters - not used
 *	st		: supertype structure
 *	backup_blocks	: size of critical section [blocks]
 *	fds		: table of source device descriptors
 *	offsets		: start of array (offset per device)
 *	dests		: not used
 *	destfd		: table of destination device descriptors
 *	destoffsets	: table of destination offsets (per device)
 * Returns:
 *	1 : success, reshape is done
 *	0 : fail
 ******************************************************************************/
static int imsm_manage_reshape(
	int afd, struct mdinfo *sra, struct reshape *reshape,
	struct supertype *st, unsigned long backup_blocks,
	int *fds, unsigned long long *offsets,
	int dests, int *destfd, unsigned long long *destoffsets)
{
	int ret_val = 0;
	struct intel_super *super = st->sb;
	struct intel_dev *dv;
	unsigned int sector_size = super->sector_size;
	struct imsm_dev *dev = NULL;
	struct imsm_map *map_src, *map_dest;
	int migr_vol_qan = 0;
	int ndata, odata; /* number of data members */
	int chunk; /* [bytes] */
	struct migr_record *migr_rec;
	char *buf = NULL;
	unsigned int buf_size; /* [bytes] */
	unsigned long long max_position; /* array size [bytes] */
	unsigned long long next_step; /* [blocks]/[bytes] */
	unsigned long long old_data_stripe_length;
	unsigned long long start_src; /* [bytes] */
	unsigned long long start; /* [bytes] */
	unsigned long long start_buf_shift; /* [bytes] */
	int degraded = 0;
	int source_layout = 0;
	int subarray_index = -1;

	if (!sra)
		return ret_val;

	if (!fds || !offsets)
		goto abort;

	/* Find volume during the reshape */
	for (dv = super->devlist; dv; dv = dv->next) {
		if (dv->dev->vol.migr_type == MIGR_GEN_MIGR &&
		    dv->dev->vol.migr_state == 1) {
			dev = dv->dev;
			migr_vol_qan++;
			subarray_index = dv->index;
		}
	}
	/* Only one volume can migrate at the same time */
	if (migr_vol_qan != 1) {
		pr_err("%s", migr_vol_qan ?
			"Number of migrating volumes greater than 1\n" :
			"There is no volume during migration\n");
		goto abort;
	}

	map_dest = get_imsm_map(dev, MAP_0);
	map_src = get_imsm_map(dev, MAP_1);
	if (map_src == NULL)
		goto abort;

	ndata = imsm_num_data_members(map_dest);
	odata = imsm_num_data_members(map_src);

	chunk = __le16_to_cpu(map_src->blocks_per_strip) * 512;
	old_data_stripe_length = odata * chunk;

	migr_rec = super->migr_rec;

	/* initialize migration record for start condition */
	if (sra->reshape_progress == 0)
		init_migr_record_imsm(st, dev, sra);
	else {
		if (__le32_to_cpu(migr_rec->rec_status) != UNIT_SRC_NORMAL) {
			pr_err("imsm: Cannot restart migration when data are present in copy area.\n"
			       "      Reassemble array to try to restore critical sector.\n");
			goto abort;
		}
		/* Save checkpoint to update migration record for current
		 * reshape position (in md). It can be farther than current
		 * reshape position in metadata.
		 */
		if (save_checkpoint_imsm(st, sra, UNIT_SRC_NORMAL) == 1) {
			/* ignore error == 2, this can mean end of reshape here
			 */
			dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_NORMAL, initial save)\n");
			goto abort;
		}
	}

	/* size for data */
	buf_size = __le32_to_cpu(migr_rec->blocks_per_unit) * 512;
	/* extend buffer size for parity disk */
	buf_size += __le32_to_cpu(migr_rec->dest_depth_per_unit) * 512;
	/* add space for stripe alignment */
	buf_size += old_data_stripe_length;
	if (posix_memalign((void **)&buf, MAX_SECTOR_SIZE, buf_size)) {
		dprintf("imsm: Cannot allocate checkpoint buffer\n");
		goto abort;
	}

	max_position = sra->component_size * ndata;
	source_layout = imsm_level_to_layout(map_src->raid_level);

	while (current_migr_unit(migr_rec) <
	       get_num_migr_units(migr_rec)) {
		/* current reshape position [blocks] */
		unsigned long long current_position =
			__le32_to_cpu(migr_rec->blocks_per_unit)
			* current_migr_unit(migr_rec);
		unsigned long long border;

		/* Check that array hasn't become failed.
		 */
		degraded = check_degradation_change(sra, fds, degraded);
		if (degraded > 1) {
			dprintf("imsm: Abort reshape due to degradation level (%i)\n", degraded);
			goto abort;
		}

		next_step = __le32_to_cpu(migr_rec->blocks_per_unit);

		if ((current_position + next_step) > max_position)
			next_step = max_position - current_position;

		start = current_position * 512;

		/* align reading start to old geometry */
		start_buf_shift = start % old_data_stripe_length;
		start_src = start - start_buf_shift;

		border = (start_src / odata) - (start / ndata);
		border /= 512;
		if (border <= __le32_to_cpu(migr_rec->dest_depth_per_unit)) {
			/* save critical stripes to buf
			 * start     - start address of current unit
			 *             to backup [bytes]
			 * start_src - start address of current unit
			 *             to backup aligned to source array
			 *             [bytes]
			 */
			unsigned long long next_step_filler;
			unsigned long long copy_length = next_step * 512;

			/* align copy area length to stripe in old geometry */
			next_step_filler = ((copy_length + start_buf_shift)
					    % old_data_stripe_length);
			if (next_step_filler)
				next_step_filler = (old_data_stripe_length
						    - next_step_filler);
			dprintf("save_stripes() parameters: start = %llu,\tstart_src = %llu,\tnext_step*512 = %llu,\tstart_in_buf_shift = %llu,\tnext_step_filler = %llu\n",
				start, start_src, copy_length,
				start_buf_shift, next_step_filler);

			if (save_stripes(fds, offsets, map_src->num_members,
					 chunk, map_src->raid_level,
					 source_layout, 0, NULL, start_src,
					 copy_length +
					 next_step_filler + start_buf_shift,
					 buf)) {
				dprintf("imsm: Cannot save stripes to buffer\n");
				goto abort;
			}
			/* Convert data to destination format and store it
			 * in backup general migration area
			 */
			if (save_backup_imsm(st, dev, sra,
				buf + start_buf_shift, copy_length)) {
				dprintf("imsm: Cannot save stripes to target devices\n");
				goto abort;
			}
			if (save_checkpoint_imsm(st, sra,
						 UNIT_SRC_IN_CP_AREA)) {
				dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_IN_CP_AREA)\n");
				goto abort;
			}
		} else {
			/* set next step to use whole border area */
			border /= next_step;
			if (border > 1)
				next_step *= border;
		}
		/* When data backed up, checkpoint stored,
		 * kick the kernel to reshape unit of data
		 */
		next_step = next_step + sra->reshape_progress;
		/* limit next step to array max position */
		if (next_step > max_position)
			next_step = max_position;
		sysfs_set_num(sra, NULL, "suspend_lo", sra->reshape_progress);
		sysfs_set_num(sra, NULL, "suspend_hi", next_step);
		sra->reshape_progress = next_step;

		/* wait until reshape finishes */
		if (wait_for_reshape_imsm(sra, ndata)) {
			dprintf("wait_for_reshape_imsm returned error!\n");
			goto abort;
		}

		if (save_checkpoint_imsm(st, sra, UNIT_SRC_NORMAL) == 1) {
			/* ignore error == 2, this can mean end of reshape here
			 */
			dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_NORMAL)\n");
			goto abort;
		}
	}

	/* clear migr_rec on disks after successful migration */
	struct dl *d;

	memset(super->migr_rec_buf, 0, MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE);
	for (d = super->disks; d; d = d->next) {
		if (d->index < 0 || is_failed(&d->disk))
			continue;
		unsigned long long dsize;

		get_dev_size(d->fd, NULL, &dsize);
		if (lseek64(d->fd, dsize - MIGR_REC_SECTOR_POSITION*sector_size,
			    SEEK_SET) >= 0) {
			if ((unsigned int)write(d->fd, super->migr_rec_buf,
			    MIGR_REC_BUF_SECTORS*sector_size) !=
			    MIGR_REC_BUF_SECTORS*sector_size)
				perror("Write migr_rec failed");
		}
	}

	/* return '1' if done */
	ret_val = 1;

	/* After the reshape eliminate size mismatch in metadata.
	 * Don't update md/component_size here, the volume doesn't have
	 * to take the whole space. It is allowed by kernel.
	 * md/component_size will be set properly after next assembly.
	 */
	imsm_fix_size_mismatch(st, subarray_index);

abort:
	free(buf);

	/* See Grow.c: abort_reshape() for further explanation */
	sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
	sysfs_set_num(sra, NULL, "suspend_hi", 0);
	sysfs_set_num(sra, NULL, "suspend_lo", 0);

	return ret_val;
}
/*******************************************************************************
 * Function:	calculate_bitmap_min_chunksize
 * Description:	Calculates the minimal valid bitmap chunk size
 * Parameters:
 *	max_bits	: indicate how many bits can be used for the bitmap
 *	data_area_size	: the size of the data area covered by the bitmap
 *
 * Returns:
 *	 The bitmap chunk size
 ******************************************************************************/
static unsigned long long
calculate_bitmap_min_chunksize(unsigned long long max_bits,
			       unsigned long long data_area_size)
{
	unsigned long long min_chunk =
		4096; /* sub-page chunks don't work yet.. */
	unsigned long long bits = data_area_size / min_chunk + 1;

	while (bits > max_bits) {
		min_chunk *= 2;
		bits = (bits + 1) / 2;
	}
	return min_chunk;
}
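
/* Worked example (illustrative): with max_bits = 1048576 and a 4 GiB data
 * area the initial bit count is
 *
 *	bits = 4294967296 / 4096 + 1 = 1048577   // one bit too many
 *
 * so a single pass doubles min_chunk to 8192 and halves bits to 524289,
 * which fits. Each doubling makes one bit cover twice as much data, at the
 * cost of coarser resync granularity.
 */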
/*******************************************************************************
 * Function:	calculate_bitmap_chunksize
 * Description:	Calculates the bitmap chunk size for the given device
 * Parameters:
 *	st	: supertype information
 *	dev	: device for the bitmap
 *
 * Returns:
 *	 The bitmap chunk size
 ******************************************************************************/
static unsigned long long calculate_bitmap_chunksize(struct supertype *st,
						     struct imsm_dev *dev)
{
	struct intel_super *super = st->sb;
	unsigned long long min_chunksize;
	unsigned long long result = IMSM_DEFAULT_BITMAP_CHUNKSIZE;
	size_t dev_size = imsm_dev_size(dev);

	min_chunksize = calculate_bitmap_min_chunksize(
		IMSM_BITMAP_AREA_SIZE * super->sector_size, dev_size);

	if (result < min_chunksize)
		result = min_chunksize;

	return result;
}
/*******************************************************************************
 * Function:	init_bitmap_header
 * Description:	Initialize the bitmap header structure
 * Parameters:
 *	st	: supertype information
 *	bms	: bitmap header struct to initialize
 *	dev	: device for the bitmap
 *
 * Returns:
 *	 0 : success
 *	-1 : fail
 ******************************************************************************/
static int init_bitmap_header(struct supertype *st, struct bitmap_super_s *bms,
			      struct imsm_dev *dev)
{
	int vol_uuid[4];

	if (!bms || !dev)
		return -1;

	bms->magic = __cpu_to_le32(BITMAP_MAGIC);
	bms->version = __cpu_to_le32(BITMAP_MAJOR_HI);
	bms->daemon_sleep = __cpu_to_le32(IMSM_DEFAULT_BITMAP_DAEMON_SLEEP);
	bms->sync_size = __cpu_to_le64(IMSM_BITMAP_AREA_SIZE);
	bms->write_behind = __cpu_to_le32(0);

	uuid_from_super_imsm(st, vol_uuid);
	memcpy(bms->uuid, vol_uuid, 16);

	bms->chunksize = calculate_bitmap_chunksize(st, dev);

	return 0;
}
/*******************************************************************************
 * Function:	validate_internal_bitmap_for_drive
 * Description:	Verify the bitmap header for a given drive.
 * Parameters:
 *	st	: supertype information
 *	offset	: the offset from the beginning of the drive where to look for
 *		  the bitmap header.
 *	d	: the drive info
 *
 * Returns:
 *	 0 : success
 *	-1 : fail
 ******************************************************************************/
static int validate_internal_bitmap_for_drive(struct supertype *st,
					      unsigned long long offset,
					      struct dl *d)
{
	struct intel_super *super = st->sb;
	int ret = -1;
	int vol_uuid[4];
	bitmap_super_t *bms;
	int fd;
	void *read_buf;

	if (!d)
		return -1;

	if (posix_memalign(&read_buf, MAX_SECTOR_SIZE, IMSM_BITMAP_HEADER_SIZE))
		return -1;

	fd = d->fd;
	if (!is_fd_valid(fd)) {
		fd = open(d->devname, O_RDONLY, 0);

		if (!is_fd_valid(fd)) {
			dprintf("cannot open the device %s\n", d->devname);
			goto abort;
		}
	}

	if (lseek64(fd, offset * super->sector_size, SEEK_SET) < 0)
		goto abort;
	if (read(fd, read_buf, IMSM_BITMAP_HEADER_SIZE) !=
	    IMSM_BITMAP_HEADER_SIZE)
		goto abort;

	uuid_from_super_imsm(st, vol_uuid);

	bms = read_buf;
	if ((bms->magic != __cpu_to_le32(BITMAP_MAGIC)) ||
	    (bms->version != __cpu_to_le32(BITMAP_MAJOR_HI)) ||
	    (!same_uuid((int *)bms->uuid, vol_uuid, st->ss->swapuuid))) {
		dprintf("wrong bitmap header detected\n");
		goto abort;
	}

	ret = 0;
abort:
	if (!is_fd_valid(d->fd))
		close_fd(&fd);

	if (read_buf)
		free(read_buf);

	return ret;
}
/*******************************************************************************
 * Function:	validate_internal_bitmap_imsm
 * Description:	Verify that the bitmap header is in place and holds proper data.
 * Parameters:
 *	st	: supertype information
 *
 * Returns:
 *	 0 : success or device w/o RWH_BITMAP
 *	-1 : fail
 ******************************************************************************/
static int validate_internal_bitmap_imsm(struct supertype *st)
{
	struct intel_super *super = st->sb;
	struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
	unsigned long long offset;
	struct dl *d;

	if (dev->rwh_policy != RWH_BITMAP)
		return 0;

	offset = get_bitmap_header_sector(super, super->current_vol);
	for (d = super->disks; d; d = d->next) {
		if (d->index < 0 || is_failed(&d->disk))
			continue;

		if (validate_internal_bitmap_for_drive(st, offset, d)) {
			pr_err("imsm: bitmap validation failed\n");
			return -1;
		}
	}
	return 0;
}
/*******************************************************************************
 * Function:	add_internal_bitmap_imsm
 * Description:	Marks the volume to use the bitmap and updates the chunk size value.
 * Parameters:
 *	st		: supertype information
 *	chunkp		: bitmap chunk size
 *	delay		: not used for imsm
 *	write_behind	: not used for imsm
 *	size		: not used for imsm
 *	may_change	: not used for imsm
 *	amajor		: not used for imsm
 *
 * Returns:
 *	 0 : success
 *	-1 : fail
 ******************************************************************************/
static int add_internal_bitmap_imsm(struct supertype *st, int *chunkp,
				    int delay, int write_behind,
				    unsigned long long size, int may_change,
				    int amajor)
{
	struct intel_super *super = st->sb;
	int vol_idx = super->current_vol;
	struct imsm_dev *dev;

	if (!super->devlist || vol_idx == -1 || !chunkp)
		return -1;

	dev = get_imsm_dev(super, vol_idx);
	dev->rwh_policy = RWH_BITMAP;
	*chunkp = calculate_bitmap_chunksize(st, dev);

	return 0;
}
/*******************************************************************************
 * Function:	locate_bitmap_imsm
 * Description:	Seek 'fd' to start of write-intent-bitmap.
 * Parameters:
 *	st		: supertype information
 *	fd		: file descriptor for the device
 *	node_num	: not used for imsm
 *
 * Returns:
 *	 0 : success
 *	-1 : fail
 ******************************************************************************/
static int locate_bitmap_imsm(struct supertype *st, int fd, int node_num)
{
	struct intel_super *super = st->sb;
	unsigned long long offset;
	int vol_idx = super->current_vol;

	if (!super->devlist || vol_idx == -1)
		return -1;

	offset = get_bitmap_header_sector(super, super->current_vol);
	dprintf("bitmap header offset is %llu\n", offset);

	lseek64(fd, offset << 9, 0);

	return 0;
}
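
/* Offset note (editor's comment): "offset << 9" converts a sector number
 * into a byte offset for lseek64() (1 << 9 == 512), e.g. sector 4096 maps
 * to byte 2097152. Compare validate_internal_bitmap_for_drive(), which
 * scales by super->sector_size instead; the two agree on drives with
 * 512-byte sectors.
 */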
/*******************************************************************************
 * Function:	write_init_bitmap_imsm
 * Description:	Writes a bitmap header and prepares the area for the bitmap.
 * Parameters:
 *	st	: supertype information
 *	fd	: file descriptor for the device
 *	update	: not used for imsm
 *
 * Returns:
 *	 0 : success
 *	-1 : fail
 ******************************************************************************/
static int write_init_bitmap_imsm(struct supertype *st, int fd,
				  enum bitmap_update update)
{
	struct intel_super *super = st->sb;
	int vol_idx = super->current_vol;
	int ret = 0;
	unsigned long long offset;
	bitmap_super_t bms = { 0 };
	size_t written = 0;
	size_t to_write;
	ssize_t rv_num;
	void *buf;

	if (!super->devlist || !super->sector_size || vol_idx == -1)
		return -1;

	struct imsm_dev *dev = get_imsm_dev(super, vol_idx);

	/* first clear the space for bitmap header */
	unsigned long long bitmap_area_start =
		get_bitmap_header_sector(super, vol_idx);

	dprintf("zeroing area start (%llu) and size (%u)\n", bitmap_area_start,
		IMSM_BITMAP_AND_HEADER_SIZE / super->sector_size);
	if (zero_disk_range(fd, bitmap_area_start,
			    IMSM_BITMAP_HEADER_SIZE / super->sector_size)) {
		pr_err("imsm: cannot zero the space for the bitmap\n");
		return -1;
	}

	/* The bitmap area should be filled with "1"s to perform initial
	 * synchronization.
	 */
	if (posix_memalign(&buf, MAX_SECTOR_SIZE, MAX_SECTOR_SIZE))
		return -1;
	memset(buf, 0xFF, MAX_SECTOR_SIZE);
	offset = get_bitmap_sector(super, vol_idx);
	lseek64(fd, offset << 9, 0);
	while (written < IMSM_BITMAP_AREA_SIZE) {
		to_write = IMSM_BITMAP_AREA_SIZE - written;
		if (to_write > MAX_SECTOR_SIZE)
			to_write = MAX_SECTOR_SIZE;
		rv_num = write(fd, buf, MAX_SECTOR_SIZE);
		if (rv_num != MAX_SECTOR_SIZE) {
			ret = -1;
			dprintf("cannot initialize bitmap area\n");
			goto abort;
		}
		written += rv_num;
	}

	/* write a bitmap header */
	init_bitmap_header(st, &bms, dev);
	memset(buf, 0, MAX_SECTOR_SIZE);
	memcpy(buf, &bms, sizeof(bitmap_super_t));
	if (locate_bitmap_imsm(st, fd, 0)) {
		ret = -1;
		dprintf("cannot locate the bitmap\n");
		goto abort;
	}
	if (write(fd, buf, MAX_SECTOR_SIZE) != MAX_SECTOR_SIZE) {
		ret = -1;
		dprintf("cannot write the bitmap header\n");
		goto abort;
	}
abort:
	free(buf);

	return ret;
}
/*******************************************************************************
 * Function:	is_vol_to_setup_bitmap
 * Description:	Checks if a bitmap should be activated on the dev.
 * Parameters:
 *	info	: info about the volume to setup the bitmap
 *	dev	: the device to check against bitmap creation
 *
 * Returns:
 *	 non-zero : bitmap should be set up on the device
 *	 0 : otherwise
 ******************************************************************************/
static int is_vol_to_setup_bitmap(struct mdinfo *info, struct imsm_dev *dev)
{
	if (!dev || !info)
		return 0;

	if ((strcmp((char *)dev->volume, info->name) == 0) &&
	    (dev->rwh_policy == RWH_BITMAP))
		return 1;

	return 0;
}
/*******************************************************************************
 * Function:	set_bitmap_sysfs
 * Description:	Sets the sysfs attributes of a given volume to activate the bitmap.
 * Parameters:
 *	info		: info about the volume where the bitmap should be setup
 *	chunksize	: bitmap chunk size
 *	location	: location of the bitmap
 *
 * Returns:
 *	 0 : success
 *	-1 : fail
 ******************************************************************************/
static int set_bitmap_sysfs(struct mdinfo *info, unsigned long long chunksize,
			    char *location)
{
	/* The bitmap/metadata is set to external to allow changing of value for
	 * bitmap/location. When external is used, the kernel will treat the offset
	 * as relative to the device's first lba (as opposed to the "internal" case,
	 * when this value is relative to the beginning of the superblock).
	 */
	if (sysfs_set_str(info, NULL, "bitmap/metadata", "external")) {
		dprintf("failed to set bitmap/metadata\n");
		return -1;
	}

	/* It can only be changed when no bitmap is active.
	 * Should be bigger than 512 and must be power of 2.
	 * It is expecting the value in bytes.
	 */
	if (sysfs_set_num(info, NULL, "bitmap/chunksize",
			  __cpu_to_le32(chunksize))) {
		dprintf("failed to set bitmap/chunksize\n");
		return -1;
	}

	/* It is expecting the value in sectors. */
	if (sysfs_set_num(info, NULL, "bitmap/space",
			  __cpu_to_le64(IMSM_BITMAP_AREA_SIZE))) {
		dprintf("failed to set bitmap/space\n");
		return -1;
	}

	/* Determines the delay between the bitmap updates.
	 * It is expecting the value in seconds.
	 */
	if (sysfs_set_num(info, NULL, "bitmap/time_base",
			  __cpu_to_le64(IMSM_DEFAULT_BITMAP_DAEMON_SLEEP))) {
		dprintf("failed to set bitmap/time_base\n");
		return -1;
	}

	/* It is expecting the value in sectors with a sign at the beginning. */
	if (sysfs_set_str(info, NULL, "bitmap/location", location)) {
		dprintf("failed to set bitmap/location\n");
		return -1;
	}

	return 0;
}
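
/* Usage sketch (illustrative values): for a volume with a 64 MiB bitmap
 * chunk whose bitmap starts 32768 sectors into the member devices, the
 * writes above (together with bitmap/space and bitmap/time_base) amount to:
 *
 *	echo external > .../md/bitmap/metadata
 *	echo 67108864 > .../md/bitmap/chunksize   # bytes
 *	echo +32768   > .../md/bitmap/location    # sectors, signed
 *
 * The real values come from calculate_bitmap_chunksize() and
 * get_bitmap_sector(); the numbers here are made up.
 */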
/*******************************************************************************
 * Function:	set_bitmap_imsm
 * Description:	Sets up the bitmap for the given volume
 * Parameters:
 *	st	: supertype information
 *	info	: info about the volume where the bitmap should be setup
 *
 * Returns:
 *	 0 : success
 *	-1 : fail
 ******************************************************************************/
static int set_bitmap_imsm(struct supertype *st, struct mdinfo *info)
{
	struct intel_super *super = st->sb;
	int prev_current_vol = super->current_vol;
	struct imsm_dev *dev;
	int ret = -1;
	char location[16] = "";
	unsigned long long chunksize;
	struct intel_dev *dev_it;

	for (dev_it = super->devlist; dev_it; dev_it = dev_it->next) {
		super->current_vol = dev_it->index;
		dev = get_imsm_dev(super, super->current_vol);

		if (is_vol_to_setup_bitmap(info, dev)) {
			if (validate_internal_bitmap_imsm(st)) {
				dprintf("bitmap header validation failed\n");
				goto abort;
			}

			chunksize = calculate_bitmap_chunksize(st, dev);
			dprintf("chunk size is %llu\n", chunksize);

			snprintf(location, sizeof(location), "+%llu",
				 get_bitmap_sector(super, super->current_vol));
			dprintf("bitmap offset is %s\n", location);

			if (set_bitmap_sysfs(info, chunksize, location)) {
				dprintf("cannot setup the bitmap\n");
				goto abort;
			}
		}
	}
	ret = 0;
abort:
	super->current_vol = prev_current_vol;
	return ret;
}
struct superswitch super_imsm = {
	.examine_super		= examine_super_imsm,
	.brief_examine_super	= brief_examine_super_imsm,
	.brief_examine_subarrays = brief_examine_subarrays_imsm,
	.export_examine_super	= export_examine_super_imsm,
	.detail_super		= detail_super_imsm,
	.brief_detail_super	= brief_detail_super_imsm,
	.write_init_super	= write_init_super_imsm,
	.validate_geometry	= validate_geometry_imsm,
	.add_to_super		= add_to_super_imsm,
	.remove_from_super	= remove_from_super_imsm,
	.detail_platform	= detail_platform_imsm,
	.export_detail_platform	= export_detail_platform_imsm,
	.kill_subarray		= kill_subarray_imsm,
	.update_subarray	= update_subarray_imsm,
	.load_container		= load_container_imsm,
	.default_geometry	= default_geometry_imsm,
	.test_and_add_drive_policies = test_and_add_drive_policies_imsm,
	.reshape_super		= imsm_reshape_super,
	.manage_reshape		= imsm_manage_reshape,
	.recover_backup		= recover_backup_imsm,
	.examine_badblocks	= examine_badblocks_imsm,
	.match_home		= match_home_imsm,
	.uuid_from_super	= uuid_from_super_imsm,
	.getinfo_super		= getinfo_super_imsm,
	.getinfo_super_disks	= getinfo_super_disks_imsm,
	.update_super		= update_super_imsm,

	.avail_size		= avail_size_imsm,
	.get_spare_criteria	= get_spare_criteria_imsm,

	.compare_super		= compare_super_imsm,

	.load_super		= load_super_imsm,
	.init_super		= init_super_imsm,
	.store_super		= store_super_imsm,
	.free_super		= free_super_imsm,
	.match_metadata_desc	= match_metadata_desc_imsm,
	.container_content	= container_content_imsm,
	.validate_container	= validate_container_imsm,

	.add_internal_bitmap	= add_internal_bitmap_imsm,
	.locate_bitmap		= locate_bitmap_imsm,
	.write_bitmap		= write_init_bitmap_imsm,
	.set_bitmap		= set_bitmap_imsm,

	.write_init_ppl		= write_init_ppl_imsm,
	.validate_ppl		= validate_ppl_imsm,

/* for mdmon */
	.open_new		= imsm_open_new,
	.set_array_state	= imsm_set_array_state,
	.set_disk		= imsm_set_disk,
	.sync_metadata		= imsm_sync_metadata,
	.activate_spare		= imsm_activate_spare,
	.process_update		= imsm_process_update,
	.prepare_update		= imsm_prepare_update,
	.record_bad_block	= imsm_record_badblock,
	.clear_bad_block	= imsm_clear_badblock,
	.get_bad_blocks		= imsm_get_badblocks,