1 /*
2 * mdadm - Intel(R) Matrix Storage Manager Support
3 *
4 * Copyright (C) 2002-2008 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #define HAVE_STDINT_H 1
21 #include "mdadm.h"
22 #include "mdmon.h"
23 #include "sha1.h"
24 #include "platform-intel.h"
25 #include <values.h>
26 #include <scsi/sg.h>
27 #include <ctype.h>
28 #include <dirent.h>
29
30 /* MPB == Metadata Parameter Block */
31 #define MPB_SIGNATURE "Intel Raid ISM Cfg Sig. "
32 #define MPB_SIG_LEN (strlen(MPB_SIGNATURE))
33 #define MPB_VERSION_RAID0 "1.0.00"
34 #define MPB_VERSION_RAID1 "1.1.00"
35 #define MPB_VERSION_MANY_VOLUMES_PER_ARRAY "1.2.00"
36 #define MPB_VERSION_3OR4_DISK_ARRAY "1.2.01"
37 #define MPB_VERSION_RAID5 "1.2.02"
38 #define MPB_VERSION_5OR6_DISK_ARRAY "1.2.04"
39 #define MPB_VERSION_CNG "1.2.06"
40 #define MPB_VERSION_ATTRIBS "1.3.00"
41 #define MAX_SIGNATURE_LENGTH 32
42 #define MAX_RAID_SERIAL_LEN 16
43
44 /* supports RAID0 */
45 #define MPB_ATTRIB_RAID0 __cpu_to_le32(0x00000001)
46 /* supports RAID1 */
47 #define MPB_ATTRIB_RAID1 __cpu_to_le32(0x00000002)
48 /* supports RAID10 */
49 #define MPB_ATTRIB_RAID10 __cpu_to_le32(0x00000004)
50 /* supports RAID1E */
51 #define MPB_ATTRIB_RAID1E __cpu_to_le32(0x00000008)
52 /* supports RAID5 */
53 #define MPB_ATTRIB_RAID5 __cpu_to_le32(0x00000010)
54 /* supports RAID CNG */
55 #define MPB_ATTRIB_RAIDCNG __cpu_to_le32(0x00000020)
56 /* supports expanded stripe sizes of 256K, 512K and 1MB */
57 #define MPB_ATTRIB_EXP_STRIPE_SIZE __cpu_to_le32(0x00000040)
58
59 /* The OROM supports RST Caching of Volumes */
60 #define MPB_ATTRIB_NVM __cpu_to_le32(0x02000000)
61 /* The OROM supports creating disks greater than 2TB */
62 #define MPB_ATTRIB_2TB_DISK __cpu_to_le32(0x04000000)
63 /* The OROM supports Bad Block Management */
64 #define MPB_ATTRIB_BBM __cpu_to_le32(0x08000000)
65
66 /* The OROM supports NVM Caching of Volumes */
67 #define MPB_ATTRIB_NEVER_USE2 __cpu_to_le32(0x10000000)
68 /* The OROM supports creating volumes greater than 2TB */
69 #define MPB_ATTRIB_2TB __cpu_to_le32(0x20000000)
70 /* originally for PMP; now wasted. Never use this bit! */
71 #define MPB_ATTRIB_NEVER_USE __cpu_to_le32(0x40000000)
72 /* Verify MPB contents against checksum after reading MPB */
73 #define MPB_ATTRIB_CHECKSUM_VERIFY __cpu_to_le32(0x80000000)
74
75 /* Define all supported attributes that have to be accepted by mdadm
76 */
77 #define MPB_ATTRIB_SUPPORTED (MPB_ATTRIB_CHECKSUM_VERIFY | \
78 MPB_ATTRIB_2TB | \
79 MPB_ATTRIB_2TB_DISK | \
80 MPB_ATTRIB_RAID0 | \
81 MPB_ATTRIB_RAID1 | \
82 MPB_ATTRIB_RAID10 | \
83 MPB_ATTRIB_RAID5 | \
84 MPB_ATTRIB_EXP_STRIPE_SIZE | \
85 MPB_ATTRIB_BBM)
86
87 /* Define attributes that are unused but not harmful */
88 #define MPB_ATTRIB_IGNORED (MPB_ATTRIB_NEVER_USE)
89
90 #define MPB_SECTOR_CNT 2210
91 #define IMSM_RESERVED_SECTORS 8192
92 #define NUM_BLOCKS_DIRTY_STRIPE_REGION 2048
93 #define SECT_PER_MB_SHIFT 11
94 #define MAX_SECTOR_SIZE 4096
95 #define MULTIPLE_PPL_AREA_SIZE_IMSM (1024 * 1024) /* Size of the whole
96 * multiple PPL area
97 */
98
99 /*
100 * The internal write-intent bitmap is stored in the same area as the PPL.
101 * Both features are mutually exclusive, so it is not an issue.
102 * The first 8KiB of the area are reserved and shall not be used.
103 */
104 #define IMSM_BITMAP_AREA_RESERVED_SIZE 8192
105
106 #define IMSM_BITMAP_HEADER_OFFSET (IMSM_BITMAP_AREA_RESERVED_SIZE)
107 #define IMSM_BITMAP_HEADER_SIZE MAX_SECTOR_SIZE
108
109 #define IMSM_BITMAP_START_OFFSET (IMSM_BITMAP_HEADER_OFFSET + IMSM_BITMAP_HEADER_SIZE)
110 #define IMSM_BITMAP_AREA_SIZE (MULTIPLE_PPL_AREA_SIZE_IMSM - IMSM_BITMAP_START_OFFSET)
111 #define IMSM_BITMAP_AND_HEADER_SIZE (IMSM_BITMAP_AREA_SIZE + IMSM_BITMAP_HEADER_SIZE)
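/* Worked example of the layout implied by the defines above (byte offsets
 * derived purely from the arithmetic of these macros, with
 * MAX_SECTOR_SIZE == 4096): reserved area [0, 8192), bitmap header
 * [8192, 12288), bitmap data [12288, 1048576); hence
 * IMSM_BITMAP_AREA_SIZE == 1048576 - 12288 == 1036288 bytes.
 */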
112
113 #define IMSM_DEFAULT_BITMAP_CHUNKSIZE (64 * 1024 * 1024)
114 #define IMSM_DEFAULT_BITMAP_DAEMON_SLEEP 5
115
116 /*
117 * This macro lets us ensure that no-one accidentally
118 * changes the size of a struct
119 */
120 #define ASSERT_SIZE(_struct, size) \
121 static inline void __assert_size_##_struct(void) \
122 { \
123 switch (0) { \
124 case 0: break; \
125 case (sizeof(struct _struct) == size): break; \
126 } \
127 }
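/* On a size mismatch, (sizeof(struct _struct) == size) evaluates to 0 and
 * the switch gains two duplicate "case 0:" labels, so compilation fails;
 * e.g. ASSERT_SIZE(imsm_disk, 48) breaks the build the moment
 * sizeof(struct imsm_disk) drifts from 48 bytes.
 */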
128
129 /* Disk configuration info. */
130 #define IMSM_MAX_DEVICES 255
131 struct imsm_disk {
132 __u8 serial[MAX_RAID_SERIAL_LEN];/* 0xD8 - 0xE7 ascii serial number */
133 __u32 total_blocks_lo; /* 0xE8 - 0xEB total blocks lo */
134 __u32 scsi_id; /* 0xEC - 0xEF scsi ID */
135 #define SPARE_DISK __cpu_to_le32(0x01) /* Spare */
136 #define CONFIGURED_DISK __cpu_to_le32(0x02) /* Member of some RaidDev */
137 #define FAILED_DISK __cpu_to_le32(0x04) /* Permanent failure */
138 #define JOURNAL_DISK __cpu_to_le32(0x2000000) /* Device marked as Journaling Drive */
139 __u32 status; /* 0xF0 - 0xF3 */
140 __u32 owner_cfg_num; /* which config 0,1,2... owns this disk */
141 __u32 total_blocks_hi; /* 0xF8 - 0xFB total blocks hi */
142 #define IMSM_DISK_FILLERS 3
143 __u32 filler[IMSM_DISK_FILLERS]; /* 0xFC - 0x107 MPB_DISK_FILLERS for future expansion */
144 };
145 ASSERT_SIZE(imsm_disk, 48)
146
147 /* map selector for map management
148 */
149 #define MAP_0 0
150 #define MAP_1 1
151 #define MAP_X -1
152
153 /* RAID map configuration infos. */
154 struct imsm_map {
155 __u32 pba_of_lba0_lo; /* start address of partition */
156 __u32 blocks_per_member_lo;/* blocks per member */
157 __u32 num_data_stripes_lo; /* number of data stripes */
158 __u16 blocks_per_strip;
159 __u8 map_state; /* Normal, Uninitialized, Degraded, Failed */
160 #define IMSM_T_STATE_NORMAL 0
161 #define IMSM_T_STATE_UNINITIALIZED 1
162 #define IMSM_T_STATE_DEGRADED 2
163 #define IMSM_T_STATE_FAILED 3
164 __u8 raid_level;
165 #define IMSM_T_RAID0 0
166 #define IMSM_T_RAID1 1
167 #define IMSM_T_RAID5 5 /* since metadata version 1.2.02 ? */
168 __u8 num_members; /* number of member disks */
169 __u8 num_domains; /* number of parity domains */
170 __u8 failed_disk_num; /* valid only when state is degraded */
171 __u8 ddf;
172 __u32 pba_of_lba0_hi;
173 __u32 blocks_per_member_hi;
174 __u32 num_data_stripes_hi;
175 __u32 filler[4]; /* expansion area */
176 #define IMSM_ORD_REBUILD (1 << 24)
177 __u32 disk_ord_tbl[1]; /* disk_ord_tbl[num_members],
178 * top byte contains some flags
179 */
180 };
181 ASSERT_SIZE(imsm_map, 52)
182
183 struct imsm_vol {
184 __u32 curr_migr_unit;
185 __u32 checkpoint_id; /* id to access curr_migr_unit */
186 __u8 migr_state; /* Normal or Migrating */
187 #define MIGR_INIT 0
188 #define MIGR_REBUILD 1
189 #define MIGR_VERIFY 2 /* analogous to echo check > sync_action */
190 #define MIGR_GEN_MIGR 3
191 #define MIGR_STATE_CHANGE 4
192 #define MIGR_REPAIR 5
193 __u8 migr_type; /* Initializing, Rebuilding, ... */
194 #define RAIDVOL_CLEAN 0
195 #define RAIDVOL_DIRTY 1
196 #define RAIDVOL_DSRECORD_VALID 2
197 __u8 dirty;
198 __u8 fs_state; /* fast-sync state for CnG (0xff == disabled) */
199 __u16 verify_errors; /* number of mismatches */
200 __u16 bad_blocks; /* number of bad blocks during verify */
201 __u32 filler[4];
202 struct imsm_map map[1];
203 /* here comes another one if migr_state */
204 };
205 ASSERT_SIZE(imsm_vol, 84)
206
207 struct imsm_dev {
208 __u8 volume[MAX_RAID_SERIAL_LEN];
209 __u32 size_low;
210 __u32 size_high;
211 #define DEV_BOOTABLE __cpu_to_le32(0x01)
212 #define DEV_BOOT_DEVICE __cpu_to_le32(0x02)
213 #define DEV_READ_COALESCING __cpu_to_le32(0x04)
214 #define DEV_WRITE_COALESCING __cpu_to_le32(0x08)
215 #define DEV_LAST_SHUTDOWN_DIRTY __cpu_to_le32(0x10)
216 #define DEV_HIDDEN_AT_BOOT __cpu_to_le32(0x20)
217 #define DEV_CURRENTLY_HIDDEN __cpu_to_le32(0x40)
218 #define DEV_VERIFY_AND_FIX __cpu_to_le32(0x80)
219 #define DEV_MAP_STATE_UNINIT __cpu_to_le32(0x100)
220 #define DEV_NO_AUTO_RECOVERY __cpu_to_le32(0x200)
221 #define DEV_CLONE_N_GO __cpu_to_le32(0x400)
222 #define DEV_CLONE_MAN_SYNC __cpu_to_le32(0x800)
223 #define DEV_CNG_MASTER_DISK_NUM __cpu_to_le32(0x1000)
224 __u32 status; /* Persistent RaidDev status */
225 __u32 reserved_blocks; /* Reserved blocks at beginning of volume */
226 __u8 migr_priority;
227 __u8 num_sub_vols;
228 __u8 tid;
229 __u8 cng_master_disk;
230 __u16 cache_policy;
231 __u8 cng_state;
232 __u8 cng_sub_state;
233 __u16 my_vol_raid_dev_num; /* Used in Unique volume Id for this RaidDev */
234
235 /* NVM_EN */
236 __u8 nv_cache_mode;
237 __u8 nv_cache_flags;
238
239 /* Unique Volume Id of the NvCache Volume associated with this volume */
240 __u32 nvc_vol_orig_family_num;
241 __u16 nvc_vol_raid_dev_num;
242
243 #define RWH_OFF 0
244 #define RWH_DISTRIBUTED 1
245 #define RWH_JOURNALING_DRIVE 2
246 #define RWH_MULTIPLE_DISTRIBUTED 3
247 #define RWH_MULTIPLE_PPLS_JOURNALING_DRIVE 4
248 #define RWH_MULTIPLE_OFF 5
249 #define RWH_BITMAP 6
250 __u8 rwh_policy; /* Raid Write Hole Policy */
251 __u8 jd_serial[MAX_RAID_SERIAL_LEN]; /* Journal Drive serial number */
252 __u8 filler1;
253
254 #define IMSM_DEV_FILLERS 3
255 __u32 filler[IMSM_DEV_FILLERS];
256 struct imsm_vol vol;
257 };
258 ASSERT_SIZE(imsm_dev, 164)
259
260 struct imsm_super {
261 __u8 sig[MAX_SIGNATURE_LENGTH]; /* 0x00 - 0x1F */
262 __u32 check_sum; /* 0x20 - 0x23 MPB Checksum */
263 __u32 mpb_size; /* 0x24 - 0x27 Size of MPB */
264 __u32 family_num; /* 0x28 - 0x2B Checksum from first time this config was written */
265 __u32 generation_num; /* 0x2C - 0x2F Incremented each time this array's MPB is written */
266 __u32 error_log_size; /* 0x30 - 0x33 in bytes */
267 __u32 attributes; /* 0x34 - 0x37 */
268 __u8 num_disks; /* 0x38 Number of configured disks */
269 __u8 num_raid_devs; /* 0x39 Number of configured volumes */
270 __u8 error_log_pos; /* 0x3A */
271 __u8 fill[1]; /* 0x3B */
272 __u32 cache_size; /* 0x3C - 0x3F in mb */
273 __u32 orig_family_num; /* 0x40 - 0x43 original family num */
274 __u32 pwr_cycle_count; /* 0x44 - 0x47 simulated power cycle count for array */
275 __u32 bbm_log_size; /* 0x48 - 0x4B - size of bad Block Mgmt Log in bytes */
276 __u16 num_raid_devs_created; /* 0x4C - 0x4D Used for generating unique
277 * volume IDs for raid_dev created in this array
278 * (starts at 1)
279 */
280 __u16 filler1; /* 0x4E - 0x4F */
281 __u64 creation_time; /* 0x50 - 0x57 Array creation time */
282 #define IMSM_FILLERS 32
283 __u32 filler[IMSM_FILLERS]; /* 0x58 - 0xD7 RAID_MPB_FILLERS */
284 struct imsm_disk disk[1]; /* 0xD8 diskTbl[numDisks] */
285 /* here comes imsm_dev[num_raid_devs] */
286 /* here comes BBM logs */
287 };
288 ASSERT_SIZE(imsm_super, 264)
289
290 #define BBM_LOG_MAX_ENTRIES 254
291 #define BBM_LOG_MAX_LBA_ENTRY_VAL 256 /* Represents 256 LBAs */
292 #define BBM_LOG_SIGNATURE 0xabadb10c
293
294 struct bbm_log_block_addr {
295 __u16 w1;
296 __u32 dw1;
297 } __attribute__ ((__packed__));
298
299 struct bbm_log_entry {
300 __u8 marked_count; /* Number of blocks marked - 1 */
301 __u8 disk_ordinal; /* Disk entry within the imsm_super */
302 struct bbm_log_block_addr defective_block_start;
303 } __attribute__ ((__packed__));
304
305 struct bbm_log {
306 __u32 signature; /* 0xABADB10C */
307 __u32 entry_count;
308 struct bbm_log_entry marked_block_entries[BBM_LOG_MAX_ENTRIES];
309 };
310 ASSERT_SIZE(bbm_log, 2040)
311
312 static char *map_state_str[] = { "normal", "uninitialized", "degraded", "failed" };
313
314 #define BLOCKS_PER_KB (1024/512)
315
316 #define RAID_DISK_RESERVED_BLOCKS_IMSM_HI 2209
317
318 #define GEN_MIGR_AREA_SIZE 2048 /* General Migration Copy Area size in blocks */
319
320 #define MIGR_REC_BUF_SECTORS 1 /* size of migr_record i/o buffer in sectors */
321 #define MIGR_REC_SECTOR_POSITION 1 /* migr_record position offset on disk,
322 * MIGR_REC_BUF_SECTORS <= MIGR_REC_SECTOR_POSITION
323 */
324
325 #define UNIT_SRC_NORMAL 0 /* Source data for curr_migr_unit must
326 * be recovered using srcMap */
327 #define UNIT_SRC_IN_CP_AREA 1 /* Source data for curr_migr_unit has
328 * already been migrated and must
329 * be recovered from checkpoint area */
330
331 #define PPL_ENTRY_SPACE (128 * 1024) /* Size of single PPL, without the header */
332
333 struct migr_record {
334 __u32 rec_status; /* Status used to determine how to restart
335 * migration in case it aborts
336 * in some fashion */
337 __u32 curr_migr_unit_lo; /* 0..numMigrUnits-1 */
338 __u32 family_num; /* Family number of MPB
339 * containing the RaidDev
340 * that is migrating */
341 __u32 ascending_migr; /* True if migrating in increasing
342 * order of lbas */
343 __u32 blocks_per_unit; /* Num disk blocks per unit of operation */
344 __u32 dest_depth_per_unit; /* Num member blocks each destMap
345 * member disk
346 * advances per unit-of-operation */
347 __u32 ckpt_area_pba_lo; /* Pba of first block of ckpt copy area */
348 __u32 dest_1st_member_lba_lo; /* First member lba on first
349 * stripe of destination */
350 __u32 num_migr_units_lo; /* Total num migration units-of-op */
351 __u32 post_migr_vol_cap; /* Size of volume after
352 * migration completes */
353 __u32 post_migr_vol_cap_hi; /* Expansion space for LBA64 */
354 __u32 ckpt_read_disk_num; /* Which member disk in destSubMap[0] the
355 * migration ckpt record was read from
356 * (for recovered migrations) */
357 __u32 curr_migr_unit_hi; /* 0..numMigrUnits-1 high order 32 bits */
358 __u32 ckpt_area_pba_hi; /* Pba of first block of ckpt copy area
359 * high order 32 bits */
360 __u32 dest_1st_member_lba_hi; /* First member lba on first stripe of
361 * destination - high order 32 bits */
362 __u32 num_migr_units_hi; /* Total num migration units-of-op
363 * high order 32 bits */
364 };
365 ASSERT_SIZE(migr_record, 64)
366
367 struct md_list {
368 /* usage marker:
369 * 1: load metadata
370 * 2: metadata does not match
371 * 4: already checked
372 */
373 int used;
374 char *devname;
375 int found;
376 int container;
377 dev_t st_rdev;
378 struct md_list *next;
379 };
380
381 #define pr_vrb(fmt, arg...) (void) (verbose && pr_err(fmt, ##arg))
382
383 static __u8 migr_type(struct imsm_dev *dev)
384 {
385 if (dev->vol.migr_type == MIGR_VERIFY &&
386 dev->status & DEV_VERIFY_AND_FIX)
387 return MIGR_REPAIR;
388 else
389 return dev->vol.migr_type;
390 }
391
392 static void set_migr_type(struct imsm_dev *dev, __u8 migr_type)
393 {
394 /* for compatibility with older oroms, convert MIGR_REPAIR into
395 * MIGR_VERIFY w/ DEV_VERIFY_AND_FIX status
396 */
397 if (migr_type == MIGR_REPAIR) {
398 dev->vol.migr_type = MIGR_VERIFY;
399 dev->status |= DEV_VERIFY_AND_FIX;
400 } else {
401 dev->vol.migr_type = migr_type;
402 dev->status &= ~DEV_VERIFY_AND_FIX;
403 }
404 }
405
406 static unsigned int sector_count(__u32 bytes, unsigned int sector_size)
407 {
408 return ROUND_UP(bytes, sector_size) / sector_size;
409 }
410
411 static unsigned int mpb_sectors(struct imsm_super *mpb,
412 unsigned int sector_size)
413 {
414 return sector_count(__le32_to_cpu(mpb->mpb_size), sector_size);
415 }
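/* Example (illustrative values): a 2200-byte MPB needs
 * sector_count(2200, 512) == ROUND_UP(2200, 512) / 512 == 5 sectors on a
 * 512-byte-sector drive, but only 1 sector when sector_size == 4096.
 */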
416
417 struct intel_dev {
418 struct imsm_dev *dev;
419 struct intel_dev *next;
420 unsigned index;
421 };
422
423 struct intel_hba {
424 enum sys_dev_type type;
425 char *path;
426 char *pci_id;
427 struct intel_hba *next;
428 };
429
430 enum action {
431 DISK_REMOVE = 1,
432 DISK_ADD
433 };
434 /* internal representation of IMSM metadata */
435 struct intel_super {
436 union {
437 void *buf; /* O_DIRECT buffer for reading/writing metadata */
438 struct imsm_super *anchor; /* immovable parameters */
439 };
440 union {
441 void *migr_rec_buf; /* buffer for I/O operations */
442 struct migr_record *migr_rec; /* migration record */
443 };
444 int clean_migration_record_by_mdmon; /* when reshape is switched to next
445 array, it indicates that mdmon is allowed to clean migration
446 record */
447 size_t len; /* size of the 'buf' allocation */
448 size_t extra_space; /* extra space in 'buf' that is not used yet */
449 void *next_buf; /* for realloc'ing buf from the manager */
450 size_t next_len;
451 int updates_pending; /* count of pending updates for mdmon */
452 int current_vol; /* index of raid device undergoing creation */
453 unsigned long long create_offset; /* common start for 'current_vol' */
454 __u32 random; /* random data for seeding new family numbers */
455 struct intel_dev *devlist;
456 unsigned int sector_size; /* sector size of used member drives */
457 struct dl {
458 struct dl *next;
459 int index;
460 __u8 serial[MAX_RAID_SERIAL_LEN];
461 int major, minor;
462 char *devname;
463 struct imsm_disk disk;
464 int fd;
465 int extent_cnt;
466 struct extent *e; /* for determining freespace @ create */
467 int raiddisk; /* slot to fill in autolayout */
468 enum action action;
469 } *disks, *current_disk;
470 struct dl *disk_mgmt_list; /* list of disks to add/remove while mdmon
471 active */
472 struct dl *missing; /* disks removed while we weren't looking */
473 struct bbm_log *bbm_log;
474 struct intel_hba *hba; /* device path of the raid controller for this metadata */
475 const struct imsm_orom *orom; /* platform firmware support */
476 struct intel_super *next; /* (temp) list for disambiguating family_num */
477 struct md_bb bb; /* memory for get_bad_blocks call */
478 };
479
480 struct intel_disk {
481 struct imsm_disk disk;
482 #define IMSM_UNKNOWN_OWNER (-1)
483 int owner;
484 struct intel_disk *next;
485 };
486
487 struct extent {
488 unsigned long long start, size;
489 };
490
491 /* definitions of reshape process types */
492 enum imsm_reshape_type {
493 CH_TAKEOVER,
494 CH_MIGRATION,
495 CH_ARRAY_SIZE,
496 };
497
498 /* definition of messages passed to imsm_process_update */
499 enum imsm_update_type {
500 update_activate_spare,
501 update_create_array,
502 update_kill_array,
503 update_rename_array,
504 update_add_remove_disk,
505 update_reshape_container_disks,
506 update_reshape_migration,
507 update_takeover,
508 update_general_migration_checkpoint,
509 update_size_change,
510 update_prealloc_badblocks_mem,
511 update_rwh_policy,
512 };
513
514 struct imsm_update_activate_spare {
515 enum imsm_update_type type;
516 struct dl *dl;
517 int slot;
518 int array;
519 struct imsm_update_activate_spare *next;
520 };
521
522 struct geo_params {
523 char devnm[32];
524 char *dev_name;
525 unsigned long long size;
526 int level;
527 int layout;
528 int chunksize;
529 int raid_disks;
530 };
531
532 enum takeover_direction {
533 R10_TO_R0,
534 R0_TO_R10
535 };
536 struct imsm_update_takeover {
537 enum imsm_update_type type;
538 int subarray;
539 enum takeover_direction direction;
540 };
541
542 struct imsm_update_reshape {
543 enum imsm_update_type type;
544 int old_raid_disks;
545 int new_raid_disks;
546
547 int new_disks[1]; /* new_raid_disks - old_raid_disks makedev number */
548 };
549
550 struct imsm_update_reshape_migration {
551 enum imsm_update_type type;
552 int old_raid_disks;
553 int new_raid_disks;
554 /* fields for array migration changes
555 */
556 int subdev;
557 int new_level;
558 int new_layout;
559 int new_chunksize;
560
561 int new_disks[1]; /* new_raid_disks - old_raid_disks makedev number */
562 };
563
564 struct imsm_update_size_change {
565 enum imsm_update_type type;
566 int subdev;
567 long long new_size;
568 };
569
570 struct imsm_update_general_migration_checkpoint {
571 enum imsm_update_type type;
572 __u32 curr_migr_unit;
573 };
574
575 struct disk_info {
576 __u8 serial[MAX_RAID_SERIAL_LEN];
577 };
578
579 struct imsm_update_create_array {
580 enum imsm_update_type type;
581 int dev_idx;
582 struct imsm_dev dev;
583 };
584
585 struct imsm_update_kill_array {
586 enum imsm_update_type type;
587 int dev_idx;
588 };
589
590 struct imsm_update_rename_array {
591 enum imsm_update_type type;
592 __u8 name[MAX_RAID_SERIAL_LEN];
593 int dev_idx;
594 };
595
596 struct imsm_update_add_remove_disk {
597 enum imsm_update_type type;
598 };
599
600 struct imsm_update_prealloc_bb_mem {
601 enum imsm_update_type type;
602 };
603
604 struct imsm_update_rwh_policy {
605 enum imsm_update_type type;
606 int new_policy;
607 int dev_idx;
608 };
609
610 static const char *_sys_dev_type[] = {
611 [SYS_DEV_UNKNOWN] = "Unknown",
612 [SYS_DEV_SAS] = "SAS",
613 [SYS_DEV_SATA] = "SATA",
614 [SYS_DEV_NVME] = "NVMe",
615 [SYS_DEV_VMD] = "VMD"
616 };
617
618 const char *get_sys_dev_type(enum sys_dev_type type)
619 {
620 if (type >= SYS_DEV_MAX)
621 type = SYS_DEV_UNKNOWN;
622
623 return _sys_dev_type[type];
624 }
625
626 static struct intel_hba * alloc_intel_hba(struct sys_dev *device)
627 {
628 struct intel_hba *result = xmalloc(sizeof(*result));
629
630 result->type = device->type;
631 result->path = xstrdup(device->path);
632 result->next = NULL;
633 if (result->path && (result->pci_id = strrchr(result->path, '/')) != NULL)
634 result->pci_id++;
635
636 return result;
637 }
638
639 static struct intel_hba * find_intel_hba(struct intel_hba *hba, struct sys_dev *device)
640 {
641 struct intel_hba *result;
642
643 for (result = hba; result; result = result->next) {
644 if (result->type == device->type && strcmp(result->path, device->path) == 0)
645 break;
646 }
647 return result;
648 }
649
650 static int attach_hba_to_super(struct intel_super *super, struct sys_dev *device)
651 {
652 struct intel_hba *hba;
653
654 /* check if disk attached to Intel HBA */
655 hba = find_intel_hba(super->hba, device);
656 if (hba != NULL)
657 return 1;
658 /* Check if HBA is already attached to super */
659 if (super->hba == NULL) {
660 super->hba = alloc_intel_hba(device);
661 return 1;
662 }
663
664 hba = super->hba;
665 /* Intel metadata requires all disks to be attached to HBAs of the same type.
666 * Mixing HBA types is not supported.
667 */
668 if (device->type != hba->type)
669 return 2;
670
671 /* Multiple same type HBAs can be used if they share the same OROM */
672 const struct imsm_orom *device_orom = get_orom_by_device_id(device->dev_id);
673
674 if (device_orom != super->orom)
675 return 2;
676
677 while (hba->next)
678 hba = hba->next;
679
680 hba->next = alloc_intel_hba(device);
681 return 1;
682 }
683
684 static struct sys_dev* find_disk_attached_hba(int fd, const char *devname)
685 {
686 struct sys_dev *list, *elem;
687 char *disk_path;
688
689 if ((list = find_intel_devices()) == NULL)
690 return 0;
691
692 if (fd < 0)
693 disk_path = (char *) devname;
694 else
695 disk_path = diskfd_to_devpath(fd);
696
697 if (!disk_path)
698 return 0;
699
700 for (elem = list; elem; elem = elem->next)
701 if (path_attached_to_hba(disk_path, elem->path))
702 return elem;
703
704 if (disk_path != devname)
705 free(disk_path);
706
707 return NULL;
708 }
709
710 static int find_intel_hba_capability(int fd, struct intel_super *super,
711 char *devname);
712
713 static struct supertype *match_metadata_desc_imsm(char *arg)
714 {
715 struct supertype *st;
716
717 if (strcmp(arg, "imsm") != 0 &&
718 strcmp(arg, "default") != 0
719 )
720 return NULL;
721
722 st = xcalloc(1, sizeof(*st));
723 st->ss = &super_imsm;
724 st->max_devs = IMSM_MAX_DEVICES;
725 st->minor_version = 0;
726 st->sb = NULL;
727 return st;
728 }
729
730 static __u8 *get_imsm_version(struct imsm_super *mpb)
731 {
732 return &mpb->sig[MPB_SIG_LEN];
733 }
734
735 /* retrieve a disk directly from the anchor when the anchor is known to be
736 * up-to-date, currently only at load time
737 */
738 static struct imsm_disk *__get_imsm_disk(struct imsm_super *mpb, __u8 index)
739 {
740 if (index >= mpb->num_disks)
741 return NULL;
742 return &mpb->disk[index];
743 }
744
745 /* retrieve the disk description based on an index of the disk
746 * in the sub-array
747 */
748 static struct dl *get_imsm_dl_disk(struct intel_super *super, __u8 index)
749 {
750 struct dl *d;
751
752 for (d = super->disks; d; d = d->next)
753 if (d->index == index)
754 return d;
755
756 return NULL;
757 }
758 /* retrieve a disk from the parsed metadata */
759 static struct imsm_disk *get_imsm_disk(struct intel_super *super, __u8 index)
760 {
761 struct dl *dl;
762
763 dl = get_imsm_dl_disk(super, index);
764 if (dl)
765 return &dl->disk;
766
767 return NULL;
768 }
769
770 /* generate a checksum directly from the anchor when the anchor is known to be
771 * up-to-date, currently only at load or write_super after coalescing
772 */
773 static __u32 __gen_imsm_checksum(struct imsm_super *mpb)
774 {
775 __u32 end = mpb->mpb_size / sizeof(end);
776 __u32 *p = (__u32 *) mpb;
777 __u32 sum = 0;
778
779 while (end--) {
780 sum += __le32_to_cpu(*p);
781 p++;
782 }
783
784 return sum - __le32_to_cpu(mpb->check_sum);
785 }
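/* Subtracting the stored check_sum word makes the result independent of
 * whatever checksum the anchor currently holds, so the same routine both
 * validates an existing MPB and computes the value to store in a new one.
 */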
786
787 static size_t sizeof_imsm_map(struct imsm_map *map)
788 {
789 return sizeof(struct imsm_map) + sizeof(__u32) * (map->num_members - 1);
790 }
791
792 struct imsm_map *get_imsm_map(struct imsm_dev *dev, int second_map)
793 {
794 /* A device can have 2 maps if it is in the middle of a migration.
795 * If second_map is:
796 * MAP_0 - we return the first map
797 * MAP_1 - we return the second map if it exists, else NULL
798 * MAP_X - we return the second map if it exists, else the first
799 */
800 struct imsm_map *map = &dev->vol.map[0];
801 struct imsm_map *map2 = NULL;
802
803 if (dev->vol.migr_state)
804 map2 = (void *)map + sizeof_imsm_map(map);
805
806 switch (second_map) {
807 case MAP_0:
808 break;
809 case MAP_1:
810 map = map2;
811 break;
812 case MAP_X:
813 if (map2)
814 map = map2;
815 break;
816 default:
817 map = NULL;
818 }
819 return map;
820
821 }
822
823 /* return the size of the device.
824 * migr_state increases the returned size if map[0] were to be duplicated
825 */
826 static size_t sizeof_imsm_dev(struct imsm_dev *dev, int migr_state)
827 {
828 size_t size = sizeof(*dev) - sizeof(struct imsm_map) +
829 sizeof_imsm_map(get_imsm_map(dev, MAP_0));
830
831 /* migrating means an additional map */
832 if (dev->vol.migr_state)
833 size += sizeof_imsm_map(get_imsm_map(dev, MAP_1));
834 else if (migr_state)
835 size += sizeof_imsm_map(get_imsm_map(dev, MAP_0));
836
837 return size;
838 }
839
840 /* retrieve disk serial number list from a metadata update */
841 static struct disk_info *get_disk_info(struct imsm_update_create_array *update)
842 {
843 void *u = update;
844 struct disk_info *inf;
845
846 inf = u + sizeof(*update) - sizeof(struct imsm_dev) +
847 sizeof_imsm_dev(&update->dev, 0);
848
849 return inf;
850 }
851
852 static struct imsm_dev *__get_imsm_dev(struct imsm_super *mpb, __u8 index)
853 {
854 int offset;
855 int i;
856 void *_mpb = mpb;
857
858 if (index >= mpb->num_raid_devs)
859 return NULL;
860
861 /* devices start after all disks */
862 offset = ((void *) &mpb->disk[mpb->num_disks]) - _mpb;
863
864 for (i = 0; i <= index; i++)
865 if (i == index)
866 return _mpb + offset;
867 else
868 offset += sizeof_imsm_dev(_mpb + offset, 0);
869
870 return NULL;
871 }
872
873 static struct imsm_dev *get_imsm_dev(struct intel_super *super, __u8 index)
874 {
875 struct intel_dev *dv;
876
877 if (index >= super->anchor->num_raid_devs)
878 return NULL;
879 for (dv = super->devlist; dv; dv = dv->next)
880 if (dv->index == index)
881 return dv->dev;
882 return NULL;
883 }
884
885 static inline unsigned long long __le48_to_cpu(const struct bbm_log_block_addr
886 *addr)
887 {
888 return ((((__u64)__le32_to_cpu(addr->dw1)) << 16) |
889 __le16_to_cpu(addr->w1));
890 }
891
892 static inline struct bbm_log_block_addr __cpu_to_le48(unsigned long long sec)
893 {
894 struct bbm_log_block_addr addr;
895
896 addr.w1 = __cpu_to_le16((__u16)(sec & 0xffff));
897 addr.dw1 = __cpu_to_le32((__u32)(sec >> 16) & 0xffffffff);
898 return addr;
899 }
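/* Example (hypothetical sector number): 0x123456789abc splits into
 * w1 == 0x9abc (low 16 bits) and dw1 == 0x12345678 (upper 32 bits);
 * __le48_to_cpu() reassembles it as (0x12345678 << 16) | 0x9abc.
 */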
900
901 /* get size of the bbm log */
902 static __u32 get_imsm_bbm_log_size(struct bbm_log *log)
903 {
904 if (!log || log->entry_count == 0)
905 return 0;
906
907 return sizeof(log->signature) +
908 sizeof(log->entry_count) +
909 log->entry_count * sizeof(struct bbm_log_entry);
910 }
911
912 /* check if the bbm log already holds an entry lying within the given bad block range */
913 static int is_stored_in_bbm(struct bbm_log *log, const __u8 idx, const unsigned
914 long long sector, const int length, __u32 *pos)
915 {
916 __u32 i;
917
918 for (i = *pos; i < log->entry_count; i++) {
919 struct bbm_log_entry *entry = &log->marked_block_entries[i];
920 unsigned long long bb_start;
921 unsigned long long bb_end;
922
923 bb_start = __le48_to_cpu(&entry->defective_block_start);
924 bb_end = bb_start + (entry->marked_count + 1);
925
926 if ((entry->disk_ordinal == idx) && (bb_start >= sector) &&
927 (bb_end <= sector + length)) {
928 *pos = i;
929 return 1;
930 }
931 }
932 return 0;
933 }
934
935 /* record new bad block in bbm log */
936 static int record_new_badblock(struct bbm_log *log, const __u8 idx, unsigned
937 long long sector, int length)
938 {
939 int new_bb = 0;
940 __u32 pos = 0;
941 struct bbm_log_entry *entry = NULL;
942
943 while (is_stored_in_bbm(log, idx, sector, length, &pos)) {
944 struct bbm_log_entry *e = &log->marked_block_entries[pos];
945
946 if ((e->marked_count + 1 == BBM_LOG_MAX_LBA_ENTRY_VAL) &&
947 (__le48_to_cpu(&e->defective_block_start) == sector)) {
948 sector += BBM_LOG_MAX_LBA_ENTRY_VAL;
949 length -= BBM_LOG_MAX_LBA_ENTRY_VAL;
950 pos = pos + 1;
951 continue;
952 }
953 entry = e;
954 break;
955 }
956
957 if (entry) {
958 int cnt = (length <= BBM_LOG_MAX_LBA_ENTRY_VAL) ? length :
959 BBM_LOG_MAX_LBA_ENTRY_VAL;
960 entry->defective_block_start = __cpu_to_le48(sector);
961 entry->marked_count = cnt - 1;
962 if (cnt == length)
963 return 1;
964 sector += cnt;
965 length -= cnt;
966 }
967
968 new_bb = ROUND_UP(length, BBM_LOG_MAX_LBA_ENTRY_VAL) /
969 BBM_LOG_MAX_LBA_ENTRY_VAL;
970 if (log->entry_count + new_bb > BBM_LOG_MAX_ENTRIES)
971 return 0;
972
973 while (length > 0) {
974 int cnt = (length <= BBM_LOG_MAX_LBA_ENTRY_VAL) ? length :
975 BBM_LOG_MAX_LBA_ENTRY_VAL;
976 struct bbm_log_entry *entry =
977 &log->marked_block_entries[log->entry_count];
978
979 entry->defective_block_start = __cpu_to_le48(sector);
980 entry->marked_count = cnt - 1;
981 entry->disk_ordinal = idx;
982
983 sector += cnt;
984 length -= cnt;
985
986 log->entry_count++;
987 }
988
989 return new_bb;
990 }
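/* Example (illustrative values): recording a 600-sector range with no
 * matching existing entries creates ROUND_UP(600, 256) / 256 == 3 new
 * entries covering 256 + 256 + 88 sectors; marked_count stores count - 1,
 * so one entry spans at most BBM_LOG_MAX_LBA_ENTRY_VAL (256) sectors.
 */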
991
992 /* clear all bad blocks for given disk */
993 static void clear_disk_badblocks(struct bbm_log *log, const __u8 idx)
994 {
995 __u32 i = 0;
996
997 while (i < log->entry_count) {
998 struct bbm_log_entry *entries = log->marked_block_entries;
999
1000 if (entries[i].disk_ordinal == idx) {
1001 if (i < log->entry_count - 1)
1002 entries[i] = entries[log->entry_count - 1];
1003 log->entry_count--;
1004 } else {
1005 i++;
1006 }
1007 }
1008 }
1009
1010 /* clear given bad block */
1011 static int clear_badblock(struct bbm_log *log, const __u8 idx, const unsigned
1012 long long sector, const int length) {
1013 __u32 i = 0;
1014
1015 while (i < log->entry_count) {
1016 struct bbm_log_entry *entries = log->marked_block_entries;
1017
1018 if ((entries[i].disk_ordinal == idx) &&
1019 (__le48_to_cpu(&entries[i].defective_block_start) ==
1020 sector) && (entries[i].marked_count + 1 == length)) {
1021 if (i < log->entry_count - 1)
1022 entries[i] = entries[log->entry_count - 1];
1023 log->entry_count--;
1024 break;
1025 }
1026 i++;
1027 }
1028
1029 return 1;
1030 }
1031
1032 /* allocate and load BBM log from metadata */
1033 static int load_bbm_log(struct intel_super *super)
1034 {
1035 struct imsm_super *mpb = super->anchor;
1036 __u32 bbm_log_size = __le32_to_cpu(mpb->bbm_log_size);
1037
1038 super->bbm_log = xcalloc(1, sizeof(struct bbm_log));
1039 if (!super->bbm_log)
1040 return 1;
1041
1042 if (bbm_log_size) {
1043 struct bbm_log *log = (void *)mpb +
1044 __le32_to_cpu(mpb->mpb_size) - bbm_log_size;
1045
1046 __u32 entry_count;
1047
1048 if (bbm_log_size < sizeof(log->signature) +
1049 sizeof(log->entry_count))
1050 return 2;
1051
1052 entry_count = __le32_to_cpu(log->entry_count);
1053 if ((__le32_to_cpu(log->signature) != BBM_LOG_SIGNATURE) ||
1054 (entry_count > BBM_LOG_MAX_ENTRIES))
1055 return 3;
1056
1057 if (bbm_log_size !=
1058 sizeof(log->signature) + sizeof(log->entry_count) +
1059 entry_count * sizeof(struct bbm_log_entry))
1060 return 4;
1061
1062 memcpy(super->bbm_log, log, bbm_log_size);
1063 } else {
1064 super->bbm_log->signature = __cpu_to_le32(BBM_LOG_SIGNATURE);
1065 super->bbm_log->entry_count = 0;
1066 }
1067
1068 return 0;
1069 }
1070
1071 /* checks if bad block is within volume boundaries */
1072 static int is_bad_block_in_volume(const struct bbm_log_entry *entry,
1073 const unsigned long long start_sector,
1074 const unsigned long long size)
1075 {
1076 unsigned long long bb_start;
1077 unsigned long long bb_end;
1078
1079 bb_start = __le48_to_cpu(&entry->defective_block_start);
1080 bb_end = bb_start + (entry->marked_count + 1);
1081
1082 if (((bb_start >= start_sector) && (bb_start < start_sector + size)) ||
1083 ((bb_end >= start_sector) && (bb_end <= start_sector + size)))
1084 return 1;
1085
1086 return 0;
1087 }
1088
1089 /* get list of bad blocks on a drive for a volume */
1090 static void get_volume_badblocks(const struct bbm_log *log, const __u8 idx,
1091 const unsigned long long start_sector,
1092 const unsigned long long size,
1093 struct md_bb *bbs)
1094 {
1095 __u32 count = 0;
1096 __u32 i;
1097
1098 for (i = 0; i < log->entry_count; i++) {
1099 const struct bbm_log_entry *ent =
1100 &log->marked_block_entries[i];
1101 struct md_bb_entry *bb;
1102
1103 if ((ent->disk_ordinal == idx) &&
1104 is_bad_block_in_volume(ent, start_sector, size)) {
1105
1106 if (!bbs->entries) {
1107 bbs->entries = xmalloc(BBM_LOG_MAX_ENTRIES *
1108 sizeof(*bb));
1109 if (!bbs->entries)
1110 break;
1111 }
1112
1113 bb = &bbs->entries[count++];
1114 bb->sector = __le48_to_cpu(&ent->defective_block_start);
1115 bb->length = ent->marked_count + 1;
1116 }
1117 }
1118 bbs->count = count;
1119 }
1120
1121 /*
1122 * for second_map:
1123 * == MAP_0 get first map
1124 * == MAP_1 get second map
1125 * == MAP_X then get map according to the current migr_state
1126 */
1127 static __u32 get_imsm_ord_tbl_ent(struct imsm_dev *dev,
1128 int slot,
1129 int second_map)
1130 {
1131 struct imsm_map *map;
1132
1133 map = get_imsm_map(dev, second_map);
1134
1135 /* top byte identifies disk under rebuild */
1136 return __le32_to_cpu(map->disk_ord_tbl[slot]);
1137 }
1138
1139 #define ord_to_idx(ord) (((ord) << 8) >> 8)
1140 static __u32 get_imsm_disk_idx(struct imsm_dev *dev, int slot, int second_map)
1141 {
1142 __u32 ord = get_imsm_ord_tbl_ent(dev, slot, second_map);
1143
1144 return ord_to_idx(ord);
1145 }
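/* Example: an ord of (IMSM_ORD_REBUILD | 5) names disk index 5 with the
 * rebuild flag set in the top byte; ord_to_idx() shifts the flag byte out.
 */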
1146
1147 static void set_imsm_ord_tbl_ent(struct imsm_map *map, int slot, __u32 ord)
1148 {
1149 map->disk_ord_tbl[slot] = __cpu_to_le32(ord);
1150 }
1151
1152 static int get_imsm_disk_slot(struct imsm_map *map, unsigned idx)
1153 {
1154 int slot;
1155 __u32 ord;
1156
1157 for (slot = 0; slot < map->num_members; slot++) {
1158 ord = __le32_to_cpu(map->disk_ord_tbl[slot]);
1159 if (ord_to_idx(ord) == idx)
1160 return slot;
1161 }
1162
1163 return -1;
1164 }
1165
1166 static int get_imsm_raid_level(struct imsm_map *map)
1167 {
1168 if (map->raid_level == 1) {
1169 if (map->num_members == 2)
1170 return 1;
1171 else
1172 return 10;
1173 }
1174
1175 return map->raid_level;
1176 }
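/* IMSM stores both RAID1 and RAID10 with raid_level == 1; only the member
 * count (2 vs. more) distinguishes them, as decoded above.
 */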
1177
1178 static int cmp_extent(const void *av, const void *bv)
1179 {
1180 const struct extent *a = av;
1181 const struct extent *b = bv;
1182 if (a->start < b->start)
1183 return -1;
1184 if (a->start > b->start)
1185 return 1;
1186 return 0;
1187 }
1188
1189 static int count_memberships(struct dl *dl, struct intel_super *super)
1190 {
1191 int memberships = 0;
1192 int i;
1193
1194 for (i = 0; i < super->anchor->num_raid_devs; i++) {
1195 struct imsm_dev *dev = get_imsm_dev(super, i);
1196 struct imsm_map *map = get_imsm_map(dev, MAP_0);
1197
1198 if (get_imsm_disk_slot(map, dl->index) >= 0)
1199 memberships++;
1200 }
1201
1202 return memberships;
1203 }
1204
1205 static __u32 imsm_min_reserved_sectors(struct intel_super *super);
1206
1207 static int split_ull(unsigned long long n, void *lo, void *hi)
1208 {
1209 if (lo == 0 || hi == 0)
1210 return 1;
1211 __put_unaligned32(__cpu_to_le32((__u32)n), lo);
1212 __put_unaligned32(__cpu_to_le32((n >> 32)), hi);
1213 return 0;
1214 }
1215
1216 static unsigned long long join_u32(__u32 lo, __u32 hi)
1217 {
1218 return (unsigned long long)__le32_to_cpu(lo) |
1219 (((unsigned long long)__le32_to_cpu(hi)) << 32);
1220 }
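/* Example: total_blocks_lo == 0 and total_blocks_hi == 1 join to
 * 0x100000000 sectors (2 TiB at 512 bytes per sector); split_ull() above
 * is the inverse operation.
 */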
1221
1222 static unsigned long long total_blocks(struct imsm_disk *disk)
1223 {
1224 if (disk == NULL)
1225 return 0;
1226 return join_u32(disk->total_blocks_lo, disk->total_blocks_hi);
1227 }
1228
1229 static unsigned long long pba_of_lba0(struct imsm_map *map)
1230 {
1231 if (map == NULL)
1232 return 0;
1233 return join_u32(map->pba_of_lba0_lo, map->pba_of_lba0_hi);
1234 }
1235
1236 static unsigned long long blocks_per_member(struct imsm_map *map)
1237 {
1238 if (map == NULL)
1239 return 0;
1240 return join_u32(map->blocks_per_member_lo, map->blocks_per_member_hi);
1241 }
1242
1243 static unsigned long long num_data_stripes(struct imsm_map *map)
1244 {
1245 if (map == NULL)
1246 return 0;
1247 return join_u32(map->num_data_stripes_lo, map->num_data_stripes_hi);
1248 }
1249
1250 static unsigned long long imsm_dev_size(struct imsm_dev *dev)
1251 {
1252 if (dev == NULL)
1253 return 0;
1254 return join_u32(dev->size_low, dev->size_high);
1255 }
1256
1257 static unsigned long long migr_chkp_area_pba(struct migr_record *migr_rec)
1258 {
1259 if (migr_rec == NULL)
1260 return 0;
1261 return join_u32(migr_rec->ckpt_area_pba_lo,
1262 migr_rec->ckpt_area_pba_hi);
1263 }
1264
1265 static unsigned long long current_migr_unit(struct migr_record *migr_rec)
1266 {
1267 if (migr_rec == NULL)
1268 return 0;
1269 return join_u32(migr_rec->curr_migr_unit_lo,
1270 migr_rec->curr_migr_unit_hi);
1271 }
1272
1273 static unsigned long long migr_dest_1st_member_lba(struct migr_record *migr_rec)
1274 {
1275 if (migr_rec == NULL)
1276 return 0;
1277 return join_u32(migr_rec->dest_1st_member_lba_lo,
1278 migr_rec->dest_1st_member_lba_hi);
1279 }
1280
1281 static unsigned long long get_num_migr_units(struct migr_record *migr_rec)
1282 {
1283 if (migr_rec == NULL)
1284 return 0;
1285 return join_u32(migr_rec->num_migr_units_lo,
1286 migr_rec->num_migr_units_hi);
1287 }
1288
1289 static void set_total_blocks(struct imsm_disk *disk, unsigned long long n)
1290 {
1291 split_ull(n, &disk->total_blocks_lo, &disk->total_blocks_hi);
1292 }
1293
1294 static void set_pba_of_lba0(struct imsm_map *map, unsigned long long n)
1295 {
1296 split_ull(n, &map->pba_of_lba0_lo, &map->pba_of_lba0_hi);
1297 }
1298
1299 static void set_blocks_per_member(struct imsm_map *map, unsigned long long n)
1300 {
1301 split_ull(n, &map->blocks_per_member_lo, &map->blocks_per_member_hi);
1302 }
1303
1304 static void set_num_data_stripes(struct imsm_map *map, unsigned long long n)
1305 {
1306 split_ull(n, &map->num_data_stripes_lo, &map->num_data_stripes_hi);
1307 }
1308
1309 static void set_imsm_dev_size(struct imsm_dev *dev, unsigned long long n)
1310 {
1311 split_ull(n, &dev->size_low, &dev->size_high);
1312 }
1313
1314 static void set_migr_chkp_area_pba(struct migr_record *migr_rec,
1315 unsigned long long n)
1316 {
1317 split_ull(n, &migr_rec->ckpt_area_pba_lo, &migr_rec->ckpt_area_pba_hi);
1318 }
1319
1320 static void set_current_migr_unit(struct migr_record *migr_rec,
1321 unsigned long long n)
1322 {
1323 split_ull(n, &migr_rec->curr_migr_unit_lo,
1324 &migr_rec->curr_migr_unit_hi);
1325 }
1326
1327 static void set_migr_dest_1st_member_lba(struct migr_record *migr_rec,
1328 unsigned long long n)
1329 {
1330 split_ull(n, &migr_rec->dest_1st_member_lba_lo,
1331 &migr_rec->dest_1st_member_lba_hi);
1332 }
1333
1334 static void set_num_migr_units(struct migr_record *migr_rec,
1335 unsigned long long n)
1336 {
1337 split_ull(n, &migr_rec->num_migr_units_lo,
1338 &migr_rec->num_migr_units_hi);
1339 }
1340
1341 static unsigned long long per_dev_array_size(struct imsm_map *map)
1342 {
1343 unsigned long long array_size = 0;
1344
1345 if (map == NULL)
1346 return array_size;
1347
1348 array_size = num_data_stripes(map) * map->blocks_per_strip;
1349 if (get_imsm_raid_level(map) == 1 || get_imsm_raid_level(map) == 10)
1350 array_size *= 2;
1351
1352 return array_size;
1353 }
1354
1355 static struct extent *get_extents(struct intel_super *super, struct dl *dl,
1356 int get_minimal_reservation)
1357 {
1358 /* find a list of used extents on the given physical device */
1359 struct extent *rv, *e;
1360 int i;
1361 int memberships = count_memberships(dl, super);
1362 __u32 reservation;
1363
1364 /* trim the reserved area for spares, so they can join any array
1365 * regardless of whether the OROM has assigned sectors from the
1366 * IMSM_RESERVED_SECTORS region
1367 */
1368 if (dl->index == -1 || get_minimal_reservation)
1369 reservation = imsm_min_reserved_sectors(super);
1370 else
1371 reservation = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
1372
1373 rv = xcalloc(sizeof(struct extent), (memberships + 1));
1374 e = rv;
1375
1376 for (i = 0; i < super->anchor->num_raid_devs; i++) {
1377 struct imsm_dev *dev = get_imsm_dev(super, i);
1378 struct imsm_map *map = get_imsm_map(dev, MAP_0);
1379
1380 if (get_imsm_disk_slot(map, dl->index) >= 0) {
1381 e->start = pba_of_lba0(map);
1382 e->size = per_dev_array_size(map);
1383 e++;
1384 }
1385 }
1386 qsort(rv, memberships, sizeof(*rv), cmp_extent);
1387
1388 /* determine the start of the metadata
1389 * when no raid devices are defined use the default
1390 * ...otherwise allow the metadata to truncate the value
1391 * as is the case with older versions of imsm
1392 */
1393 if (memberships) {
1394 struct extent *last = &rv[memberships - 1];
1395 unsigned long long remainder;
1396
1397 remainder = total_blocks(&dl->disk) - (last->start + last->size);
1398 /* round down to 1k block to satisfy precision of the kernel
1399 * 'size' interface
1400 */
1401 remainder &= ~1UL;
1402 /* make sure remainder is still sane */
1403 if (remainder < (unsigned)ROUND_UP(super->len, 512) >> 9)
1404 remainder = ROUND_UP(super->len, 512) >> 9;
1405 if (reservation > remainder)
1406 reservation = remainder;
1407 }
1408 e->start = total_blocks(&dl->disk) - reservation;
1409 e->size = 0;
1410 return rv;
1411 }
1412
1413 /* try to determine how much space is reserved for metadata from
1414 * the last get_extents() entry, otherwise fallback to the
1415 * default
1416 */
1417 static __u32 imsm_reserved_sectors(struct intel_super *super, struct dl *dl)
1418 {
1419 struct extent *e;
1420 int i;
1421 __u32 rv;
1422
1423 /* for spares just return a minimal reservation which will grow
1424 * once the spare is picked up by an array
1425 */
1426 if (dl->index == -1)
1427 return MPB_SECTOR_CNT;
1428
1429 e = get_extents(super, dl, 0);
1430 if (!e)
1431 return MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
1432
1433 /* scroll to last entry */
1434 for (i = 0; e[i].size; i++)
1435 continue;
1436
1437 rv = total_blocks(&dl->disk) - e[i].start;
1438
1439 free(e);
1440
1441 return rv;
1442 }
1443
1444 static int is_spare(struct imsm_disk *disk)
1445 {
1446 return (disk->status & SPARE_DISK) == SPARE_DISK;
1447 }
1448
1449 static int is_configured(struct imsm_disk *disk)
1450 {
1451 return (disk->status & CONFIGURED_DISK) == CONFIGURED_DISK;
1452 }
1453
1454 static int is_failed(struct imsm_disk *disk)
1455 {
1456 return (disk->status & FAILED_DISK) == FAILED_DISK;
1457 }
1458
1459 static int is_journal(struct imsm_disk *disk)
1460 {
1461 return (disk->status & JOURNAL_DISK) == JOURNAL_DISK;
1462 }
1463
1464 /* round array size down to closest MB and ensure it splits evenly
1465 * between members
1466 */
1467 static unsigned long long round_size_to_mb(unsigned long long size, unsigned int
1468 disk_count)
1469 {
1470 size /= disk_count;
1471 size = (size >> SECT_PER_MB_SHIFT) << SECT_PER_MB_SHIFT;
1472 size *= disk_count;
1473
1474 return size;
1475 }
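/* Example (illustrative values): size == 10000000 sectors over 3 disks
 * gives 3333333 sectors per disk, rounded down to 3332096 (a whole number
 * of MiB, since SECT_PER_MB_SHIFT == 11), for 9996288 sectors total.
 */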
1476
1477 static int able_to_resync(int raid_level, int missing_disks)
1478 {
1479 int max_missing_disks = 0;
1480
1481 switch (raid_level) {
1482 case 10:
1483 max_missing_disks = 1;
1484 break;
1485 default:
1486 max_missing_disks = 0;
1487 }
1488 return missing_disks <= max_missing_disks;
1489 }
1490
1491 /* try to determine how much space is reserved for metadata from
1492 * the last get_extents() entry on the smallest active disk,
1493 * otherwise fallback to the default
1494 */
1495 static __u32 imsm_min_reserved_sectors(struct intel_super *super)
1496 {
1497 struct extent *e;
1498 int i;
1499 unsigned long long min_active;
1500 __u32 remainder;
1501 __u32 rv = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
1502 struct dl *dl, *dl_min = NULL;
1503
1504 if (!super)
1505 return rv;
1506
1507 min_active = 0;
1508 for (dl = super->disks; dl; dl = dl->next) {
1509 if (dl->index < 0)
1510 continue;
1511 unsigned long long blocks = total_blocks(&dl->disk);
1512 if (blocks < min_active || min_active == 0) {
1513 dl_min = dl;
1514 min_active = blocks;
1515 }
1516 }
1517 if (!dl_min)
1518 return rv;
1519
1520 /* find last lba used by subarrays on the smallest active disk */
1521 e = get_extents(super, dl_min, 0);
1522 if (!e)
1523 return rv;
1524 for (i = 0; e[i].size; i++)
1525 continue;
1526
1527 remainder = min_active - e[i].start;
1528 free(e);
1529
1530 /* to give priority to recovery we should not require full
1531 IMSM_RESERVED_SECTORS from the spare */
1532 rv = MPB_SECTOR_CNT + NUM_BLOCKS_DIRTY_STRIPE_REGION;
1533
1534 /* if real reservation is smaller use that value */
1535 return (remainder < rv) ? remainder : rv;
1536 }
1537
1538 /*
1539 * Return minimum size of a spare and sector size
1540 * that can be used in this array
1541 */
1542 int get_spare_criteria_imsm(struct supertype *st, struct spare_criteria *c)
1543 {
1544 struct intel_super *super = st->sb;
1545 struct dl *dl;
1546 struct extent *e;
1547 int i;
1548 unsigned long long size = 0;
1549
1550 c->min_size = 0;
1551 c->sector_size = 0;
1552
1553 if (!super)
1554 return -EINVAL;
1555 /* find first active disk in array */
1556 dl = super->disks;
1557 while (dl && (is_failed(&dl->disk) || dl->index == -1))
1558 dl = dl->next;
1559 if (!dl)
1560 return -EINVAL;
1561 /* find last lba used by subarrays */
1562 e = get_extents(super, dl, 0);
1563 if (!e)
1564 return -EINVAL;
1565 for (i = 0; e[i].size; i++)
1566 continue;
1567 if (i > 0)
1568 size = e[i-1].start + e[i-1].size;
1569 free(e);
1570
1571 /* add the amount of space needed for metadata */
1572 size += imsm_min_reserved_sectors(super);
1573
1574 c->min_size = size * 512;
1575 c->sector_size = super->sector_size;
1576
1577 return 0;
1578 }
1579
1580 static int is_gen_migration(struct imsm_dev *dev);
1581
1582 #define IMSM_4K_DIV 8
1583
1584 static __u64 blocks_per_migr_unit(struct intel_super *super,
1585 struct imsm_dev *dev);
1586
1587 static void print_imsm_dev(struct intel_super *super,
1588 struct imsm_dev *dev,
1589 char *uuid,
1590 int disk_idx)
1591 {
1592 __u64 sz;
1593 int slot, i;
1594 struct imsm_map *map = get_imsm_map(dev, MAP_0);
1595 struct imsm_map *map2 = get_imsm_map(dev, MAP_1);
1596 __u32 ord;
1597
1598 printf("\n");
1599 printf("[%.16s]:\n", dev->volume);
1600 printf(" Subarray : %d\n", super->current_vol);
1601 printf(" UUID : %s\n", uuid);
1602 printf(" RAID Level : %d", get_imsm_raid_level(map));
1603 if (map2)
1604 printf(" <-- %d", get_imsm_raid_level(map2));
1605 printf("\n");
1606 printf(" Members : %d", map->num_members);
1607 if (map2)
1608 printf(" <-- %d", map2->num_members);
1609 printf("\n");
1610 printf(" Slots : [");
1611 for (i = 0; i < map->num_members; i++) {
1612 ord = get_imsm_ord_tbl_ent(dev, i, MAP_0);
1613 printf("%s", ord & IMSM_ORD_REBUILD ? "_" : "U");
1614 }
1615 printf("]");
1616 if (map2) {
1617 printf(" <-- [");
1618 for (i = 0; i < map2->num_members; i++) {
1619 ord = get_imsm_ord_tbl_ent(dev, i, MAP_1);
1620 printf("%s", ord & IMSM_ORD_REBUILD ? "_" : "U");
1621 }
1622 printf("]");
1623 }
1624 printf("\n");
1625 printf(" Failed disk : ");
1626 if (map->failed_disk_num == 0xff)
1627 printf("none");
1628 else
1629 printf("%i", map->failed_disk_num);
1630 printf("\n");
1631 slot = get_imsm_disk_slot(map, disk_idx);
1632 if (slot >= 0) {
1633 ord = get_imsm_ord_tbl_ent(dev, slot, MAP_X);
1634 printf(" This Slot : %d%s\n", slot,
1635 ord & IMSM_ORD_REBUILD ? " (out-of-sync)" : "");
1636 } else
1637 printf(" This Slot : ?\n");
1638 printf(" Sector Size : %u\n", super->sector_size);
1639 sz = imsm_dev_size(dev);
1640 printf(" Array Size : %llu%s\n",
1641 (unsigned long long)sz * 512 / super->sector_size,
1642 human_size(sz * 512));
1643 sz = blocks_per_member(map);
1644 printf(" Per Dev Size : %llu%s\n",
1645 (unsigned long long)sz * 512 / super->sector_size,
1646 human_size(sz * 512));
1647 printf(" Sector Offset : %llu\n",
1648 pba_of_lba0(map));
1649 printf(" Num Stripes : %llu\n",
1650 num_data_stripes(map));
1651 printf(" Chunk Size : %u KiB",
1652 __le16_to_cpu(map->blocks_per_strip) / 2);
1653 if (map2)
1654 printf(" <-- %u KiB",
1655 __le16_to_cpu(map2->blocks_per_strip) / 2);
1656 printf("\n");
1657 printf(" Reserved : %d\n", __le32_to_cpu(dev->reserved_blocks));
1658 printf(" Migrate State : ");
1659 if (dev->vol.migr_state) {
1660 if (migr_type(dev) == MIGR_INIT)
1661 printf("initialize\n");
1662 else if (migr_type(dev) == MIGR_REBUILD)
1663 printf("rebuild\n");
1664 else if (migr_type(dev) == MIGR_VERIFY)
1665 printf("check\n");
1666 else if (migr_type(dev) == MIGR_GEN_MIGR)
1667 printf("general migration\n");
1668 else if (migr_type(dev) == MIGR_STATE_CHANGE)
1669 printf("state change\n");
1670 else if (migr_type(dev) == MIGR_REPAIR)
1671 printf("repair\n");
1672 else
1673 printf("<unknown:%d>\n", migr_type(dev));
1674 } else
1675 printf("idle\n");
1676 printf(" Map State : %s", map_state_str[map->map_state]);
1677 if (dev->vol.migr_state) {
1678 struct imsm_map *map = get_imsm_map(dev, MAP_1);
1679
1680 printf(" <-- %s", map_state_str[map->map_state]);
1681 printf("\n Checkpoint : %u ",
1682 __le32_to_cpu(dev->vol.curr_migr_unit));
1683 if (is_gen_migration(dev) && (slot > 1 || slot < 0))
1684 printf("(N/A)");
1685 else
1686 printf("(%llu)", (unsigned long long)
1687 blocks_per_migr_unit(super, dev));
1688 }
1689 printf("\n");
1690 printf(" Dirty State : %s\n", (dev->vol.dirty & RAIDVOL_DIRTY) ?
1691 "dirty" : "clean");
1692 printf(" RWH Policy : ");
1693 if (dev->rwh_policy == RWH_OFF || dev->rwh_policy == RWH_MULTIPLE_OFF)
1694 printf("off\n");
1695 else if (dev->rwh_policy == RWH_DISTRIBUTED)
1696 printf("PPL distributed\n");
1697 else if (dev->rwh_policy == RWH_JOURNALING_DRIVE)
1698 printf("PPL journaling drive\n");
1699 else if (dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
1700 printf("Multiple distributed PPLs\n");
1701 else if (dev->rwh_policy == RWH_MULTIPLE_PPLS_JOURNALING_DRIVE)
1702 printf("Multiple PPLs on journaling drive\n");
1703 else if (dev->rwh_policy == RWH_BITMAP)
1704 printf("Write-intent bitmap\n");
1705 else
1706 printf("<unknown:%d>\n", dev->rwh_policy);
1707
1708 printf(" Volume ID : %u\n", dev->my_vol_raid_dev_num);
1709 }
1710
1711 static void print_imsm_disk(struct imsm_disk *disk,
1712 int index,
1713 __u32 reserved,
1714 unsigned int sector_size) {
1715 char str[MAX_RAID_SERIAL_LEN + 1];
1716 __u64 sz;
1717
1718 if (index < -1 || !disk)
1719 return;
1720
1721 printf("\n");
1722 snprintf(str, MAX_RAID_SERIAL_LEN + 1, "%s", disk->serial);
1723 if (index >= 0)
1724 printf(" Disk%02d Serial : %s\n", index, str);
1725 else
1726 printf(" Disk Serial : %s\n", str);
1727 printf(" State :%s%s%s%s\n", is_spare(disk) ? " spare" : "",
1728 is_configured(disk) ? " active" : "",
1729 is_failed(disk) ? " failed" : "",
1730 is_journal(disk) ? " journal" : "");
1731 printf(" Id : %08x\n", __le32_to_cpu(disk->scsi_id));
1732 sz = total_blocks(disk) - reserved;
1733 printf(" Usable Size : %llu%s\n",
1734 (unsigned long long)sz * 512 / sector_size,
1735 human_size(sz * 512));
1736 }
1737
1738 void convert_to_4k_imsm_migr_rec(struct intel_super *super)
1739 {
1740 struct migr_record *migr_rec = super->migr_rec;
1741
1742 migr_rec->blocks_per_unit /= IMSM_4K_DIV;
1743 migr_rec->dest_depth_per_unit /= IMSM_4K_DIV;
1744 split_ull((join_u32(migr_rec->post_migr_vol_cap,
1745 migr_rec->post_migr_vol_cap_hi) / IMSM_4K_DIV),
1746 &migr_rec->post_migr_vol_cap, &migr_rec->post_migr_vol_cap_hi);
1747 set_migr_chkp_area_pba(migr_rec,
1748 migr_chkp_area_pba(migr_rec) / IMSM_4K_DIV);
1749 set_migr_dest_1st_member_lba(migr_rec,
1750 migr_dest_1st_member_lba(migr_rec) / IMSM_4K_DIV);
1751 }
1752
1753 void convert_to_4k_imsm_disk(struct imsm_disk *disk)
1754 {
1755 set_total_blocks(disk, (total_blocks(disk)/IMSM_4K_DIV));
1756 }
1757
1758 void convert_to_4k(struct intel_super *super)
1759 {
1760 struct imsm_super *mpb = super->anchor;
1761 struct imsm_disk *disk;
1762 int i;
1763 __u32 bbm_log_size = __le32_to_cpu(mpb->bbm_log_size);
1764
1765 for (i = 0; i < mpb->num_disks ; i++) {
1766 disk = __get_imsm_disk(mpb, i);
1767 /* disk */
1768 convert_to_4k_imsm_disk(disk);
1769 }
1770 for (i = 0; i < mpb->num_raid_devs; i++) {
1771 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
1772 struct imsm_map *map = get_imsm_map(dev, MAP_0);
1773 /* dev */
1774 set_imsm_dev_size(dev, imsm_dev_size(dev)/IMSM_4K_DIV);
1775 dev->vol.curr_migr_unit /= IMSM_4K_DIV;
1776
1777 /* map0 */
1778 set_blocks_per_member(map, blocks_per_member(map)/IMSM_4K_DIV);
1779 map->blocks_per_strip /= IMSM_4K_DIV;
1780 set_pba_of_lba0(map, pba_of_lba0(map)/IMSM_4K_DIV);
1781
1782 if (dev->vol.migr_state) {
1783 /* map1 */
1784 map = get_imsm_map(dev, MAP_1);
1785 set_blocks_per_member(map,
1786 blocks_per_member(map)/IMSM_4K_DIV);
1787 map->blocks_per_strip /= IMSM_4K_DIV;
1788 set_pba_of_lba0(map, pba_of_lba0(map)/IMSM_4K_DIV);
1789 }
1790 }
1791 if (bbm_log_size) {
1792 struct bbm_log *log = (void *)mpb +
1793 __le32_to_cpu(mpb->mpb_size) - bbm_log_size;
1794 __u32 i;
1795
1796 for (i = 0; i < log->entry_count; i++) {
1797 struct bbm_log_entry *entry =
1798 &log->marked_block_entries[i];
1799
1800 __u8 count = entry->marked_count + 1;
1801 unsigned long long sector =
1802 __le48_to_cpu(&entry->defective_block_start);
1803
1804 entry->defective_block_start =
1805 __cpu_to_le48(sector/IMSM_4K_DIV);
1806 entry->marked_count = max(count/IMSM_4K_DIV, 1) - 1;
1807 }
1808 }
1809
1810 mpb->check_sum = __gen_imsm_checksum(mpb);
1811 }
1812
1813 void examine_migr_rec_imsm(struct intel_super *super)
1814 {
1815 struct migr_record *migr_rec = super->migr_rec;
1816 struct imsm_super *mpb = super->anchor;
1817 int i;
1818
1819 for (i = 0; i < mpb->num_raid_devs; i++) {
1820 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
1821 struct imsm_map *map;
1822 int slot = -1;
1823
1824 if (is_gen_migration(dev) == 0)
1825 continue;
1826
1827 printf("\nMigration Record Information:");
1828
1829 /* first map under migration */
1830 map = get_imsm_map(dev, MAP_0);
1831 if (map)
1832 slot = get_imsm_disk_slot(map, super->disks->index);
1833 if (map == NULL || slot > 1 || slot < 0) {
1834 printf(" Empty\n ");
1835 printf("Examine one of first two disks in array\n");
1836 break;
1837 }
1838 printf("\n Status : ");
1839 if (__le32_to_cpu(migr_rec->rec_status) == UNIT_SRC_NORMAL)
1840 printf("Normal\n");
1841 else
1842 printf("Contains Data\n");
1843 printf(" Current Unit : %llu\n",
1844 current_migr_unit(migr_rec));
1845 printf(" Family : %u\n",
1846 __le32_to_cpu(migr_rec->family_num));
1847 printf(" Ascending : %u\n",
1848 __le32_to_cpu(migr_rec->ascending_migr));
1849 printf(" Blocks Per Unit : %u\n",
1850 __le32_to_cpu(migr_rec->blocks_per_unit));
1851 printf(" Dest. Depth Per Unit : %u\n",
1852 __le32_to_cpu(migr_rec->dest_depth_per_unit));
1853 printf(" Checkpoint Area pba : %llu\n",
1854 migr_chkp_area_pba(migr_rec));
1855 printf(" First member lba : %llu\n",
1856 migr_dest_1st_member_lba(migr_rec));
1857 printf(" Total Number of Units : %llu\n",
1858 get_num_migr_units(migr_rec));
1859 printf(" Size of volume : %llu\n",
1860 join_u32(migr_rec->post_migr_vol_cap,
1861 migr_rec->post_migr_vol_cap_hi));
1862 printf(" Record was read from : %u\n",
1863 __le32_to_cpu(migr_rec->ckpt_read_disk_num));
1864
1865 break;
1866 }
1867 }
1868
1869 void convert_from_4k_imsm_migr_rec(struct intel_super *super)
1870 {
1871 struct migr_record *migr_rec = super->migr_rec;
1872
1873 migr_rec->blocks_per_unit *= IMSM_4K_DIV;
1874 migr_rec->dest_depth_per_unit *= IMSM_4K_DIV;
1875 split_ull((join_u32(migr_rec->post_migr_vol_cap,
1876 migr_rec->post_migr_vol_cap_hi) * IMSM_4K_DIV),
1877 &migr_rec->post_migr_vol_cap,
1878 &migr_rec->post_migr_vol_cap_hi);
1879 set_migr_chkp_area_pba(migr_rec,
1880 migr_chkp_area_pba(migr_rec) * IMSM_4K_DIV);
1881 set_migr_dest_1st_member_lba(migr_rec,
1882 migr_dest_1st_member_lba(migr_rec) * IMSM_4K_DIV);
1883 }
1884
1885 void convert_from_4k(struct intel_super *super)
1886 {
1887 struct imsm_super *mpb = super->anchor;
1888 struct imsm_disk *disk;
1889 int i;
1890 __u32 bbm_log_size = __le32_to_cpu(mpb->bbm_log_size);
1891
1892 for (i = 0; i < mpb->num_disks ; i++) {
1893 disk = __get_imsm_disk(mpb, i);
1894 /* disk */
1895 set_total_blocks(disk, (total_blocks(disk)*IMSM_4K_DIV));
1896 }
1897
1898 for (i = 0; i < mpb->num_raid_devs; i++) {
1899 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
1900 struct imsm_map *map = get_imsm_map(dev, MAP_0);
1901 /* dev */
1902 set_imsm_dev_size(dev, imsm_dev_size(dev)*IMSM_4K_DIV);
1903 dev->vol.curr_migr_unit *= IMSM_4K_DIV;
1904
1905 /* map0 */
1906 set_blocks_per_member(map, blocks_per_member(map)*IMSM_4K_DIV);
1907 map->blocks_per_strip *= IMSM_4K_DIV;
1908 set_pba_of_lba0(map, pba_of_lba0(map)*IMSM_4K_DIV);
1909
1910 if (dev->vol.migr_state) {
1911 /* map1 */
1912 map = get_imsm_map(dev, MAP_1);
1913 set_blocks_per_member(map,
1914 blocks_per_member(map)*IMSM_4K_DIV);
1915 map->blocks_per_strip *= IMSM_4K_DIV;
1916 set_pba_of_lba0(map, pba_of_lba0(map)*IMSM_4K_DIV);
1917 }
1918 }
1919 if (bbm_log_size) {
1920 struct bbm_log *log = (void *)mpb +
1921 __le32_to_cpu(mpb->mpb_size) - bbm_log_size;
1922 __u32 i;
1923
1924 for (i = 0; i < log->entry_count; i++) {
1925 struct bbm_log_entry *entry =
1926 &log->marked_block_entries[i];
1927
1928 __u8 count = entry->marked_count + 1;
1929 unsigned long long sector =
1930 __le48_to_cpu(&entry->defective_block_start);
1931
1932 entry->defective_block_start =
1933 __cpu_to_le48(sector*IMSM_4K_DIV);
1934 entry->marked_count = count*IMSM_4K_DIV - 1;
1935 }
1936 }
1937
1938 mpb->check_sum = __gen_imsm_checksum(mpb);
1939 }
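/* Worked example (hypothetical values, assuming IMSM_4K_DIV == 8):
 * marked_count is stored biased by one, i.e. a stored value of N spans
 * N + 1 sectors, so both converters add 1 before scaling and subtract 1
 * afterwards, and convert_to_4k() clamps the scaled span to at least one
 * 4K sector via max(count/IMSM_4K_DIV, 1):
 *
 *   to 4k:   stored 15 -> count 16 -> 16 / 8 = 2  -> stored 1
 *   from 4k: stored 1  -> count 2  -> 2 * 8  = 16 -> stored 15
 */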
1940
1941 /*******************************************************************************
1942 * function: imsm_check_attributes
1943 * Description: Checks whether the features represented by the attribute
1944 * flags are supported by mdadm.
1945 * Parameters:
1946 * attributes - Attributes read from metadata
1947 * Returns:
1948 * 0 - the passed attributes contain unsupported feature flags
1949 * 1 - all features are supported
1950 ******************************************************************************/
1951 static int imsm_check_attributes(__u32 attributes)
1952 {
1953 int ret_val = 1;
1954 __u32 not_supported = MPB_ATTRIB_SUPPORTED^0xffffffff;
1955
1956 not_supported &= ~MPB_ATTRIB_IGNORED;
1957
1958 not_supported &= attributes;
1959 if (not_supported) {
1960 pr_err("(IMSM): Unsupported attributes : %x\n",
1961 (unsigned)__le32_to_cpu(not_supported));
1962 if (not_supported & MPB_ATTRIB_CHECKSUM_VERIFY) {
1963 dprintf("\t\tMPB_ATTRIB_CHECKSUM_VERIFY\n");
1964 not_supported ^= MPB_ATTRIB_CHECKSUM_VERIFY;
1965 }
1966 if (not_supported & MPB_ATTRIB_2TB) {
1967 dprintf("\t\tMPB_ATTRIB_2TB\n");
1968 not_supported ^= MPB_ATTRIB_2TB;
1969 }
1970 if (not_supported & MPB_ATTRIB_RAID0) {
1971 dprintf("\t\tMPB_ATTRIB_RAID0\n");
1972 not_supported ^= MPB_ATTRIB_RAID0;
1973 }
1974 if (not_supported & MPB_ATTRIB_RAID1) {
1975 dprintf("\t\tMPB_ATTRIB_RAID1\n");
1976 not_supported ^= MPB_ATTRIB_RAID1;
1977 }
1978 if (not_supported & MPB_ATTRIB_RAID10) {
1979 dprintf("\t\tMPB_ATTRIB_RAID10\n");
1980 not_supported ^= MPB_ATTRIB_RAID10;
1981 }
1982 if (not_supported & MPB_ATTRIB_RAID1E) {
1983 dprintf("\t\tMPB_ATTRIB_RAID1E\n");
1984 not_supported ^= MPB_ATTRIB_RAID1E;
1985 }
1986 if (not_supported & MPB_ATTRIB_RAID5) {
1987 dprintf("\t\tMPB_ATTRIB_RAID5\n");
1988 not_supported ^= MPB_ATTRIB_RAID5;
1989 }
1990 if (not_supported & MPB_ATTRIB_RAIDCNG) {
1991 dprintf("\t\tMPB_ATTRIB_RAIDCNG\n");
1992 not_supported ^= MPB_ATTRIB_RAIDCNG;
1993 }
1994 if (not_supported & MPB_ATTRIB_BBM) {
1995 dprintf("\t\tMPB_ATTRIB_BBM\n");
1996 not_supported ^= MPB_ATTRIB_BBM;
1997 }
1998 if (not_supported & MPB_ATTRIB_CHECKSUM_VERIFY) {
1999 dprintf("\t\tMPB_ATTRIB_CHECKSUM_VERIFY (== MPB_ATTRIB_LEGACY)\n");
2000 not_supported ^= MPB_ATTRIB_CHECKSUM_VERIFY;
2001 }
2002 if (not_supported & MPB_ATTRIB_EXP_STRIPE_SIZE) {
2003 dprintf("\t\tMPB_ATTRIB_EXP_STRIPE_SIZE\n");
2004 not_supported ^= MPB_ATTRIB_EXP_STRIPE_SIZE;
2005 }
2006 if (not_supported & MPB_ATTRIB_2TB_DISK) {
2007 dprintf("\t\tMPB_ATTRIB_2TB_DISK\n");
2008 not_supported ^= MPB_ATTRIB_2TB_DISK;
2009 }
2010 if (not_supported & MPB_ATTRIB_NEVER_USE2) {
2011 dprintf("\t\tMPB_ATTRIB_NEVER_USE2\n");
2012 not_supported ^= MPB_ATTRIB_NEVER_USE2;
2013 }
2014 if (not_supported & MPB_ATTRIB_NEVER_USE) {
2015 dprintf("\t\tMPB_ATTRIB_NEVER_USE\n");
2016 not_supported ^= MPB_ATTRIB_NEVER_USE;
2017 }
2018
2019 if (not_supported)
2020 dprintf("(IMSM): Unknown attributes : %x\n", not_supported);
2021
2022 ret_val = 0;
2023 }
2024
2025 return ret_val;
2026 }
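/* Illustrative sketch, kept out of the build: the three-step computation
 * above reduces to a single mask test. The helper name is hypothetical.
 */
#if 0
static int imsm_attrs_acceptable(__u32 attributes)
{
	/* acceptable iff no bits fall outside the supported + ignored sets */
	return (attributes & ~(MPB_ATTRIB_SUPPORTED | MPB_ATTRIB_IGNORED)) == 0;
}
#endif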
2027
2028 static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info, char *map);
2029
2030 static void examine_super_imsm(struct supertype *st, char *homehost)
2031 {
2032 struct intel_super *super = st->sb;
2033 struct imsm_super *mpb = super->anchor;
2034 char str[MAX_SIGNATURE_LENGTH];
2035 int i;
2036 struct mdinfo info;
2037 char nbuf[64];
2038 __u32 sum;
2039 __u32 reserved = imsm_reserved_sectors(super, super->disks);
2040 struct dl *dl;
2041 time_t creation_time;
2042
2043 strncpy(str, (char *)mpb->sig, MPB_SIG_LEN);
2044 str[MPB_SIG_LEN-1] = '\0';
2045 printf(" Magic : %s\n", str);
2046 printf(" Version : %s\n", get_imsm_version(mpb));
2047 printf(" Orig Family : %08x\n", __le32_to_cpu(mpb->orig_family_num));
2048 printf(" Family : %08x\n", __le32_to_cpu(mpb->family_num));
2049 printf(" Generation : %08x\n", __le32_to_cpu(mpb->generation_num));
2050 creation_time = __le64_to_cpu(mpb->creation_time);
2051 printf(" Creation Time : %.24s\n",
2052 creation_time ? ctime(&creation_time) : "Unknown");
2053 printf(" Attributes : ");
2054 if (imsm_check_attributes(mpb->attributes))
2055 printf("All supported\n");
2056 else
2057 printf("not supported\n");
2058 getinfo_super_imsm(st, &info, NULL);
2059 fname_from_uuid(st, &info, nbuf, ':');
2060 printf(" UUID : %s\n", nbuf + 5);
2061 sum = __le32_to_cpu(mpb->check_sum);
2062 printf(" Checksum : %08x %s\n", sum,
2063 __gen_imsm_checksum(mpb) == sum ? "correct" : "incorrect");
2064 printf(" MPB Sectors : %d\n", mpb_sectors(mpb, super->sector_size));
2065 printf(" Disks : %d\n", mpb->num_disks);
2066 printf(" RAID Devices : %d\n", mpb->num_raid_devs);
2067 print_imsm_disk(__get_imsm_disk(mpb, super->disks->index),
2068 super->disks->index, reserved, super->sector_size);
2069 if (get_imsm_bbm_log_size(super->bbm_log)) {
2070 struct bbm_log *log = super->bbm_log;
2071
2072 printf("\n");
2073 printf("Bad Block Management Log:\n");
2074 printf(" Log Size : %d\n", __le32_to_cpu(mpb->bbm_log_size));
2075 printf(" Signature : %x\n", __le32_to_cpu(log->signature));
2076 printf(" Entry Count : %d\n", __le32_to_cpu(log->entry_count));
2077 }
2078 for (i = 0; i < mpb->num_raid_devs; i++) {
2079 struct mdinfo info;
2080 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
2081
2082 super->current_vol = i;
2083 getinfo_super_imsm(st, &info, NULL);
2084 fname_from_uuid(st, &info, nbuf, ':');
2085 print_imsm_dev(super, dev, nbuf + 5, super->disks->index);
2086 }
2087 for (i = 0; i < mpb->num_disks; i++) {
2088 if (i == super->disks->index)
2089 continue;
2090 print_imsm_disk(__get_imsm_disk(mpb, i), i, reserved,
2091 super->sector_size);
2092 }
2093
2094 for (dl = super->disks; dl; dl = dl->next)
2095 if (dl->index == -1)
2096 print_imsm_disk(&dl->disk, -1, reserved,
2097 super->sector_size);
2098
2099 examine_migr_rec_imsm(super);
2100 }
2101
2102 static void brief_examine_super_imsm(struct supertype *st, int verbose)
2103 {
2104 /* We just write a generic IMSM ARRAY entry */
2105 struct mdinfo info;
2106 char nbuf[64];
2107 struct intel_super *super = st->sb;
2108
2109 if (!super->anchor->num_raid_devs) {
2110 printf("ARRAY metadata=imsm\n");
2111 return;
2112 }
2113
2114 getinfo_super_imsm(st, &info, NULL);
2115 fname_from_uuid(st, &info, nbuf, ':');
2116 printf("ARRAY metadata=imsm UUID=%s\n", nbuf + 5);
2117 }
2118
2119 static void brief_examine_subarrays_imsm(struct supertype *st, int verbose)
2120 {
2121 /* We just write a generic IMSM ARRAY entry */
2122 struct mdinfo info;
2123 char nbuf[64];
2124 char nbuf1[64];
2125 struct intel_super *super = st->sb;
2126 int i;
2127
2128 if (!super->anchor->num_raid_devs)
2129 return;
2130
2131 getinfo_super_imsm(st, &info, NULL);
2132 fname_from_uuid(st, &info, nbuf, ':');
2133 for (i = 0; i < super->anchor->num_raid_devs; i++) {
2134 struct imsm_dev *dev = get_imsm_dev(super, i);
2135
2136 super->current_vol = i;
2137 getinfo_super_imsm(st, &info, NULL);
2138 fname_from_uuid(st, &info, nbuf1, ':');
2139 printf("ARRAY /dev/md/%.16s container=%s member=%d UUID=%s\n",
2140 dev->volume, nbuf + 5, i, nbuf1 + 5);
2141 }
2142 }
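/* For illustration, the two functions above emit mdadm.conf entries of the
 * following shape (UUIDs and the volume name are placeholders):
 *
 *   ARRAY metadata=imsm UUID=01234567:89abcdef:01234567:89abcdef
 *   ARRAY /dev/md/Volume0 container=01234567:89abcdef:01234567:89abcdef member=0 UUID=fedcba98:76543210:fedcba98:76543210
 */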
2143
2144 static void export_examine_super_imsm(struct supertype *st)
2145 {
2146 struct intel_super *super = st->sb;
2147 struct imsm_super *mpb = super->anchor;
2148 struct mdinfo info;
2149 char nbuf[64];
2150
2151 getinfo_super_imsm(st, &info, NULL);
2152 fname_from_uuid(st, &info, nbuf, ':');
2153 printf("MD_METADATA=imsm\n");
2154 printf("MD_LEVEL=container\n");
2155 printf("MD_UUID=%s\n", nbuf+5);
2156 printf("MD_DEVICES=%u\n", mpb->num_disks);
2157 printf("MD_CREATION_TIME=%llu\n", __le64_to_cpu(mpb->creation_time));
2158 }
2159
2160 static void detail_super_imsm(struct supertype *st, char *homehost,
2161 char *subarray)
2162 {
2163 struct mdinfo info;
2164 char nbuf[64];
2165 struct intel_super *super = st->sb;
2166 int temp_vol = super->current_vol;
2167
2168 if (subarray)
2169 super->current_vol = strtoul(subarray, NULL, 10);
2170
2171 getinfo_super_imsm(st, &info, NULL);
2172 fname_from_uuid(st, &info, nbuf, ':');
2173 printf("\n UUID : %s\n", nbuf + 5);
2174
2175 super->current_vol = temp_vol;
2176 }
2177
2178 static void brief_detail_super_imsm(struct supertype *st, char *subarray)
2179 {
2180 struct mdinfo info;
2181 char nbuf[64];
2182 struct intel_super *super = st->sb;
2183 int temp_vol = super->current_vol;
2184
2185 if (subarray)
2186 super->current_vol = strtoul(subarray, NULL, 10);
2187
2188 getinfo_super_imsm(st, &info, NULL);
2189 fname_from_uuid(st, &info, nbuf, ':');
2190 printf(" UUID=%s", nbuf + 5);
2191
2192 super->current_vol = temp_vol;
2193 }
2194
2195 static int imsm_read_serial(int fd, char *devname, __u8 *serial,
2196 size_t serial_buf_len);
2197 static void fd2devname(int fd, char *name);
2198
2199 static int ahci_enumerate_ports(const char *hba_path, int port_count, int host_base, int verbose)
2200 {
2201 /* dump an unsorted list of devices attached to AHCI Intel storage
2202 * controller, as well as non-connected ports
2203 */
2204 int hba_len = strlen(hba_path) + 1;
2205 struct dirent *ent;
2206 DIR *dir;
2207 char *path = NULL;
2208 int err = 0;
2209 unsigned long port_mask = (1UL << port_count) - 1;
2210
2211 if (port_count > (int)sizeof(port_mask) * 8) {
2212 if (verbose > 0)
2213 pr_err("port_count %d out of range\n", port_count);
2214 return 2;
2215 }
2216
2217 /* scroll through /sys/dev/block looking for devices attached to
2218 * this hba
2219 */
2220 dir = opendir("/sys/dev/block");
2221 if (!dir)
2222 return 1;
2223
2224 for (ent = readdir(dir); ent; ent = readdir(dir)) {
2225 int fd;
2226 char model[64];
2227 char vendor[64];
2228 char buf[1024];
2229 int major, minor;
2230 char *device;
2231 char *c;
2232 int port;
2233 int type;
2234
2235 if (sscanf(ent->d_name, "%d:%d", &major, &minor) != 2)
2236 continue;
2237 path = devt_to_devpath(makedev(major, minor));
2238 if (!path)
2239 continue;
2240 if (!path_attached_to_hba(path, hba_path)) {
2241 free(path);
2242 path = NULL;
2243 continue;
2244 }
2245
2246 /* retrieve the scsi device type */
2247 if (asprintf(&device, "/sys/dev/block/%d:%d/device/xxxxxxx", major, minor) < 0) {
2248 if (verbose > 0)
2249 pr_err("failed to allocate 'device'\n");
2250 err = 2;
2251 break;
2252 }
2253 sprintf(device, "/sys/dev/block/%d:%d/device/type", major, minor);
2254 if (load_sys(device, buf, sizeof(buf)) != 0) {
2255 if (verbose > 0)
2256 pr_err("failed to read device type for %s\n",
2257 path);
2258 err = 2;
2259 free(device);
2260 break;
2261 }
2262 type = strtoul(buf, NULL, 10);
2263
2264 /* if it's not a disk print the vendor and model */
2265 if (!(type == 0 || type == 7 || type == 14)) {
2266 vendor[0] = '\0';
2267 model[0] = '\0';
2268 sprintf(device, "/sys/dev/block/%d:%d/device/vendor", major, minor);
2269 if (load_sys(device, buf, sizeof(buf)) == 0) {
2270 strncpy(vendor, buf, sizeof(vendor));
2271 vendor[sizeof(vendor) - 1] = '\0';
2272 c = (char *) &vendor[sizeof(vendor) - 1];
2273 while (isspace(*c) || *c == '\0')
2274 *c-- = '\0';
2275
2276 }
2277 sprintf(device, "/sys/dev/block/%d:%d/device/model", major, minor);
2278 if (load_sys(device, buf, sizeof(buf)) == 0) {
2279 strncpy(model, buf, sizeof(model));
2280 model[sizeof(model) - 1] = '\0';
2281 c = (char *) &model[sizeof(model) - 1];
2282 while (isspace(*c) || *c == '\0')
2283 *c-- = '\0';
2284 }
2285
2286 if (vendor[0] && model[0])
2287 sprintf(buf, "%.64s %.64s", vendor, model);
2288 else
2289 switch (type) { /* numbers from hald/linux/device.c */
2290 case 1: sprintf(buf, "tape"); break;
2291 case 2: sprintf(buf, "printer"); break;
2292 case 3: sprintf(buf, "processor"); break;
2293 case 4:
2294 case 5: sprintf(buf, "cdrom"); break;
2295 case 6: sprintf(buf, "scanner"); break;
2296 case 8: sprintf(buf, "media_changer"); break;
2297 case 9: sprintf(buf, "comm"); break;
2298 case 12: sprintf(buf, "raid"); break;
2299 default: sprintf(buf, "unknown");
2300 }
2301 } else
2302 buf[0] = '\0';
2303 free(device);
2304
2305 /* chop device path to 'host%d' and calculate the port number */
2306 c = strchr(&path[hba_len], '/');
2307 if (!c) {
2308 if (verbose > 0)
2309 pr_err("%s - invalid path name\n", path + hba_len);
2310 err = 2;
2311 break;
2312 }
2313 *c = '\0';
2314 if ((sscanf(&path[hba_len], "ata%d", &port) == 1) ||
2315 ((sscanf(&path[hba_len], "host%d", &port) == 1)))
2316 port -= host_base;
2317 else {
2318 if (verbose > 0) {
2319 *c = '/'; /* repair the full string */
2320 pr_err("failed to determine port number for %s\n",
2321 path);
2322 }
2323 err = 2;
2324 break;
2325 }
2326
2327 /* mark this port as used */
2328 port_mask &= ~(1 << port);
2329
2330 /* print out the device information */
2331 if (buf[0]) {
2332 printf(" Port%d : - non-disk device (%s) -\n", port, buf);
2333 continue;
2334 }
2335
2336 fd = dev_open(ent->d_name, O_RDONLY);
2337 if (fd < 0)
2338 printf(" Port%d : - disk info unavailable -\n", port);
2339 else {
2340 fd2devname(fd, buf);
2341 printf(" Port%d : %s", port, buf);
2342 if (imsm_read_serial(fd, NULL, (__u8 *)buf,
2343 sizeof(buf)) == 0)
2344 printf(" (%s)\n", buf);
2345 else
2346 printf(" ()\n");
2347 close(fd);
2348 }
2349 free(path);
2350 path = NULL;
2351 }
2352 if (path)
2353 free(path);
2354 if (dir)
2355 closedir(dir);
2356 if (err == 0) {
2357 int i;
2358
2359 for (i = 0; i < port_count; i++)
2360 if (port_mask & (1 << i))
2361 printf(" Port%d : - no device attached -\n", i);
2362 }
2363
2364 return err;
2365 }
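/* Illustrative sketch, kept out of the build: the port bookkeeping above is
 * a plain bitmask. Every port starts flagged as empty, each discovered
 * device clears its bit, and whatever is still set at the end is printed as
 * "no device attached". A hypothetical 4-port controller:
 */
#if 0
static void port_mask_example(void)
{
	unsigned long port_mask = (1UL << 4) - 1;	/* 0b1111: ports 0-3 empty */

	port_mask &= ~(1UL << 0);	/* disk found on port 0 */
	port_mask &= ~(1UL << 2);	/* disk found on port 2 */
	/* port_mask == 0b1010: ports 1 and 3 reported as unattached */
}
#endif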
2366
2367 static int print_nvme_info(struct sys_dev *hba)
2368 {
2369 char buf[1024];
2370 struct dirent *ent;
2371 DIR *dir;
2372 char *rp;
2373 int fd;
2374
2375 dir = opendir("/sys/block/");
2376 if (!dir)
2377 return 1;
2378
2379 for (ent = readdir(dir); ent; ent = readdir(dir)) {
2380 if (strstr(ent->d_name, "nvme")) {
2381 sprintf(buf, "/sys/block/%s", ent->d_name);
2382 rp = realpath(buf, NULL);
2383 if (!rp)
2384 continue;
2385 if (path_attached_to_hba(rp, hba->path)) {
2386 fd = open_dev(ent->d_name);
2387 if (!imsm_is_nvme_supported(fd, 0)) {
2388 if (fd >= 0)
2389 close(fd);
2390 free(rp);
2391 continue;
2392 }
2393
2394 fd2devname(fd, buf);
2395 if (hba->type == SYS_DEV_VMD)
2396 printf(" NVMe under VMD : %s", buf);
2397 else if (hba->type == SYS_DEV_NVME)
2398 printf(" NVMe Device : %s", buf);
2399 if (!imsm_read_serial(fd, NULL, (__u8 *)buf,
2400 sizeof(buf)))
2401 printf(" (%s)\n", buf);
2402 else
2403 printf(" ()\n");
2404 close(fd);
2405 }
2406 free(rp);
2407 }
2408 }
2409
2410 closedir(dir);
2411 return 0;
2412 }
2413
2414 static void print_found_intel_controllers(struct sys_dev *elem)
2415 {
2416 for (; elem; elem = elem->next) {
2417 pr_err("found Intel(R) ");
2418 if (elem->type == SYS_DEV_SATA)
2419 fprintf(stderr, "SATA ");
2420 else if (elem->type == SYS_DEV_SAS)
2421 fprintf(stderr, "SAS ");
2422 else if (elem->type == SYS_DEV_NVME)
2423 fprintf(stderr, "NVMe ");
2424
2425 if (elem->type == SYS_DEV_VMD)
2426 fprintf(stderr, "VMD domain");
2427 else
2428 fprintf(stderr, "RAID controller");
2429
2430 if (elem->pci_id)
2431 fprintf(stderr, " at %s", elem->pci_id);
2432 fprintf(stderr, ".\n");
2433 }
2434 fflush(stderr);
2435 }
2436
2437 static int ahci_get_port_count(const char *hba_path, int *port_count)
2438 {
2439 struct dirent *ent;
2440 DIR *dir;
2441 int host_base = -1;
2442
2443 *port_count = 0;
2444 if ((dir = opendir(hba_path)) == NULL)
2445 return -1;
2446
2447 for (ent = readdir(dir); ent; ent = readdir(dir)) {
2448 int host;
2449
2450 if ((sscanf(ent->d_name, "ata%d", &host) != 1) &&
2451 ((sscanf(ent->d_name, "host%d", &host) != 1)))
2452 continue;
2453 if (*port_count == 0)
2454 host_base = host;
2455 else if (host < host_base)
2456 host_base = host;
2457
2458 if (host + 1 > *port_count + host_base)
2459 *port_count = host + 1 - host_base;
2460 }
2461 closedir(dir);
2462 return host_base;
2463 }
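/* Worked example (hypothetical directory contents): if the HBA directory
 * holds ata3, ata4 and ata6, host_base becomes 3 (the lowest host number)
 * and *port_count becomes 4 (hosts 3..6 span four ports), so the caller
 * will report the unused port 2 (the absent host 5) as having no device
 * attached.
 */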
2464
2465 static void print_imsm_capability(const struct imsm_orom *orom)
2466 {
2467 printf(" Platform : Intel(R) ");
2468 if (orom->capabilities == 0 && orom->driver_features == 0)
2469 printf("Matrix Storage Manager\n");
2470 else if (imsm_orom_is_enterprise(orom) && orom->major_ver >= 6)
2471 printf("Virtual RAID on CPU\n");
2472 else
2473 printf("Rapid Storage Technology%s\n",
2474 imsm_orom_is_enterprise(orom) ? " enterprise" : "");
2475 if (orom->major_ver || orom->minor_ver || orom->hotfix_ver || orom->build)
2476 printf(" Version : %d.%d.%d.%d\n", orom->major_ver,
2477 orom->minor_ver, orom->hotfix_ver, orom->build);
2478 printf(" RAID Levels :%s%s%s%s%s\n",
2479 imsm_orom_has_raid0(orom) ? " raid0" : "",
2480 imsm_orom_has_raid1(orom) ? " raid1" : "",
2481 imsm_orom_has_raid1e(orom) ? " raid1e" : "",
2482 imsm_orom_has_raid10(orom) ? " raid10" : "",
2483 imsm_orom_has_raid5(orom) ? " raid5" : "");
2484 printf(" Chunk Sizes :%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
2485 imsm_orom_has_chunk(orom, 2) ? " 2k" : "",
2486 imsm_orom_has_chunk(orom, 4) ? " 4k" : "",
2487 imsm_orom_has_chunk(orom, 8) ? " 8k" : "",
2488 imsm_orom_has_chunk(orom, 16) ? " 16k" : "",
2489 imsm_orom_has_chunk(orom, 32) ? " 32k" : "",
2490 imsm_orom_has_chunk(orom, 64) ? " 64k" : "",
2491 imsm_orom_has_chunk(orom, 128) ? " 128k" : "",
2492 imsm_orom_has_chunk(orom, 256) ? " 256k" : "",
2493 imsm_orom_has_chunk(orom, 512) ? " 512k" : "",
2494 imsm_orom_has_chunk(orom, 1024*1) ? " 1M" : "",
2495 imsm_orom_has_chunk(orom, 1024*2) ? " 2M" : "",
2496 imsm_orom_has_chunk(orom, 1024*4) ? " 4M" : "",
2497 imsm_orom_has_chunk(orom, 1024*8) ? " 8M" : "",
2498 imsm_orom_has_chunk(orom, 1024*16) ? " 16M" : "",
2499 imsm_orom_has_chunk(orom, 1024*32) ? " 32M" : "",
2500 imsm_orom_has_chunk(orom, 1024*64) ? " 64M" : "");
2501 printf(" 2TB volumes :%s supported\n",
2502 (orom->attr & IMSM_OROM_ATTR_2TB)?"":" not");
2503 printf(" 2TB disks :%s supported\n",
2504 (orom->attr & IMSM_OROM_ATTR_2TB_DISK)?"":" not");
2505 printf(" Max Disks : %d\n", orom->tds);
2506 printf(" Max Volumes : %d per array, %d per %s\n",
2507 orom->vpa, orom->vphba,
2508 imsm_orom_is_nvme(orom) ? "platform" : "controller");
2509 return;
2510 }
2511
2512 static void print_imsm_capability_export(const struct imsm_orom *orom)
2513 {
2514 printf("MD_FIRMWARE_TYPE=imsm\n");
2515 if (orom->major_ver || orom->minor_ver || orom->hotfix_ver || orom->build)
2516 printf("IMSM_VERSION=%d.%d.%d.%d\n", orom->major_ver, orom->minor_ver,
2517 orom->hotfix_ver, orom->build);
2518 printf("IMSM_SUPPORTED_RAID_LEVELS=%s%s%s%s%s\n",
2519 imsm_orom_has_raid0(orom) ? "raid0 " : "",
2520 imsm_orom_has_raid1(orom) ? "raid1 " : "",
2521 imsm_orom_has_raid1e(orom) ? "raid1e " : "",
2522 imsm_orom_has_raid10(orom) ? "raid10 " : "",
2523 imsm_orom_has_raid5(orom) ? "raid5 " : "");
2524 printf("IMSM_SUPPORTED_CHUNK_SIZES=%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
2525 imsm_orom_has_chunk(orom, 2) ? "2k " : "",
2526 imsm_orom_has_chunk(orom, 4) ? "4k " : "",
2527 imsm_orom_has_chunk(orom, 8) ? "8k " : "",
2528 imsm_orom_has_chunk(orom, 16) ? "16k " : "",
2529 imsm_orom_has_chunk(orom, 32) ? "32k " : "",
2530 imsm_orom_has_chunk(orom, 64) ? "64k " : "",
2531 imsm_orom_has_chunk(orom, 128) ? "128k " : "",
2532 imsm_orom_has_chunk(orom, 256) ? "256k " : "",
2533 imsm_orom_has_chunk(orom, 512) ? "512k " : "",
2534 imsm_orom_has_chunk(orom, 1024*1) ? "1M " : "",
2535 imsm_orom_has_chunk(orom, 1024*2) ? "2M " : "",
2536 imsm_orom_has_chunk(orom, 1024*4) ? "4M " : "",
2537 imsm_orom_has_chunk(orom, 1024*8) ? "8M " : "",
2538 imsm_orom_has_chunk(orom, 1024*16) ? "16M " : "",
2539 imsm_orom_has_chunk(orom, 1024*32) ? "32M " : "",
2540 imsm_orom_has_chunk(orom, 1024*64) ? "64M " : "");
2541 printf("IMSM_2TB_VOLUMES=%s\n",(orom->attr & IMSM_OROM_ATTR_2TB) ? "yes" : "no");
2542 printf("IMSM_2TB_DISKS=%s\n",(orom->attr & IMSM_OROM_ATTR_2TB_DISK) ? "yes" : "no");
2543 printf("IMSM_MAX_DISKS=%d\n",orom->tds);
2544 printf("IMSM_MAX_VOLUMES_PER_ARRAY=%d\n",orom->vpa);
2545 printf("IMSM_MAX_VOLUMES_PER_CONTROLLER=%d\n",orom->vphba);
2546 }
2547
2548 static int detail_platform_imsm(int verbose, int enumerate_only, char *controller_path)
2549 {
2550 /* There are two components to imsm platform support, the ahci SATA
2551 * controller and the option-rom. To find the SATA controller we
2552 * simply look in /sys/bus/pci/drivers/ahci to see if an ahci
2553 * controller with the Intel vendor id is present. This approach
2554 * allows mdadm to leverage the kernel's ahci detection logic, with the
2555 * caveat that if ahci.ko is not loaded mdadm will not be able to
2556 * detect platform raid capabilities. The option-rom resides in a
2557 * platform "Adapter ROM". We scan for its signature to retrieve the
2558 * platform capabilities. If raid support is disabled in the BIOS the
2559 * option-rom capability structure will not be available.
2560 */
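/* Illustrative sketch, kept out of the build: the vendor-id probe described
 * above, reduced to its essence. The helper name is hypothetical; the real
 * lookup lives in find_intel_devices().
 */
#if 0
static int have_intel_ahci(void)
{
	struct dirent *ent;
	DIR *dir = opendir("/sys/bus/pci/drivers/ahci");
	int found = 0;

	for (ent = dir ? readdir(dir) : NULL; ent; ent = readdir(dir)) {
		char path[PATH_MAX];
		char buf[16];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/bus/pci/drivers/ahci/%s/vendor", ent->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f) && !strncmp(buf, "0x8086", 6))
			found = 1;		/* Intel vendor id */
		fclose(f);
	}
	if (dir)
		closedir(dir);
	return found;
}
#endif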
2561 struct sys_dev *list, *hba;
2562 int host_base = 0;
2563 int port_count = 0;
2564 int result=1;
2565
2566 if (enumerate_only) {
2567 if (check_env("IMSM_NO_PLATFORM"))
2568 return 0;
2569 list = find_intel_devices();
2570 if (!list)
2571 return 2;
2572 for (hba = list; hba; hba = hba->next) {
2573 if (find_imsm_capability(hba)) {
2574 result = 0;
2575 break;
2576 }
2577 else
2578 result = 2;
2579 }
2580 return result;
2581 }
2582
2583 list = find_intel_devices();
2584 if (!list) {
2585 if (verbose > 0)
2586 pr_err("no active Intel(R) RAID controller found.\n");
2587 return 2;
2588 } else if (verbose > 0)
2589 print_found_intel_controllers(list);
2590
2591 for (hba = list; hba; hba = hba->next) {
2592 if (controller_path && (compare_paths(hba->path, controller_path) != 0))
2593 continue;
2594 if (!find_imsm_capability(hba)) {
2595 char buf[PATH_MAX];
2596 pr_err("imsm capabilities not found for controller: %s (type %s)\n",
2597 hba->type == SYS_DEV_VMD ? vmd_domain_to_controller(hba, buf) : hba->path,
2598 get_sys_dev_type(hba->type));
2599 continue;
2600 }
2601 result = 0;
2602 }
2603
2604 if (controller_path && result == 1) {
2605 pr_err("no active Intel(R) RAID controller found under %s\n",
2606 controller_path);
2607 return result;
2608 }
2609
2610 const struct orom_entry *entry;
2611
2612 for (entry = orom_entries; entry; entry = entry->next) {
2613 if (entry->type == SYS_DEV_VMD) {
2614 print_imsm_capability(&entry->orom);
2615 printf(" 3rd party NVMe :%s supported\n",
2616 imsm_orom_has_tpv_support(&entry->orom)?"":" not");
2617 for (hba = list; hba; hba = hba->next) {
2618 if (hba->type == SYS_DEV_VMD) {
2619 char buf[PATH_MAX];
2620 printf(" I/O Controller : %s (%s)\n",
2621 vmd_domain_to_controller(hba, buf), get_sys_dev_type(hba->type));
2622 if (print_nvme_info(hba)) {
2623 if (verbose > 0)
2624 pr_err("failed to get devices attached to VMD domain.\n");
2625 result |= 2;
2626 }
2627 }
2628 }
2629 printf("\n");
2630 continue;
2631 }
2632
2633 print_imsm_capability(&entry->orom);
2634 if (entry->type == SYS_DEV_NVME) {
2635 for (hba = list; hba; hba = hba->next) {
2636 if (hba->type == SYS_DEV_NVME)
2637 print_nvme_info(hba);
2638 }
2639 printf("\n");
2640 continue;
2641 }
2642
2643 struct devid_list *devid;
2644 for (devid = entry->devid_list; devid; devid = devid->next) {
2645 hba = device_by_id(devid->devid);
2646 if (!hba)
2647 continue;
2648
2649 printf(" I/O Controller : %s (%s)\n",
2650 hba->path, get_sys_dev_type(hba->type));
2651 if (hba->type == SYS_DEV_SATA) {
2652 host_base = ahci_get_port_count(hba->path, &port_count);
2653 if (ahci_enumerate_ports(hba->path, port_count, host_base, verbose)) {
2654 if (verbose > 0)
2655 pr_err("failed to enumerate ports on SATA controller at %s.\n", hba->pci_id);
2656 result |= 2;
2657 }
2658 }
2659 }
2660 printf("\n");
2661 }
2662
2663 return result;
2664 }
2665
2666 static int export_detail_platform_imsm(int verbose, char *controller_path)
2667 {
2668 struct sys_dev *list, *hba;
2669 int result=1;
2670
2671 list = find_intel_devices();
2672 if (!list) {
2673 if (verbose > 0)
2674 pr_err("IMSM_DETAIL_PLATFORM_ERROR=NO_INTEL_DEVICES\n");
2675 result = 2;
2676 return result;
2677 }
2678
2679 for (hba = list; hba; hba = hba->next) {
2680 if (controller_path && (compare_paths(hba->path,controller_path) != 0))
2681 continue;
2682 if (!find_imsm_capability(hba) && verbose > 0) {
2683 char buf[PATH_MAX];
2684 pr_err("IMSM_DETAIL_PLATFORM_ERROR=NO_IMSM_CAPABLE_DEVICE_UNDER_%s\n",
2685 hba->type == SYS_DEV_VMD ? vmd_domain_to_controller(hba, buf) : hba->path);
2686 }
2687 else
2688 result = 0;
2689 }
2690
2691 const struct orom_entry *entry;
2692
2693 for (entry = orom_entries; entry; entry = entry->next) {
2694 if (entry->type == SYS_DEV_VMD) {
2695 for (hba = list; hba; hba = hba->next)
2696 if (hba->type == SYS_DEV_VMD) print_imsm_capability_export(&entry->orom);
2697 continue;
2698 }
2699 print_imsm_capability_export(&entry->orom);
2700 }
2701
2702 return result;
2703 }
2704
2705 static int match_home_imsm(struct supertype *st, char *homehost)
2706 {
2707 /* the imsm metadata format does not specify any host
2708 * identification information. We return -1 since we can never
2709 * confirm nor deny whether a given array is "meant" for this
2710 * host. We rely on compare_super and the 'family_num' fields to
2711 * exclude member disks that do not belong, and we rely on
2712 * mdadm.conf to specify the arrays that should be assembled.
2713 * Auto-assembly may still pick up "foreign" arrays.
2714 */
2715
2716 return -1;
2717 }
2718
2719 static void uuid_from_super_imsm(struct supertype *st, int uuid[4])
2720 {
2721 /* The uuid returned here is used for:
2722 * uuid to put into bitmap file (Create, Grow)
2723 * uuid for backup header when saving critical section (Grow)
2724 * comparing uuids when re-adding a device into an array
2725 * In these cases the uuid required is that of the data-array,
2726 * not the device-set.
2727 * uuid to recognise same set when adding a missing device back
2728 * to an array. This is a uuid for the device-set.
2729 *
2730 * For each of these we can make do with a truncated
2731 * or hashed uuid rather than the original, as long as
2732 * everyone agrees.
2735 */
2736 /* imsm does not track uuids, so we synthesize one using sha1 over
2737 * - the signature (which is constant for all imsm arrays, but no matter)
2738 * - the orig_family_num of the container
2739 * - the index number of the volume
2740 * - the 'serial' number of the volume.
2741 * Hopefully these are all constant.
2742 */
2743 struct intel_super *super = st->sb;
2744
2745 char buf[20];
2746 struct sha1_ctx ctx;
2747 struct imsm_dev *dev = NULL;
2748 __u32 family_num;
2749
2750 /* some mdadm versions failed to set ->orig_family_num, in which
2751 * case fall back to ->family_num. orig_family_num will be
2752 * fixed up with the first metadata update.
2753 */
2754 family_num = super->anchor->orig_family_num;
2755 if (family_num == 0)
2756 family_num = super->anchor->family_num;
2757 sha1_init_ctx(&ctx);
2758 sha1_process_bytes(super->anchor->sig, MPB_SIG_LEN, &ctx);
2759 sha1_process_bytes(&family_num, sizeof(__u32), &ctx);
2760 if (super->current_vol >= 0)
2761 dev = get_imsm_dev(super, super->current_vol);
2762 if (dev) {
2763 __u32 vol = super->current_vol;
2764 sha1_process_bytes(&vol, sizeof(vol), &ctx);
2765 sha1_process_bytes(dev->volume, MAX_RAID_SERIAL_LEN, &ctx);
2766 }
2767 sha1_finish_ctx(&ctx, buf);
2768 memcpy(uuid, buf, 4*4);
2769 }
2770
2771 #if 0
2772 static void
2773 get_imsm_numerical_version(struct imsm_super *mpb, int *m, int *p)
2774 {
2775 __u8 *v = get_imsm_version(mpb);
2776 __u8 *end = mpb->sig + MAX_SIGNATURE_LENGTH;
2777 char major[] = { 0, 0, 0 };
2778 char minor[] = { 0 ,0, 0 };
2779 char patch[] = { 0, 0, 0 };
2780 char *ver_parse[] = { major, minor, patch };
2781 int i, j;
2782
2783 i = j = 0;
2784 while (*v != '\0' && v < end) {
2785 if (*v != '.' && j < 2)
2786 ver_parse[i][j++] = *v;
2787 else {
2788 i++;
2789 j = 0;
2790 }
2791 v++;
2792 }
2793
2794 *m = strtol(minor, NULL, 0);
2795 *p = strtol(patch, NULL, 0);
2796 }
2797 #endif
2798
2799 static __u32 migr_strip_blocks_resync(struct imsm_dev *dev)
2800 {
2801 /* migr_strip_size when repairing or initializing parity */
2802 struct imsm_map *map = get_imsm_map(dev, MAP_0);
2803 __u32 chunk = __le32_to_cpu(map->blocks_per_strip);
2804
2805 switch (get_imsm_raid_level(map)) {
2806 case 5:
2807 case 10:
2808 return chunk;
2809 default:
2810 return 128*1024 >> 9;
2811 }
2812 }
2813
2814 static __u32 migr_strip_blocks_rebuild(struct imsm_dev *dev)
2815 {
2816 /* migr_strip_size when rebuilding a degraded disk, no idea why
2817 * this is different from migr_strip_blocks_resync(), but it's good
2818 * to be compatible
2819 */
2820 struct imsm_map *map = get_imsm_map(dev, MAP_1);
2821 __u32 chunk = __le32_to_cpu(map->blocks_per_strip);
2822
2823 switch (get_imsm_raid_level(map)) {
2824 case 1:
2825 case 10:
2826 if (map->num_members % map->num_domains == 0)
2827 return 128*1024 >> 9;
2828 else
2829 return chunk;
2830 case 5:
2831 return max((__u32) 64*1024 >> 9, chunk);
2832 default:
2833 return 128*1024 >> 9;
2834 }
2835 }
2836
2837 static __u32 num_stripes_per_unit_resync(struct imsm_dev *dev)
2838 {
2839 struct imsm_map *lo = get_imsm_map(dev, MAP_0);
2840 struct imsm_map *hi = get_imsm_map(dev, MAP_1);
2841 __u32 lo_chunk = __le32_to_cpu(lo->blocks_per_strip);
2842 __u32 hi_chunk = __le32_to_cpu(hi->blocks_per_strip);
2843
2844 return max((__u32) 1, hi_chunk / lo_chunk);
2845 }
2846
2847 static __u32 num_stripes_per_unit_rebuild(struct imsm_dev *dev)
2848 {
2849 struct imsm_map *lo = get_imsm_map(dev, MAP_0);
2850 int level = get_imsm_raid_level(lo);
2851
2852 if (level == 1 || level == 10) {
2853 struct imsm_map *hi = get_imsm_map(dev, MAP_1);
2854
2855 return hi->num_domains;
2856 } else
2857 return num_stripes_per_unit_resync(dev);
2858 }
2859
2860 static __u8 imsm_num_data_members(struct imsm_map *map)
2861 {
2862 /* named 'imsm_' because raid0, raid1 and raid10
2863 * counter-intuitively have the same number of data disks
2864 */
2865 switch (get_imsm_raid_level(map)) {
2866 case 0:
2867 return map->num_members;
2869 case 1:
2870 case 10:
2871 return map->num_members/2;
2872 case 5:
2873 return map->num_members - 1;
2874 default:
2875 dprintf("unsupported raid level\n");
2876 return 0;
2877 }
2878 }
2879
2880 static unsigned long long calc_component_size(struct imsm_map *map,
2881 struct imsm_dev *dev)
2882 {
2883 unsigned long long component_size;
2884 unsigned long long dev_size = imsm_dev_size(dev);
2885 long long calc_dev_size = 0;
2886 unsigned int member_disks = imsm_num_data_members(map);
2887
2888 if (member_disks == 0)
2889 return 0;
2890
2891 component_size = per_dev_array_size(map);
2892 calc_dev_size = component_size * member_disks;
2893
2894 /* Component size is rounded to 1MB, so the difference between the size
2895 * from metadata and the size calculated from num_data_stripes can be up
2896 * to 2048 blocks per device. If the difference is larger, the array
2897 * size was expanded and num_data_stripes was not updated.
2898 */
2899 if (llabs(calc_dev_size - (long long)dev_size) >
2900 (1 << SECT_PER_MB_SHIFT) * member_disks) {
2901 component_size = dev_size / member_disks;
2902 dprintf("Invalid num_data_stripes in metadata; expected=%llu, found=%llu\n",
2903 component_size / map->blocks_per_strip,
2904 num_data_stripes(map));
2905 }
2906
2907 return component_size;
2908 }
2909
2910 static __u32 parity_segment_depth(struct imsm_dev *dev)
2911 {
2912 struct imsm_map *map = get_imsm_map(dev, MAP_0);
2913 __u32 chunk = __le32_to_cpu(map->blocks_per_strip);
2914
2915 switch(get_imsm_raid_level(map)) {
2916 case 1:
2917 case 10:
2918 return chunk * map->num_domains;
2919 case 5:
2920 return chunk * map->num_members;
2921 default:
2922 return chunk;
2923 }
2924 }
2925
2926 static __u32 map_migr_block(struct imsm_dev *dev, __u32 block)
2927 {
2928 struct imsm_map *map = get_imsm_map(dev, MAP_1);
2929 __u32 chunk = __le32_to_cpu(map->blocks_per_strip);
2930 __u32 strip = block / chunk;
2931
2932 switch (get_imsm_raid_level(map)) {
2933 case 1:
2934 case 10: {
2935 __u32 vol_strip = (strip * map->num_domains) + 1;
2936 __u32 vol_stripe = vol_strip / map->num_members;
2937
2938 return vol_stripe * chunk + block % chunk;
2939 } case 5: {
2940 __u32 stripe = strip / (map->num_members - 1);
2941
2942 return stripe * chunk + block % chunk;
2943 }
2944 default:
2945 return 0;
2946 }
2947 }
2948
2949 static __u64 blocks_per_migr_unit(struct intel_super *super,
2950 struct imsm_dev *dev)
2951 {
2952 /* calculate the conversion factor between per member 'blocks'
2953 * (md/{resync,rebuild}_start) and imsm migration units, return
2954 * 0 for the 'not migrating' and 'unsupported migration' cases
2955 */
2956 if (!dev->vol.migr_state)
2957 return 0;
2958
2959 switch (migr_type(dev)) {
2960 case MIGR_GEN_MIGR: {
2961 struct migr_record *migr_rec = super->migr_rec;
2962 return __le32_to_cpu(migr_rec->blocks_per_unit);
2963 }
2964 case MIGR_VERIFY:
2965 case MIGR_REPAIR:
2966 case MIGR_INIT: {
2967 struct imsm_map *map = get_imsm_map(dev, MAP_0);
2968 __u32 stripes_per_unit;
2969 __u32 blocks_per_unit;
2970 __u32 parity_depth;
2971 __u32 migr_chunk;
2972 __u32 block_map;
2973 __u32 block_rel;
2974 __u32 segment;
2975 __u32 stripe;
2976 __u8 disks;
2977
2978 /* yes, this is really the translation of migr_units to
2979 * per-member blocks in the 'resync' case
2980 */
2981 stripes_per_unit = num_stripes_per_unit_resync(dev);
2982 migr_chunk = migr_strip_blocks_resync(dev);
2983 disks = imsm_num_data_members(map);
2984 blocks_per_unit = stripes_per_unit * migr_chunk * disks;
2985 stripe = __le16_to_cpu(map->blocks_per_strip) * disks;
2986 segment = blocks_per_unit / stripe;
2987 block_rel = blocks_per_unit - segment * stripe;
2988 parity_depth = parity_segment_depth(dev);
2989 block_map = map_migr_block(dev, block_rel);
2990 return block_map + parity_depth * segment;
2991 }
2992 case MIGR_REBUILD: {
2993 __u32 stripes_per_unit;
2994 __u32 migr_chunk;
2995
2996 stripes_per_unit = num_stripes_per_unit_rebuild(dev);
2997 migr_chunk = migr_strip_blocks_rebuild(dev);
2998 return migr_chunk * stripes_per_unit;
2999 }
3000 case MIGR_STATE_CHANGE:
3001 default:
3002 return 0;
3003 }
3004 }
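/* Illustrative sketch, kept out of the build: for the verify/repair/init
 * cases this factor converts the metadata checkpoint into an md position in
 * per-member 512-byte blocks, as done in getinfo_super_imsm_volume(). The
 * helper name is hypothetical.
 */
#if 0
static unsigned long long checkpoint_to_resync_start(struct intel_super *super,
						     struct imsm_dev *dev)
{
	__u64 units = __le32_to_cpu(dev->vol.curr_migr_unit);

	return blocks_per_migr_unit(super, dev) * units;
}
#endif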
3005
3006 static int imsm_level_to_layout(int level)
3007 {
3008 switch (level) {
3009 case 0:
3010 case 1:
3011 return 0;
3012 case 5:
3013 case 6:
3014 return ALGORITHM_LEFT_ASYMMETRIC;
3015 case 10:
3016 return 0x102;
3017 }
3018 return UnSet;
3019 }
3020
3021 /*******************************************************************************
3022 * Function: read_imsm_migr_rec
3023 * Description: Function reads imsm migration record from the last sectors of the disk
3024 * Parameters:
3025 * fd : disk descriptor
3026 * super : metadata info
3027 * Returns:
3028 * 0 : success,
3029 * -1 : fail
3030 ******************************************************************************/
3031 static int read_imsm_migr_rec(int fd, struct intel_super *super)
3032 {
3033 int ret_val = -1;
3034 unsigned int sector_size = super->sector_size;
3035 unsigned long long dsize;
3036
3037 get_dev_size(fd, NULL, &dsize);
3038 if (lseek64(fd, dsize - (sector_size*MIGR_REC_SECTOR_POSITION),
3039 SEEK_SET) < 0) {
3040 pr_err("Cannot seek to anchor block: %s\n",
3041 strerror(errno));
3042 goto out;
3043 }
3044 if ((unsigned int)read(fd, super->migr_rec_buf,
3045 MIGR_REC_BUF_SECTORS*sector_size) !=
3046 MIGR_REC_BUF_SECTORS*sector_size) {
3047 pr_err("Cannot read migr record block: %s\n",
3048 strerror(errno));
3049 goto out;
3050 }
3051 ret_val = 0;
3052 if (sector_size == 4096)
3053 convert_from_4k_imsm_migr_rec(super);
3054
3055 out:
3056 return ret_val;
3057 }
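/* Illustrative sketch, kept out of the build: the seek above targets a
 * fixed anchor near the end of the device; the record occupies
 * MIGR_REC_BUF_SECTORS sectors starting MIGR_REC_SECTOR_POSITION sectors
 * before the end. The helper name is hypothetical.
 */
#if 0
static unsigned long long migr_rec_offset(unsigned long long dev_size_bytes,
					  unsigned int sector_size)
{
	return dev_size_bytes -
	       (unsigned long long)sector_size * MIGR_REC_SECTOR_POSITION;
}
#endif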
3058
3059 static struct imsm_dev *imsm_get_device_during_migration(
3060 struct intel_super *super)
3061 {
3062
3063 struct intel_dev *dv;
3064
3065 for (dv = super->devlist; dv; dv = dv->next) {
3066 if (is_gen_migration(dv->dev))
3067 return dv->dev;
3068 }
3069 return NULL;
3070 }
3071
3072 /*******************************************************************************
3073 * Function: load_imsm_migr_rec
3074 * Description: Function reads imsm migration record (it is stored at the last
3075 * sectors of the disk)
3076 * Parameters:
3077 * super : imsm internal array info
3078 * Returns:
3079 * 0 : success
3080 * -1 : fail
3081 * -2 : no migration in progress
3082 ******************************************************************************/
3083 static int load_imsm_migr_rec(struct intel_super *super)
3084 {
3085 struct dl *dl;
3086 char nm[30];
3087 int retval = -1;
3088 int fd = -1;
3089 struct imsm_dev *dev;
3090 struct imsm_map *map;
3091 int slot = -1;
3092 int keep_fd = 1;
3093
3094 /* find map under migration */
3095 dev = imsm_get_device_during_migration(super);
3096 /* nothing to load, no migration in progress?
3097 */
3098 if (dev == NULL)
3099 return -2;
3100
3101 map = get_imsm_map(dev, MAP_0);
3102 if (!map)
3103 return -1;
3104
3105 for (dl = super->disks; dl; dl = dl->next) {
3106 /* skip spare and failed disks
3107 */
3108 if (dl->index < 0)
3109 continue;
3110 /* read only from one of the first two slots
3111 */
3112 slot = get_imsm_disk_slot(map, dl->index);
3113 if (slot > 1 || slot < 0)
3114 continue;
3115
3116 if (dl->fd < 0) {
3117 sprintf(nm, "%d:%d", dl->major, dl->minor);
3118 fd = dev_open(nm, O_RDONLY);
3119 if (fd >= 0) {
3120 keep_fd = 0;
3121 break;
3122 }
3123 } else {
3124 fd = dl->fd;
3125 break;
3126 }
3127 }
3128
3129 if (fd < 0)
3130 return retval;
3131 retval = read_imsm_migr_rec(fd, super);
3132 if (!keep_fd)
3133 close(fd);
3134
3135 return retval;
3136 }
3137
3138 /*******************************************************************************
3139 * function: imsm_create_metadata_checkpoint_update
3140 * Description: It creates update for checkpoint change.
3141 * Parameters:
3142 * super : imsm internal array info
3143 * u : pointer to prepared update
3144 * Returns:
3145 * Update length.
3146 * If length is equal to 0, input pointer u contains no update.
3147 ******************************************************************************/
3148 static int imsm_create_metadata_checkpoint_update(
3149 struct intel_super *super,
3150 struct imsm_update_general_migration_checkpoint **u)
3151 {
3152
3153 int update_memory_size = 0;
3154
3155 dprintf("(enter)\n");
3156
3157 if (u == NULL)
3158 return 0;
3159 *u = NULL;
3160
3161 /* size of all update data without anchor */
3162 update_memory_size =
3163 sizeof(struct imsm_update_general_migration_checkpoint);
3164
3165 *u = xcalloc(1, update_memory_size);
3166 if (*u == NULL) {
3167 dprintf("error: cannot get memory\n");
3168 return 0;
3169 }
3170 (*u)->type = update_general_migration_checkpoint;
3171 (*u)->curr_migr_unit = current_migr_unit(super->migr_rec);
3172 dprintf("prepared for %u\n", (*u)->curr_migr_unit);
3173
3174 return update_memory_size;
3175 }
3176
3177 static void imsm_update_metadata_locally(struct supertype *st,
3178 void *buf, int len);
3179
3180 /*******************************************************************************
3181 * Function: write_imsm_migr_rec
3182 * Description: Function writes imsm migration record
3183 * (at the last sector of disk)
3184 * Parameters:
3185 * super : imsm internal array info
3186 * Returns:
3187 * 0 : success
3188 * -1 : if fail
3189 ******************************************************************************/
3190 static int write_imsm_migr_rec(struct supertype *st)
3191 {
3192 struct intel_super *super = st->sb;
3193 unsigned int sector_size = super->sector_size;
3194 unsigned long long dsize;
3195 int retval = -1;
3196 struct dl *sd;
3197 int len;
3198 struct imsm_update_general_migration_checkpoint *u;
3199 struct imsm_dev *dev;
3200 struct imsm_map *map;
3201
3202 /* find map under migration */
3203 dev = imsm_get_device_during_migration(super);
3204 /* if no migration, write buffer anyway to clear migr_record
3205 * on disk based on first available device
3206 */
3207 if (dev == NULL)
3208 dev = get_imsm_dev(super, super->current_vol < 0 ? 0 :
3209 super->current_vol);
3210
3211 map = get_imsm_map(dev, MAP_0);
3212
3213 if (sector_size == 4096)
3214 convert_to_4k_imsm_migr_rec(super);
3215 for (sd = super->disks ; sd ; sd = sd->next) {
3216 int slot = -1;
3217
3218 /* skip failed and spare devices */
3219 if (sd->index < 0)
3220 continue;
3221 /* write to the first two slots only */
3222 if (map)
3223 slot = get_imsm_disk_slot(map, sd->index);
3224 if (map == NULL || slot > 1 || slot < 0)
3225 continue;
3226
3227 get_dev_size(sd->fd, NULL, &dsize);
3228 if (lseek64(sd->fd, dsize - (MIGR_REC_SECTOR_POSITION *
3229 sector_size),
3230 SEEK_SET) < 0) {
3231 pr_err("Cannot seek to anchor block: %s\n",
3232 strerror(errno));
3233 goto out;
3234 }
3235 if ((unsigned int)write(sd->fd, super->migr_rec_buf,
3236 MIGR_REC_BUF_SECTORS*sector_size) !=
3237 MIGR_REC_BUF_SECTORS*sector_size) {
3238 pr_err("Cannot write migr record block: %s\n",
3239 strerror(errno));
3240 goto out;
3241 }
3242 }
3243 if (sector_size == 4096)
3244 convert_from_4k_imsm_migr_rec(super);
3245 /* update checkpoint information in metadata */
3246 len = imsm_create_metadata_checkpoint_update(super, &u);
3247 if (len <= 0) {
3248 dprintf("imsm: Cannot prepare update\n");
3249 goto out;
3250 }
3251 /* update metadata locally */
3252 imsm_update_metadata_locally(st, u, len);
3253 /* and possibly remotely */
3254 if (st->update_tail) {
3255 append_metadata_update(st, u, len);
3256 /* during reshape we do all work inside metadata handler
3257 * manage_reshape(), so metadata update has to be triggered
3258 * inside it
3259 */
3260 flush_metadata_updates(st);
3261 st->update_tail = &st->updates;
3262 } else
3263 free(u);
3264
3265 retval = 0;
3266 out:
3267 return retval;
3268 }
3269
3270 /* spare/missing disk activations are not allowed when the
3271 * array/container performs a reshape operation, because
3272 * all arrays in the container work on the same disk set
3273 */
3274 int imsm_reshape_blocks_arrays_changes(struct intel_super *super)
3275 {
3276 int rv = 0;
3277 struct intel_dev *i_dev;
3278 struct imsm_dev *dev;
3279
3280 /* check whole container
3281 */
3282 for (i_dev = super->devlist; i_dev; i_dev = i_dev->next) {
3283 dev = i_dev->dev;
3284 if (is_gen_migration(dev)) {
3285 /* No repair during any migration in container
3286 */
3287 rv = 1;
3288 break;
3289 }
3290 }
3291 return rv;
3292 }

3293 static unsigned long long imsm_component_size_alignment_check(int level,
3294 int chunk_size,
3295 unsigned int sector_size,
3296 unsigned long long component_size)
3297 {
3298 unsigned int component_size_alignment;
3299
3300 /* check component size alignment
3301 */
3302 component_size_alignment = component_size % (chunk_size/sector_size);
3303
3304 dprintf("(Level: %i, chunk_size = %i, component_size = %llu), component_size_alignment = %u\n",
3305 level, chunk_size, component_size,
3306 component_size_alignment);
3307
3308 if (component_size_alignment && (level != 1) && (level != UnSet)) {
3309 dprintf("imsm: reported component size aligned from %llu ",
3310 component_size);
3311 component_size -= component_size_alignment;
3312 dprintf_cont("to %llu (%i).\n",
3313 component_size, component_size_alignment);
3314 }
3315
3316 return component_size;
3317 }
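/* Worked example (hypothetical values): with a 128 KiB chunk and 512-byte
 * sectors the required alignment is 131072 / 512 = 256 sectors; a reported
 * component_size of 1000100 sectors carries 1000100 % 256 = 164 sectors of
 * slack and is trimmed to 999936.
 */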
3318
3319 /*******************************************************************************
3320 * Function: get_bitmap_header_sector
3321 * Description: Returns the sector where the bitmap header is placed.
3322 * Parameters:
3323 * super : imsm internal array info
3324 * dev_idx : index of the device with bitmap
3325 *
3326 * Returns:
3327 * The sector where the bitmap header is placed
3328 ******************************************************************************/
3329 static unsigned long long get_bitmap_header_sector(struct intel_super *super,
3330 int dev_idx)
3331 {
3332 struct imsm_dev *dev = get_imsm_dev(super, dev_idx);
3333 struct imsm_map *map = get_imsm_map(dev, MAP_0);
3334
3335 if (!super->sector_size) {
3336 dprintf("sector size is not set\n");
3337 return 0;
3338 }
3339
3340 return pba_of_lba0(map) + calc_component_size(map, dev) +
3341 (IMSM_BITMAP_HEADER_OFFSET / super->sector_size);
3342 }
3343
3344 /*******************************************************************************
3345 * Function: get_bitmap_sector
3346 * Description: Returns the sector where the bitmap is placed.
3347 * Parameters:
3348 * super : imsm internal array info
3349 * dev_idx : index of the device with bitmap
3350 *
3351 * Returns:
3352 * The sector where the bitmap is placed
3353 ******************************************************************************/
3354 static unsigned long long get_bitmap_sector(struct intel_super *super,
3355 int dev_idx)
3356 {
3357 if (!super->sector_size) {
3358 dprintf("sector size is not set\n");
3359 return 0;
3360 }
3361
3362 return get_bitmap_header_sector(super, dev_idx) +
3363 (IMSM_BITMAP_HEADER_SIZE / super->sector_size);
3364 }
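/* Taken together, the two helpers above imply this per-member order on
 * disk: component data starting at pba_of_lba0(), then
 * IMSM_BITMAP_HEADER_OFFSET bytes to the bitmap header, then
 * IMSM_BITMAP_HEADER_SIZE bytes to the bitmap itself.
 */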
3365
3366 static unsigned long long get_ppl_sector(struct intel_super *super, int dev_idx)
3367 {
3368 struct imsm_dev *dev = get_imsm_dev(super, dev_idx);
3369 struct imsm_map *map = get_imsm_map(dev, MAP_0);
3370
3371 return pba_of_lba0(map) +
3372 (num_data_stripes(map) * map->blocks_per_strip);
3373 }
3374
3375 static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info, char *dmap)
3376 {
3377 struct intel_super *super = st->sb;
3378 struct migr_record *migr_rec = super->migr_rec;
3379 struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
3380 struct imsm_map *map = get_imsm_map(dev, MAP_0);
3381 struct imsm_map *prev_map = get_imsm_map(dev, MAP_1);
3382 struct imsm_map *map_to_analyse = map;
3383 struct dl *dl;
3384 int map_disks = info->array.raid_disks;
3385
3386 memset(info, 0, sizeof(*info));
3387 if (prev_map)
3388 map_to_analyse = prev_map;
3389
3390 dl = super->current_disk;
3391
3392 info->container_member = super->current_vol;
3393 info->array.raid_disks = map->num_members;
3394 info->array.level = get_imsm_raid_level(map_to_analyse);
3395 info->array.layout = imsm_level_to_layout(info->array.level);
3396 info->array.md_minor = -1;
3397 info->array.ctime = 0;
3398 info->array.utime = 0;
3399 info->array.chunk_size =
3400 __le16_to_cpu(map_to_analyse->blocks_per_strip) << 9;
3401 info->array.state = !(dev->vol.dirty & RAIDVOL_DIRTY);
3402 info->custom_array_size = imsm_dev_size(dev);
3403 info->recovery_blocked = imsm_reshape_blocks_arrays_changes(st->sb);
3404
3405 if (is_gen_migration(dev)) {
3406 info->reshape_active = 1;
3407 info->new_level = get_imsm_raid_level(map);
3408 info->new_layout = imsm_level_to_layout(info->new_level);
3409 info->new_chunk = __le16_to_cpu(map->blocks_per_strip) << 9;
3410 info->delta_disks = map->num_members - prev_map->num_members;
3411 if (info->delta_disks) {
3412 /* this needs to be applied to every array
3413 * in the container.
3414 */
3415 info->reshape_active = CONTAINER_RESHAPE;
3416 }
3417 /* The shape information that we give to md might have to be
3418 * modified to cope with md's requirements for reshaping arrays.
3419 * For example, when reshaping a RAID0, md requires it to be
3420 * presented as a degraded RAID4.
3421 * Also if a RAID0 is migrating to a RAID5 we need to specify
3422 * the array as already being RAID5, but the 'before' layout
3423 * is a RAID4-like layout.
3424 */
3425 switch (info->array.level) {
3426 case 0:
3427 switch(info->new_level) {
3428 case 0:
3429 /* conversion is happening as RAID4 */
3430 info->array.level = 4;
3431 info->array.raid_disks += 1;
3432 break;
3433 case 5:
3434 /* conversion is happening as RAID5 */
3435 info->array.level = 5;
3436 info->array.layout = ALGORITHM_PARITY_N;
3437 info->delta_disks -= 1;
3438 break;
3439 default:
3440 /* FIXME error message */
3441 info->array.level = UnSet;
3442 break;
3443 }
3444 break;
3445 }
3446 } else {
3447 info->new_level = UnSet;
3448 info->new_layout = UnSet;
3449 info->new_chunk = info->array.chunk_size;
3450 info->delta_disks = 0;
3451 }
3452
3453 if (dl) {
3454 info->disk.major = dl->major;
3455 info->disk.minor = dl->minor;
3456 info->disk.number = dl->index;
3457 info->disk.raid_disk = get_imsm_disk_slot(map_to_analyse,
3458 dl->index);
3459 }
3460
3461 info->data_offset = pba_of_lba0(map_to_analyse);
3462 info->component_size = calc_component_size(map, dev);
3463 info->component_size = imsm_component_size_alignment_check(
3464 info->array.level,
3465 info->array.chunk_size,
3466 super->sector_size,
3467 info->component_size);
3468 info->bb.supported = 1;
3469
3470 memset(info->uuid, 0, sizeof(info->uuid));
3471 info->recovery_start = MaxSector;
3472
3473 if (info->array.level == 5 &&
3474 (dev->rwh_policy == RWH_DISTRIBUTED ||
3475 dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)) {
3476 info->consistency_policy = CONSISTENCY_POLICY_PPL;
3477 info->ppl_sector = get_ppl_sector(super, super->current_vol);
3478 if (dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
3479 info->ppl_size = MULTIPLE_PPL_AREA_SIZE_IMSM >> 9;
3480 else
3481 info->ppl_size = (PPL_HEADER_SIZE + PPL_ENTRY_SPACE)
3482 >> 9;
3483 } else if (info->array.level <= 0) {
3484 info->consistency_policy = CONSISTENCY_POLICY_NONE;
3485 } else {
3486 if (dev->rwh_policy == RWH_BITMAP) {
3487 info->bitmap_offset = get_bitmap_sector(super, super->current_vol);
3488 info->consistency_policy = CONSISTENCY_POLICY_BITMAP;
3489 } else {
3490 info->consistency_policy = CONSISTENCY_POLICY_RESYNC;
3491 }
3492 }
3493
3494 info->reshape_progress = 0;
3495 info->resync_start = MaxSector;
3496 if ((map_to_analyse->map_state == IMSM_T_STATE_UNINITIALIZED ||
3497 !(info->array.state & 1)) &&
3498 imsm_reshape_blocks_arrays_changes(super) == 0) {
3499 info->resync_start = 0;
3500 }
3501 if (dev->vol.migr_state) {
3502 switch (migr_type(dev)) {
3503 case MIGR_REPAIR:
3504 case MIGR_INIT: {
3505 __u64 blocks_per_unit = blocks_per_migr_unit(super,
3506 dev);
3507 __u64 units = __le32_to_cpu(dev->vol.curr_migr_unit);
3508
3509 info->resync_start = blocks_per_unit * units;
3510 break;
3511 }
3512 case MIGR_GEN_MIGR: {
3513 __u64 blocks_per_unit = blocks_per_migr_unit(super,
3514 dev);
3515 __u64 units = current_migr_unit(migr_rec);
3516 int used_disks;
3517
3518 if (__le32_to_cpu(migr_rec->ascending_migr) &&
3519 (units <
3520 (get_num_migr_units(migr_rec)-1)) &&
3521 (super->migr_rec->rec_status ==
3522 __cpu_to_le32(UNIT_SRC_IN_CP_AREA)))
3523 units++;
3524
3525 info->reshape_progress = blocks_per_unit * units;
3526
3527 dprintf("IMSM: General Migration checkpoint : %llu (%llu) -> read reshape progress : %llu\n",
3528 (unsigned long long)units,
3529 (unsigned long long)blocks_per_unit,
3530 info->reshape_progress);
3531
3532 used_disks = imsm_num_data_members(prev_map);
3533 if (used_disks > 0) {
3534 info->custom_array_size = per_dev_array_size(map) *
3535 used_disks;
3536 }
3537 }
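/* fall through: reshape_progress is recorded above; resync_start is
 * left at MaxSector via the default case below */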
3538 case MIGR_VERIFY:
3539 /* we could emulate the checkpointing of
3540 * 'sync_action=check' migrations, but for now
3541 * we just immediately complete them
3542 */
3543 case MIGR_REBUILD:
3544 /* this is handled by container_content_imsm() */
3545 case MIGR_STATE_CHANGE:
3546 /* FIXME handle other migrations */
3547 default:
3548 /* we are not dirty, so... */
3549 info->resync_start = MaxSector;
3550 }
3551 }
3552
3553 strncpy(info->name, (char *) dev->volume, MAX_RAID_SERIAL_LEN);
3554 info->name[MAX_RAID_SERIAL_LEN] = 0;
3555
3556 info->array.major_version = -1;
3557 info->array.minor_version = -2;
3558 sprintf(info->text_version, "/%s/%d", st->container_devnm, info->container_member);
3559 info->safe_mode_delay = 4000; /* 4 secs like the Matrix driver */
3560 uuid_from_super_imsm(st, info->uuid);
3561
3562 if (dmap) {
3563 int i, j;
3564 for (i=0; i<map_disks; i++) {
3565 dmap[i] = 0;
3566 if (i < info->array.raid_disks) {
3567 struct imsm_disk *dsk;
3568 j = get_imsm_disk_idx(dev, i, MAP_X);
3569 dsk = get_imsm_disk(super, j);
3570 if (dsk && (dsk->status & CONFIGURED_DISK))
3571 dmap[i] = 1;
3572 }
3573 }
3574 }
3575 }
3576
3577 static __u8 imsm_check_degraded(struct intel_super *super, struct imsm_dev *dev,
3578 int failed, int look_in_map);
3579
3580 static int imsm_count_failed(struct intel_super *super, struct imsm_dev *dev,
3581 int look_in_map);
3582
3583 static void manage_second_map(struct intel_super *super, struct imsm_dev *dev)
3584 {
3585 if (is_gen_migration(dev)) {
3586 int failed;
3587 __u8 map_state;
3588 struct imsm_map *map2 = get_imsm_map(dev, MAP_1);
3589
3590 failed = imsm_count_failed(super, dev, MAP_1);
3591 map_state = imsm_check_degraded(super, dev, failed, MAP_1);
3592 if (map2->map_state != map_state) {
3593 map2->map_state = map_state;
3594 super->updates_pending++;
3595 }
3596 }
3597 }
3598
3599 static struct imsm_disk *get_imsm_missing(struct intel_super *super, __u8 index)
3600 {
3601 struct dl *d;
3602
3603 for (d = super->missing; d; d = d->next)
3604 if (d->index == index)
3605 return &d->disk;
3606 return NULL;
3607 }
3608
3609 static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info, char *map)
3610 {
3611 struct intel_super *super = st->sb;
3612 struct imsm_disk *disk;
3613 int map_disks = info->array.raid_disks;
3614 int max_enough = -1;
3615 int i;
3616 struct imsm_super *mpb;
3617
3618 if (super->current_vol >= 0) {
3619 getinfo_super_imsm_volume(st, info, map);
3620 return;
3621 }
3622 memset(info, 0, sizeof(*info));
3623
3624 /* Set raid_disks to zero so that Assemble will always pull in valid
3625 * spares
3626 */
3627 info->array.raid_disks = 0;
3628 info->array.level = LEVEL_CONTAINER;
3629 info->array.layout = 0;
3630 info->array.md_minor = -1;
3631 info->array.ctime = 0; /* N/A for imsm */
3632 info->array.utime = 0;
3633 info->array.chunk_size = 0;
3634
3635 info->disk.major = 0;
3636 info->disk.minor = 0;
3637 info->disk.raid_disk = -1;
3638 info->reshape_active = 0;
3639 info->array.major_version = -1;
3640 info->array.minor_version = -2;
3641 strcpy(info->text_version, "imsm");
3642 info->safe_mode_delay = 0;
3643 info->disk.number = -1;
3644 info->disk.state = 0;
3645 info->name[0] = 0;
3646 info->recovery_start = MaxSector;
3647 info->recovery_blocked = imsm_reshape_blocks_arrays_changes(st->sb);
3648 info->bb.supported = 1;
3649
3650 /* do we have all the in-sync disks that we expect? */
3651 mpb = super->anchor;
3652 info->events = __le32_to_cpu(mpb->generation_num);
3653
3654 for (i = 0; i < mpb->num_raid_devs; i++) {
3655 struct imsm_dev *dev = get_imsm_dev(super, i);
3656 int failed, enough, j, missing = 0;
3657 struct imsm_map *map;
3658 __u8 state;
3659
3660 failed = imsm_count_failed(super, dev, MAP_0);
3661 state = imsm_check_degraded(super, dev, failed, MAP_0);
3662 map = get_imsm_map(dev, MAP_0);
3663
3664 /* any newly missing disks?
3665 * (catches single-degraded vs double-degraded)
3666 */
3667 for (j = 0; j < map->num_members; j++) {
3668 __u32 ord = get_imsm_ord_tbl_ent(dev, j, MAP_0);
3669 __u32 idx = ord_to_idx(ord);
3670
3671 if (super->disks && super->disks->index == (int)idx)
3672 info->disk.raid_disk = j;
3673
3674 if (!(ord & IMSM_ORD_REBUILD) &&
3675 get_imsm_missing(super, idx)) {
3676 missing = 1;
3677 break;
3678 }
3679 }
3680
3681 if (state == IMSM_T_STATE_FAILED)
3682 enough = -1;
3683 else if (state == IMSM_T_STATE_DEGRADED &&
3684 (state != map->map_state || missing))
3685 enough = 0;
3686 else /* we're normal, or already degraded */
3687 enough = 1;
3688 if (is_gen_migration(dev) && missing) {
3689 /* during general migration we need all the disks
3690 * that the process is running on.
3691 * No new missing disks are allowed.
3692 */
3693 max_enough = -1;
3694 enough = -1;
3695 /* no more checks necessary
3696 */
3697 break;
3698 }
3699 /* in the missing/failed disk case check to see
3700 * if at least one array is runnable
3701 */
3702 max_enough = max(max_enough, enough);
3703 }
3704 dprintf("enough: %d\n", max_enough);
3705 info->container_enough = max_enough;
3706
3707 if (super->disks) {
3708 __u32 reserved = imsm_reserved_sectors(super, super->disks);
3709
3710 disk = &super->disks->disk;
3711 info->data_offset = total_blocks(&super->disks->disk) - reserved;
3712 info->component_size = reserved;
3713 info->disk.state = is_configured(disk) ? (1 << MD_DISK_ACTIVE) : 0;
3714 /* we don't change info->disk.raid_disk here because
3715 * this state will be finalized in mdmon after we have
3716 * found the 'most fresh' version of the metadata
3717 */
3718 info->disk.state |= is_failed(disk) ? (1 << MD_DISK_FAULTY) : 0;
3719 info->disk.state |= (is_spare(disk) || is_journal(disk)) ?
3720 0 : (1 << MD_DISK_SYNC);
3721 }
3722
3723 /* only call uuid_from_super_imsm when this disk is part of a populated container,
3724 * as ->compare_super may have updated the 'num_raid_devs' field for spares
3725 */
3726 if (info->disk.state & (1 << MD_DISK_SYNC) || super->anchor->num_raid_devs)
3727 uuid_from_super_imsm(st, info->uuid);
3728 else
3729 memcpy(info->uuid, uuid_zero, sizeof(uuid_zero));
3730
3731 /* I don't know how to compute 'map' on imsm, so use safe default */
3732 if (map) {
3733 int i;
3734 for (i = 0; i < map_disks; i++)
3735 map[i] = 1;
3736 }
3737
3738 }
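
/* Illustrative sketch (disabled, not part of the driver): how a caller
 * might act on the container_enough value computed above. -1 means some
 * member array can never be started, 0 means assembly is possible but
 * degraded, and 1 means every expected in-sync disk is present. The
 * helper name is hypothetical.
 */
#if 0
static int container_is_startable(struct supertype *st)
{
	struct mdinfo info;

	getinfo_super_imsm(st, &info, NULL);
	return info.container_enough >= 0;
}
#endif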
3739
3740 /* allocates memory and fills disk in mdinfo structure
3741 * for each disk in array */
3742 struct mdinfo *getinfo_super_disks_imsm(struct supertype *st)
3743 {
3744 struct mdinfo *mddev;
3745 struct intel_super *super = st->sb;
3746 struct imsm_disk *disk;
3747 int count = 0;
3748 struct dl *dl;
3749 if (!super || !super->disks)
3750 return NULL;
3751 dl = super->disks;
3752 mddev = xcalloc(1, sizeof(*mddev));
3753 while (dl) {
3754 struct mdinfo *tmp;
3755 disk = &dl->disk;
3756 tmp = xcalloc(1, sizeof(*tmp));
3757 if (mddev->devs)
3758 tmp->next = mddev->devs;
3759 mddev->devs = tmp;
3760 tmp->disk.number = count++;
3761 tmp->disk.major = dl->major;
3762 tmp->disk.minor = dl->minor;
3763 tmp->disk.state = is_configured(disk) ?
3764 (1 << MD_DISK_ACTIVE) : 0;
3765 tmp->disk.state |= is_failed(disk) ? (1 << MD_DISK_FAULTY) : 0;
3766 tmp->disk.state |= is_spare(disk) ? 0 : (1 << MD_DISK_SYNC);
3767 tmp->disk.raid_disk = -1;
3768 dl = dl->next;
3769 }
3770 return mddev;
3771 }
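
/* Illustrative sketch (disabled): walking the per-disk list built
 * above. The list is a plain singly linked chain hanging off ->devs;
 * sysfs_free() is assumed to release it, as for other mdinfo trees.
 */
#if 0
static void print_container_disks(struct supertype *st)
{
	struct mdinfo *mddev = getinfo_super_disks_imsm(st);
	struct mdinfo *d;

	if (!mddev)
		return;
	for (d = mddev->devs; d; d = d->next)
		printf("disk %d is %d:%d state %x\n", d->disk.number,
		       d->disk.major, d->disk.minor, d->disk.state);
	sysfs_free(mddev);
}
#endif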
3772
3773 static int update_super_imsm(struct supertype *st, struct mdinfo *info,
3774 char *update, char *devname, int verbose,
3775 int uuid_set, char *homehost)
3776 {
3777 /* For 'assemble' and 'force' we need to return non-zero if any
3778 * change was made. For others, the return value is ignored.
3779 * Update options are:
3780 * force-one : This device looks a bit old but needs to be included,
3781 * update age info appropriately.
3782 * assemble: clear any 'faulty' flag to allow this device to
3783 * be assembled.
3784 * force-array: Array is degraded but being forced, mark it clean
3785 * if that will be needed to assemble it.
3786 *
3787 * newdev: not used ????
3788 * grow: Array has gained a new device - this is currently for
3789 * linear only
3790 * resync: mark as dirty so a resync will happen.
3791 * name: update the name - preserving the homehost
3792 * uuid: Change the uuid of the array to match what is given
3793 *
3794 * Following are not relevant for this imsm:
3795 * sparc2.2 : update from old dodgy metadata
3796 * super-minor: change the preferred_minor number
3797 * summaries: update redundant counters.
3798 * homehost: update the recorded homehost
3799 * _reshape_progress: record new reshape_progress position.
3800 */
3801 int rv = 1;
3802 struct intel_super *super = st->sb;
3803 struct imsm_super *mpb;
3804
3805 /* we can only update container info */
3806 if (!super || super->current_vol >= 0 || !super->anchor)
3807 return 1;
3808
3809 mpb = super->anchor;
3810
3811 if (strcmp(update, "uuid") == 0) {
3812 /* We take this to mean that the family_num should be updated.
3813 * However that is much smaller than the uuid so we cannot really
3814 * allow an explicit uuid to be given. And it is hard to reliably
3815 * know if one was.
3816 * So if !uuid_set we know the current uuid is random; we just take
3817 * the first 'int' and copy it to the other 3 positions.
3818 * Otherwise we require the 4 'int's to be the same, as would be the
3819 * case if we were using a random uuid. So an explicit uuid will be
3820 * accepted as long as all four ints are the same... which shouldn't hurt
3821 */
3822 if (!uuid_set) {
3823 info->uuid[1] = info->uuid[2] = info->uuid[3] = info->uuid[0];
3824 rv = 0;
3825 } else {
3826 if (info->uuid[0] != info->uuid[1] ||
3827 info->uuid[1] != info->uuid[2] ||
3828 info->uuid[2] != info->uuid[3])
3829 rv = -1;
3830 else
3831 rv = 0;
3832 }
3833 if (rv == 0)
3834 mpb->orig_family_num = info->uuid[0];
3835 } else if (strcmp(update, "assemble") == 0)
3836 rv = 0;
3837 else
3838 rv = -1;
3839
3840 /* successful update? recompute checksum */
3841 if (rv == 0)
3842 mpb->check_sum = __le32_to_cpu(__gen_imsm_checksum(mpb));
3843
3844 return rv;
3845 }
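
/* Illustrative sketch (disabled): the only uuid update imsm can honor
 * is one where all four words agree, because just orig_family_num (a
 * single __u32) is actually stored. The helper below is hypothetical.
 */
#if 0
static int set_container_family(struct supertype *st, int fam)
{
	struct mdinfo info = { 0 };

	info.uuid[0] = info.uuid[1] = info.uuid[2] = info.uuid[3] = fam;
	return update_super_imsm(st, &info, "uuid", NULL, 0, 1, NULL);
}
#endif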
3846
3847 static size_t disks_to_mpb_size(int disks)
3848 {
3849 size_t size;
3850
3851 size = sizeof(struct imsm_super);
3852 size += (disks - 1) * sizeof(struct imsm_disk);
3853 size += 2 * sizeof(struct imsm_dev);
3854 /* up to 2 maps per raid device (-2 for the imsm_maps already counted in imsm_dev) */
3855 size += (4 - 2) * sizeof(struct imsm_map);
3856 /* 4 possible disk_ord_tbl's */
3857 size += 4 * (disks - 1) * sizeof(__u32);
3858 /* maximum bbm log */
3859 size += sizeof(struct bbm_log);
3860
3861 return size;
3862 }
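
/* Worked example of the estimate above: for disks = 4 the anchor grows
 * by 3 extra imsm_disk entries, 2 imsm_dev entries, 2 spare imsm_map
 * entries, 4 * 3 disk_ord_tbl words and one maximal bbm_log, on top of
 * the base sizeof(struct imsm_super).
 */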
3863
3864 static __u64 avail_size_imsm(struct supertype *st, __u64 devsize,
3865 unsigned long long data_offset)
3866 {
3867 if (devsize < (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS))
3868 return 0;
3869
3870 return devsize - (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
3871 }
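
/* Worked example: a member of exactly MPB_SECTOR_CNT +
 * IMSM_RESERVED_SECTORS sectors (or fewer) exports no usable space;
 * everything above that line is available to volumes, since the
 * metadata lives in the reserved tail of the disk.
 */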
3872
3873 static void free_devlist(struct intel_super *super)
3874 {
3875 struct intel_dev *dv;
3876
3877 while (super->devlist) {
3878 dv = super->devlist->next;
3879 free(super->devlist->dev);
3880 free(super->devlist);
3881 super->devlist = dv;
3882 }
3883 }
3884
3885 static void imsm_copy_dev(struct imsm_dev *dest, struct imsm_dev *src)
3886 {
3887 memcpy(dest, src, sizeof_imsm_dev(src, 0));
3888 }
3889
3890 static int compare_super_imsm(struct supertype *st, struct supertype *tst,
3891 int verbose)
3892 {
3893 /*
3894 * return:
3895 * 0 same, or first was empty, and second was copied
3896 * 1 second had wrong number
3897 * 2 wrong uuid
3898 * 3 wrong other info
3899 */
3900 struct intel_super *first = st->sb;
3901 struct intel_super *sec = tst->sb;
3902
3903 if (!first) {
3904 st->sb = tst->sb;
3905 tst->sb = NULL;
3906 return 0;
3907 }
3908 /* in a platform-dependent environment test whether the disks
3909 * use the same Intel hba.
3910 * If they are not on an Intel hba at all, allow anything.
3911 */
3912 if (!check_env("IMSM_NO_PLATFORM") && first->hba && sec->hba) {
3913 if (first->hba->type != sec->hba->type) {
3914 if (verbose)
3915 pr_err("HBAs of devices do not match %s != %s\n",
3916 get_sys_dev_type(first->hba->type),
3917 get_sys_dev_type(sec->hba->type));
3918 return 3;
3919 }
3920
3921 if (first->orom != sec->orom) {
3922 if (verbose)
3923 pr_err("HBAs of devices do not match %s != %s\n",
3924 first->hba->pci_id, sec->hba->pci_id);
3925 return 3;
3926 }
3927
3928 }
3929
3930 /* if an anchor does not have num_raid_devs set then it is a free
3931 * floating spare
3932 */
3933 if (first->anchor->num_raid_devs > 0 &&
3934 sec->anchor->num_raid_devs > 0) {
3935 /* Determine if these disks might ever have been
3936 * related. Further disambiguation can only take place
3937 * in load_super_imsm_all
3938 */
3939 __u32 first_family = first->anchor->orig_family_num;
3940 __u32 sec_family = sec->anchor->orig_family_num;
3941
3942 if (memcmp(first->anchor->sig, sec->anchor->sig,
3943 MAX_SIGNATURE_LENGTH) != 0)
3944 return 3;
3945
3946 if (first_family == 0)
3947 first_family = first->anchor->family_num;
3948 if (sec_family == 0)
3949 sec_family = sec->anchor->family_num;
3950
3951 if (first_family != sec_family)
3952 return 3;
3953
3954 }
3955
3956 /* if 'first' is a spare promote it to a populated mpb with sec's
3957 * family number
3958 */
3959 if (first->anchor->num_raid_devs == 0 &&
3960 sec->anchor->num_raid_devs > 0) {
3961 int i;
3962 struct intel_dev *dv;
3963 struct imsm_dev *dev;
3964
3965 /* we need to copy raid device info from sec; if an allocation
3966 * fails here we don't associate the spare
3967 */
3968 for (i = 0; i < sec->anchor->num_raid_devs; i++) {
3969 dv = xmalloc(sizeof(*dv));
3970 dev = xmalloc(sizeof_imsm_dev(get_imsm_dev(sec, i), 1));
3971 dv->dev = dev;
3972 dv->index = i;
3973 dv->next = first->devlist;
3974 first->devlist = dv;
3975 }
3976 if (i < sec->anchor->num_raid_devs) {
3977 /* allocation failure */
3978 free_devlist(first);
3979 pr_err("imsm: failed to associate spare\n");
3980 return 3;
3981 }
3982 first->anchor->num_raid_devs = sec->anchor->num_raid_devs;
3983 first->anchor->orig_family_num = sec->anchor->orig_family_num;
3984 first->anchor->family_num = sec->anchor->family_num;
3985 memcpy(first->anchor->sig, sec->anchor->sig, MAX_SIGNATURE_LENGTH);
3986 for (i = 0; i < sec->anchor->num_raid_devs; i++)
3987 imsm_copy_dev(get_imsm_dev(first, i), get_imsm_dev(sec, i));
3988 }
3989
3990 return 0;
3991 }
3992
3993 static void fd2devname(int fd, char *name)
3994 {
3995 struct stat st;
3996 char path[256];
3997 char dname[PATH_MAX];
3998 char *nm;
3999 int rv;
4000
4001 name[0] = '\0';
4002 if (fstat(fd, &st) != 0)
4003 return;
4004 sprintf(path, "/sys/dev/block/%d:%d",
4005 major(st.st_rdev), minor(st.st_rdev));
4006
4007 rv = readlink(path, dname, sizeof(dname)-1);
4008 if (rv <= 0)
4009 return;
4010
4011 dname[rv] = '\0';
4012 nm = strrchr(dname, '/');
4013 if (nm) {
4014 nm++;
4015 snprintf(name, MAX_RAID_SERIAL_LEN, "/dev/%s", nm);
4016 }
4017 }
4018
4019 static int nvme_get_serial(int fd, void *buf, size_t buf_len)
4020 {
4021 char path[60];
4022 char *name = fd2kname(fd);
4023
4024 if (!name)
4025 return 1;
4026
4027 if (strncmp(name, "nvme", 4) != 0)
4028 return 1;
4029
4030 snprintf(path, sizeof(path) - 1, "/sys/block/%s/device/serial", name);
4031
4032 return load_sys(path, buf, buf_len);
4033 }
4034
4035 extern int scsi_get_serial(int fd, void *buf, size_t buf_len);
4036
4037 static int imsm_read_serial(int fd, char *devname,
4038 __u8 *serial, size_t serial_buf_len)
4039 {
4040 char buf[50];
4041 int rv;
4042 size_t len;
4043 char *dest;
4044 char *src;
4045 unsigned int i;
4046
4047 memset(buf, 0, sizeof(buf));
4048
4049 rv = nvme_get_serial(fd, buf, sizeof(buf));
4050
4051 if (rv)
4052 rv = scsi_get_serial(fd, buf, sizeof(buf));
4053
4054 if (rv && check_env("IMSM_DEVNAME_AS_SERIAL")) {
4055 memset(serial, 0, MAX_RAID_SERIAL_LEN);
4056 fd2devname(fd, (char *) serial);
4057 return 0;
4058 }
4059
4060 if (rv != 0) {
4061 if (devname)
4062 pr_err("Failed to retrieve serial for %s\n",
4063 devname);
4064 return rv;
4065 }
4066
4067 /* trim all whitespace and non-printable characters and convert
4068 * ':' to ';'
4069 */
4070 for (i = 0, dest = buf; i < sizeof(buf) && buf[i]; i++) {
4071 src = &buf[i];
4072 if (*src > 0x20) {
4073 /* ':' is reserved for use in placeholder serial
4074 * numbers for missing disks
4075 */
4076 if (*src == ':')
4077 *dest++ = ';';
4078 else
4079 *dest++ = *src;
4080 }
4081 }
4082 len = dest - buf;
4083 dest = buf;
4084
4085 if (len > serial_buf_len) {
4086 /* truncate leading characters */
4087 dest += len - serial_buf_len;
4088 len = serial_buf_len;
4089 }
4090
4091 memset(serial, 0, serial_buf_len);
4092 memcpy(serial, dest, len);
4093
4094 return 0;
4095 }
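
/* Illustrative sketch (disabled): the sanitization above maps a raw
 * serial such as "  WD:12 34\n" to "WD;1234" - whitespace and other
 * non-printables are dropped and ':' becomes ';' so a real serial can
 * never collide with the "missing:%d" placeholders used for absent
 * disks. If the result is still too long, leading characters are
 * truncated to keep the (more distinctive) tail.
 */
#if 0
	__u8 serial[MAX_RAID_SERIAL_LEN];

	if (imsm_read_serial(fd, NULL, serial, MAX_RAID_SERIAL_LEN) == 0)
		dprintf("serial: %.*s\n", MAX_RAID_SERIAL_LEN, (char *)serial);
#endif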
4096
4097 static int serialcmp(__u8 *s1, __u8 *s2)
4098 {
4099 return strncmp((char *) s1, (char *) s2, MAX_RAID_SERIAL_LEN);
4100 }
4101
4102 static void serialcpy(__u8 *dest, __u8 *src)
4103 {
4104 strncpy((char *) dest, (char *) src, MAX_RAID_SERIAL_LEN);
4105 }
4106
4107 static struct dl *serial_to_dl(__u8 *serial, struct intel_super *super)
4108 {
4109 struct dl *dl;
4110
4111 for (dl = super->disks; dl; dl = dl->next)
4112 if (serialcmp(dl->serial, serial) == 0)
4113 break;
4114
4115 return dl;
4116 }
4117
4118 static struct imsm_disk *
4119 __serial_to_disk(__u8 *serial, struct imsm_super *mpb, int *idx)
4120 {
4121 int i;
4122
4123 for (i = 0; i < mpb->num_disks; i++) {
4124 struct imsm_disk *disk = __get_imsm_disk(mpb, i);
4125
4126 if (serialcmp(disk->serial, serial) == 0) {
4127 if (idx)
4128 *idx = i;
4129 return disk;
4130 }
4131 }
4132
4133 return NULL;
4134 }
4135
4136 static int
4137 load_imsm_disk(int fd, struct intel_super *super, char *devname, int keep_fd)
4138 {
4139 struct imsm_disk *disk;
4140 struct dl *dl;
4141 struct stat stb;
4142 int rv;
4143 char name[40];
4144 __u8 serial[MAX_RAID_SERIAL_LEN];
4145
4146 rv = imsm_read_serial(fd, devname, serial, MAX_RAID_SERIAL_LEN);
4147
4148 if (rv != 0)
4149 return 2;
4150
4151 dl = xcalloc(1, sizeof(*dl));
4152
4153 fstat(fd, &stb);
4154 dl->major = major(stb.st_rdev);
4155 dl->minor = minor(stb.st_rdev);
4156 dl->next = super->disks;
4157 dl->fd = keep_fd ? fd : -1;
4158 assert(super->disks == NULL);
4159 super->disks = dl;
4160 serialcpy(dl->serial, serial);
4161 dl->index = -2;
4162 dl->e = NULL;
4163 fd2devname(fd, name);
4164 if (devname)
4165 dl->devname = xstrdup(devname);
4166 else
4167 dl->devname = xstrdup(name);
4168
4169 /* look up this disk's index in the current anchor */
4170 disk = __serial_to_disk(dl->serial, super->anchor, &dl->index);
4171 if (disk) {
4172 dl->disk = *disk;
4173 /* only set index on disks that are a member of a
4174 * populated container, i.e. one with raid_devs
4175 */
4176 if (is_failed(&dl->disk))
4177 dl->index = -2;
4178 else if (is_spare(&dl->disk) || is_journal(&dl->disk))
4179 dl->index = -1;
4180 }
4181
4182 return 0;
4183 }
4184
4185 /* When migrating map0 contains the 'destination' state while map1
4186 * contains the current state. When not migrating map0 contains the
4187 * current state. This routine assumes that map[0].map_state is set to
4188 * the current array state before being called.
4189 *
4190 * Migration is indicated by one of the following states
4191 * 1/ Idle (migr_state=0 map0state=normal||uninitialized||degraded||failed)
4192 * 2/ Initialize (migr_state=1 migr_type=MIGR_INIT map0state=normal
4193 * map1state=uninitialized)
4194 * 3/ Repair (Resync) (migr_state=1 migr_type=MIGR_REPAIR map0state=normal
4195 * map1state=normal)
4196 * 4/ Rebuild (migr_state=1 migr_type=MIGR_REBUILD map0state=normal
4197 * map1state=degraded)
4198 * 5/ Migration (migr_state=1 migr_type=MIGR_GEN_MIGR map0state=normal
4199 * map1state=normal)
4200 */
4201 static void migrate(struct imsm_dev *dev, struct intel_super *super,
4202 __u8 to_state, int migr_type)
4203 {
4204 struct imsm_map *dest;
4205 struct imsm_map *src = get_imsm_map(dev, MAP_0);
4206
4207 dev->vol.migr_state = 1;
4208 set_migr_type(dev, migr_type);
4209 dev->vol.curr_migr_unit = 0;
4210 dest = get_imsm_map(dev, MAP_1);
4211
4212 /* duplicate and then set the target end state in map[0] */
4213 memcpy(dest, src, sizeof_imsm_map(src));
4214 if (migr_type == MIGR_GEN_MIGR) {
4215 __u32 ord;
4216 int i;
4217
4218 for (i = 0; i < src->num_members; i++) {
4219 ord = __le32_to_cpu(src->disk_ord_tbl[i]);
4220 set_imsm_ord_tbl_ent(src, i, ord_to_idx(ord));
4221 }
4222 }
4223
4224 if (migr_type == MIGR_GEN_MIGR)
4225 /* Clear migration record */
4226 memset(super->migr_rec, 0, sizeof(struct migr_record));
4227
4228 src->map_state = to_state;
4229 }
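
/* Illustrative sketch (disabled): starting and finishing a rebuild.
 * migrate() duplicates map[0] into map[1] and records the target end
 * state in map[0]; end_migration() (defined just below) later merges
 * any leftover IMSM_ORD_REBUILD bits and collapses back to one map.
 */
#if 0
	migrate(dev, super, IMSM_T_STATE_NORMAL, MIGR_REBUILD);
	/* ... rebuild proceeds, curr_migr_unit advances ... */
	end_migration(dev, super, IMSM_T_STATE_NORMAL);
#endif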
4230
4231 static void end_migration(struct imsm_dev *dev, struct intel_super *super,
4232 __u8 map_state)
4233 {
4234 struct imsm_map *map = get_imsm_map(dev, MAP_0);
4235 struct imsm_map *prev = get_imsm_map(dev, dev->vol.migr_state == 0 ?
4236 MAP_0 : MAP_1);
4237 int i, j;
4238
4239 /* merge any IMSM_ORD_REBUILD bits that were not successfully
4240 * completed in the last migration.
4241 *
4242 * FIXME add support for raid-level-migration
4243 */
4244 if (map_state != map->map_state && (is_gen_migration(dev) == 0) &&
4245 prev->map_state != IMSM_T_STATE_UNINITIALIZED) {
4246 /* when final map state is other than expected
4247 * merge maps (not for migration)
4248 */
4249 int failed;
4250
4251 for (i = 0; i < prev->num_members; i++)
4252 for (j = 0; j < map->num_members; j++)
4253 /* during online capacity expansion
4254 * disks position can be changed
4255 * if takeover is used
4256 */
4257 if (ord_to_idx(map->disk_ord_tbl[j]) ==
4258 ord_to_idx(prev->disk_ord_tbl[i])) {
4259 map->disk_ord_tbl[j] |=
4260 prev->disk_ord_tbl[i];
4261 break;
4262 }
4263 failed = imsm_count_failed(super, dev, MAP_0);
4264 map_state = imsm_check_degraded(super, dev, failed, MAP_0);
4265 }
4266
4267 dev->vol.migr_state = 0;
4268 set_migr_type(dev, 0);
4269 dev->vol.curr_migr_unit = 0;
4270 map->map_state = map_state;
4271 }
4272
4273 static int parse_raid_devices(struct intel_super *super)
4274 {
4275 int i;
4276 struct imsm_dev *dev_new;
4277 size_t len, len_migr;
4278 size_t max_len = 0;
4279 size_t space_needed = 0;
4280 struct imsm_super *mpb = super->anchor;
4281
4282 for (i = 0; i < super->anchor->num_raid_devs; i++) {
4283 struct imsm_dev *dev_iter = __get_imsm_dev(super->anchor, i);
4284 struct intel_dev *dv;
4285
4286 len = sizeof_imsm_dev(dev_iter, 0);
4287 len_migr = sizeof_imsm_dev(dev_iter, 1);
4288 if (len_migr > len)
4289 space_needed += len_migr - len;
4290
4291 dv = xmalloc(sizeof(*dv));
4292 if (max_len < len_migr)
4293 max_len = len_migr;
4294 if (max_len > len_migr)
4295 space_needed += max_len - len_migr;
4296 dev_new = xmalloc(max_len);
4297 imsm_copy_dev(dev_new, dev_iter);
4298 dv->dev = dev_new;
4299 dv->index = i;
4300 dv->next = super->devlist;
4301 super->devlist = dv;
4302 }
4303
4304 /* ensure that super->buf is large enough when all raid devices
4305 * are migrating
4306 */
4307 if (__le32_to_cpu(mpb->mpb_size) + space_needed > super->len) {
4308 void *buf;
4309
4310 len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) + space_needed,
4311 super->sector_size);
4312 if (posix_memalign(&buf, MAX_SECTOR_SIZE, len) != 0)
4313 return 1;
4314
4315 memcpy(buf, super->buf, super->len);
4316 memset(buf + super->len, 0, len - super->len);
4317 free(super->buf);
4318 super->buf = buf;
4319 super->len = len;
4320 }
4321
4322 super->extra_space += space_needed;
4323
4324 return 0;
4325 }
4326
4327 /*******************************************************************************
4328 * Function: check_mpb_migr_compatibility
4329 * Description: Function checks for unsupported migration features:
4330 * - migration optimization area (pba_of_lba0)
4331 * - descending reshape (ascending_migr)
4332 * Parameters:
4333 * super : imsm metadata information
4334 * Returns:
4335 * 0 : migration is compatible
4336 * -1 : migration is not compatible
4337 ******************************************************************************/
4338 int check_mpb_migr_compatibility(struct intel_super *super)
4339 {
4340 struct imsm_map *map0, *map1;
4341 struct migr_record *migr_rec = super->migr_rec;
4342 int i;
4343
4344 for (i = 0; i < super->anchor->num_raid_devs; i++) {
4345 struct imsm_dev *dev_iter = __get_imsm_dev(super->anchor, i);
4346
4347 if (dev_iter &&
4348 dev_iter->vol.migr_state == 1 &&
4349 dev_iter->vol.migr_type == MIGR_GEN_MIGR) {
4350 /* This device is migrating */
4351 map0 = get_imsm_map(dev_iter, MAP_0);
4352 map1 = get_imsm_map(dev_iter, MAP_1);
4353 if (pba_of_lba0(map0) != pba_of_lba0(map1))
4354 /* migration optimization area was used */
4355 return -1;
4356 if (migr_rec->ascending_migr == 0 &&
4357 migr_rec->dest_depth_per_unit > 0)
4358 /* descending reshape not supported yet */
4359 return -1;
4360 }
4361 }
4362 return 0;
4363 }
4364
4365 static void __free_imsm(struct intel_super *super, int free_disks);
4366
4367 /* load_imsm_mpb - read matrix metadata
4368 * allocates super->mpb to be freed by free_imsm
4369 */
4370 static int load_imsm_mpb(int fd, struct intel_super *super, char *devname)
4371 {
4372 unsigned long long dsize;
4373 unsigned long long sectors;
4374 unsigned int sector_size = super->sector_size;
4376 struct imsm_super *anchor;
4377 __u32 check_sum;
4378
4379 get_dev_size(fd, NULL, &dsize);
4380 if (dsize < 2*sector_size) {
4381 if (devname)
4382 pr_err("%s: device to small for imsm\n",
4383 devname);
4384 return 1;
4385 }
4386
4387 if (lseek64(fd, dsize - (sector_size * 2), SEEK_SET) < 0) {
4388 if (devname)
4389 pr_err("Cannot seek to anchor block on %s: %s\n",
4390 devname, strerror(errno));
4391 return 1;
4392 }
4393
4394 if (posix_memalign((void **)&anchor, sector_size, sector_size) != 0) {
4395 if (devname)
4396 pr_err("Failed to allocate imsm anchor buffer on %s\n", devname);
4397 return 1;
4398 }
4399 if ((unsigned int)read(fd, anchor, sector_size) != sector_size) {
4400 if (devname)
4401 pr_err("Cannot read anchor block on %s: %s\n",
4402 devname, strerror(errno));
4403 free(anchor);
4404 return 1;
4405 }
4406
4407 if (strncmp((char *) anchor->sig, MPB_SIGNATURE, MPB_SIG_LEN) != 0) {
4408 if (devname)
4409 pr_err("no IMSM anchor on %s\n", devname);
4410 free(anchor);
4411 return 2;
4412 }
4413
4414 __free_imsm(super, 0);
4415 /* reload capability and hba */
4416
4417 /* capability and hba must be updated with new super allocation */
4418 find_intel_hba_capability(fd, super, devname);
4419 super->len = ROUND_UP(anchor->mpb_size, sector_size);
4420 if (posix_memalign(&super->buf, MAX_SECTOR_SIZE, super->len) != 0) {
4421 if (devname)
4422 pr_err("unable to allocate %zu byte mpb buffer\n",
4423 super->len);
4424 free(anchor);
4425 return 2;
4426 }
4427 memcpy(super->buf, anchor, sector_size);
4428
4429 sectors = mpb_sectors(anchor, sector_size) - 1;
4430 free(anchor);
4431
4432 if (posix_memalign(&super->migr_rec_buf, MAX_SECTOR_SIZE,
4433 MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE) != 0) {
4434 pr_err("could not allocate migr_rec buffer\n");
4435 free(super->buf);
4436 return 2;
4437 }
4438 super->clean_migration_record_by_mdmon = 0;
4439
4440 if (!sectors) {
4441 check_sum = __gen_imsm_checksum(super->anchor);
4442 if (check_sum != __le32_to_cpu(super->anchor->check_sum)) {
4443 if (devname)
4444 pr_err("IMSM checksum %x != %x on %s\n",
4445 check_sum,
4446 __le32_to_cpu(super->anchor->check_sum),
4447 devname);
4448 return 2;
4449 }
4450
4451 return 0;
4452 }
4453
4454 /* read the extended mpb */
4455 if (lseek64(fd, dsize - (sector_size * (2 + sectors)), SEEK_SET) < 0) {
4456 if (devname)
4457 pr_err("Cannot seek to extended mpb on %s: %s\n",
4458 devname, strerror(errno));
4459 return 1;
4460 }
4461
4462 if ((unsigned int)read(fd, super->buf + sector_size,
4463 super->len - sector_size) != super->len - sector_size) {
4464 if (devname)
4465 pr_err("Cannot read extended mpb on %s: %s\n",
4466 devname, strerror(errno));
4467 return 2;
4468 }
4469
4470 check_sum = __gen_imsm_checksum(super->anchor);
4471 if (check_sum != __le32_to_cpu(super->anchor->check_sum)) {
4472 if (devname)
4473 pr_err("IMSM checksum %x != %x on %s\n",
4474 check_sum, __le32_to_cpu(super->anchor->check_sum),
4475 devname);
4476 return 3;
4477 }
4478
4479 return 0;
4480 }
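
/* Illustrative sketch (disabled): the on-disk layout assumed by the
 * reads above. The one-sector anchor sits in the second to last sector
 * of the device and any extended mpb sectors sit immediately before
 * it, so the checksum can only be verified once both pieces are in.
 */
#if 0
	unsigned long long anchor_off = dsize - 2 * sector_size;
	unsigned long long ext_off = dsize - (2 + sectors) * sector_size;
	/* read one sector at anchor_off, then 'sectors' more at ext_off */
#endif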
4481
4482 static int read_imsm_migr_rec(int fd, struct intel_super *super);
4483
4484 /* clears hi bits in metadata if MPB_ATTRIB_2TB_DISK not set */
4485 static void clear_hi(struct intel_super *super)
4486 {
4487 struct imsm_super *mpb = super->anchor;
4488 int i, n;
4489 if (mpb->attributes & MPB_ATTRIB_2TB_DISK)
4490 return;
4491 for (i = 0; i < mpb->num_disks; ++i) {
4492 struct imsm_disk *disk = &mpb->disk[i];
4493 disk->total_blocks_hi = 0;
4494 }
4495 for (i = 0; i < mpb->num_raid_devs; ++i) {
4496 struct imsm_dev *dev = get_imsm_dev(super, i);
4497 if (!dev)
4498 return;
4499 for (n = 0; n < 2; ++n) {
4500 struct imsm_map *map = get_imsm_map(dev, n);
4501 if (!map)
4502 continue;
4503 map->pba_of_lba0_hi = 0;
4504 map->blocks_per_member_hi = 0;
4505 map->num_data_stripes_hi = 0;
4506 }
4507 }
4508 }
4509
4510 static int
4511 load_and_parse_mpb(int fd, struct intel_super *super, char *devname, int keep_fd)
4512 {
4513 int err;
4514
4515 err = load_imsm_mpb(fd, super, devname);
4516 if (err)
4517 return err;
4518 if (super->sector_size == 4096)
4519 convert_from_4k(super);
4520 err = load_imsm_disk(fd, super, devname, keep_fd);
4521 if (err)
4522 return err;
4523 err = parse_raid_devices(super);
4524 if (err)
4525 return err;
4526 err = load_bbm_log(super);
4527 clear_hi(super);
4528 return err;
4529 }
4530
4531 static void __free_imsm_disk(struct dl *d)
4532 {
4533 if (d->fd >= 0)
4534 close(d->fd);
4535 if (d->devname)
4536 free(d->devname);
4537 if (d->e)
4538 free(d->e);
4539 free(d);
4540
4541 }
4542
4543 static void free_imsm_disks(struct intel_super *super)
4544 {
4545 struct dl *d;
4546
4547 while (super->disks) {
4548 d = super->disks;
4549 super->disks = d->next;
4550 __free_imsm_disk(d);
4551 }
4552 while (super->disk_mgmt_list) {
4553 d = super->disk_mgmt_list;
4554 super->disk_mgmt_list = d->next;
4555 __free_imsm_disk(d);
4556 }
4557 while (super->missing) {
4558 d = super->missing;
4559 super->missing = d->next;
4560 __free_imsm_disk(d);
4561 }
4562
4563 }
4564
4565 /* free all the pieces hanging off of a super pointer */
4566 static void __free_imsm(struct intel_super *super, int free_disks)
4567 {
4568 struct intel_hba *elem, *next;
4569
4570 if (super->buf) {
4571 free(super->buf);
4572 super->buf = NULL;
4573 }
4574 /* unlink capability description */
4575 super->orom = NULL;
4576 if (super->migr_rec_buf) {
4577 free(super->migr_rec_buf);
4578 super->migr_rec_buf = NULL;
4579 }
4580 if (free_disks)
4581 free_imsm_disks(super);
4582 free_devlist(super);
4583 elem = super->hba;
4584 while (elem) {
4585 if (elem->path)
4586 free((void *)elem->path);
4587 next = elem->next;
4588 free(elem);
4589 elem = next;
4590 }
4591 if (super->bbm_log)
4592 free(super->bbm_log);
4593 super->hba = NULL;
4594 }
4595
4596 static void free_imsm(struct intel_super *super)
4597 {
4598 __free_imsm(super, 1);
4599 free(super->bb.entries);
4600 free(super);
4601 }
4602
4603 static void free_super_imsm(struct supertype *st)
4604 {
4605 struct intel_super *super = st->sb;
4606
4607 if (!super)
4608 return;
4609
4610 free_imsm(super);
4611 st->sb = NULL;
4612 }
4613
4614 static struct intel_super *alloc_super(void)
4615 {
4616 struct intel_super *super = xcalloc(1, sizeof(*super));
4617
4618 super->current_vol = -1;
4619 super->create_offset = ~((unsigned long long) 0);
4620
4621 super->bb.entries = xmalloc(BBM_LOG_MAX_ENTRIES *
4622 sizeof(struct md_bb_entry));
4623 if (!super->bb.entries) {
4624 free(super);
4625 return NULL;
4626 }
4627
4628 return super;
4629 }
4630
4631 /*
4632 * find and allocate hba and OROM/EFI based on valid fd of RAID component device
4633 */
4634 static int find_intel_hba_capability(int fd, struct intel_super *super, char *devname)
4635 {
4636 struct sys_dev *hba_name;
4637 int rv = 0;
4638
4639 if (fd >= 0 && test_partition(fd)) {
4640 pr_err("imsm: %s is a partition, cannot be used in IMSM\n",
4641 devname);
4642 return 1;
4643 }
4644 if (fd < 0 || check_env("IMSM_NO_PLATFORM")) {
4645 super->orom = NULL;
4646 super->hba = NULL;
4647 return 0;
4648 }
4649 hba_name = find_disk_attached_hba(fd, NULL);
4650 if (!hba_name) {
4651 if (devname)
4652 pr_err("%s is not attached to Intel(R) RAID controller.\n",
4653 devname);
4654 return 1;
4655 }
4656 rv = attach_hba_to_super(super, hba_name);
4657 if (rv == 2) {
4658 if (devname) {
4659 struct intel_hba *hba = super->hba;
4660
4661 pr_err("%s is attached to Intel(R) %s %s (%s),\n"
4662 " but the container is assigned to Intel(R) %s %s (",
4663 devname,
4664 get_sys_dev_type(hba_name->type),
4665 hba_name->type == SYS_DEV_VMD ? "domain" : "RAID controller",
4666 hba_name->pci_id ? : "Err!",
4667 get_sys_dev_type(super->hba->type),
4668 hba->type == SYS_DEV_VMD ? "domain" : "RAID controller");
4669
4670 while (hba) {
4671 fprintf(stderr, "%s", hba->pci_id ? : "Err!");
4672 if (hba->next)
4673 fprintf(stderr, ", ");
4674 hba = hba->next;
4675 }
4676 fprintf(stderr, ").\n"
4677 " Mixing devices attached to different controllers is not allowed.\n");
4678 }
4679 return 2;
4680 }
4681 super->orom = find_imsm_capability(hba_name);
4682 if (!super->orom)
4683 return 3;
4684
4685 return 0;
4686 }
4687
4688 /* find_missing - helper routine for load_super_imsm_all that identifies
4689 * disks that have disappeared from the system. This routine relies on
4690 * the mpb being up to date, which it is at load time.
4691 */
4692 static int find_missing(struct intel_super *super)
4693 {
4694 int i;
4695 struct imsm_super *mpb = super->anchor;
4696 struct dl *dl;
4697 struct imsm_disk *disk;
4698
4699 for (i = 0; i < mpb->num_disks; i++) {
4700 disk = __get_imsm_disk(mpb, i);
4701 dl = serial_to_dl(disk->serial, super);
4702 if (dl)
4703 continue;
4704
4705 dl = xmalloc(sizeof(*dl));
4706 dl->major = 0;
4707 dl->minor = 0;
4708 dl->fd = -1;
4709 dl->devname = xstrdup("missing");
4710 dl->index = i;
4711 serialcpy(dl->serial, disk->serial);
4712 dl->disk = *disk;
4713 dl->e = NULL;
4714 dl->next = super->missing;
4715 super->missing = dl;
4716 }
4717
4718 return 0;
4719 }
4720
4721 static struct intel_disk *disk_list_get(__u8 *serial, struct intel_disk *disk_list)
4722 {
4723 struct intel_disk *idisk = disk_list;
4724
4725 while (idisk) {
4726 if (serialcmp(idisk->disk.serial, serial) == 0)
4727 break;
4728 idisk = idisk->next;
4729 }
4730
4731 return idisk;
4732 }
4733
4734 static int __prep_thunderdome(struct intel_super **table, int tbl_size,
4735 struct intel_super *super,
4736 struct intel_disk **disk_list)
4737 {
4738 struct imsm_disk *d = &super->disks->disk;
4739 struct imsm_super *mpb = super->anchor;
4740 int i, j;
4741
4742 for (i = 0; i < tbl_size; i++) {
4743 struct imsm_super *tbl_mpb = table[i]->anchor;
4744 struct imsm_disk *tbl_d = &table[i]->disks->disk;
4745
4746 if (tbl_mpb->family_num == mpb->family_num) {
4747 if (tbl_mpb->check_sum == mpb->check_sum) {
4748 dprintf("mpb from %d:%d matches %d:%d\n",
4749 super->disks->major,
4750 super->disks->minor,
4751 table[i]->disks->major,
4752 table[i]->disks->minor);
4753 break;
4754 }
4755
4756 if (((is_configured(d) && !is_configured(tbl_d)) ||
4757 is_configured(d) == is_configured(tbl_d)) &&
4758 tbl_mpb->generation_num < mpb->generation_num) {
4759 /* current version of the mpb is a
4760 * better candidate than the one in
4761 * super_table, but copy over "cross
4762 * generational" status
4763 */
4764 struct intel_disk *idisk;
4765
4766 dprintf("mpb from %d:%d replaces %d:%d\n",
4767 super->disks->major,
4768 super->disks->minor,
4769 table[i]->disks->major,
4770 table[i]->disks->minor);
4771
4772 idisk = disk_list_get(tbl_d->serial, *disk_list);
4773 if (idisk && is_failed(&idisk->disk))
4774 tbl_d->status |= FAILED_DISK;
4775 break;
4776 } else {
4777 struct intel_disk *idisk;
4778 struct imsm_disk *disk;
4779
4780 /* tbl_mpb is more up to date, but copy
4781 * over cross generational status before
4782 * returning
4783 */
4784 disk = __serial_to_disk(d->serial, mpb, NULL);
4785 if (disk && is_failed(disk))
4786 d->status |= FAILED_DISK;
4787
4788 idisk = disk_list_get(d->serial, *disk_list);
4789 if (idisk) {
4790 idisk->owner = i;
4791 if (disk && is_configured(disk))
4792 idisk->disk.status |= CONFIGURED_DISK;
4793 }
4794
4795 dprintf("mpb from %d:%d prefer %d:%d\n",
4796 super->disks->major,
4797 super->disks->minor,
4798 table[i]->disks->major,
4799 table[i]->disks->minor);
4800
4801 return tbl_size;
4802 }
4803 }
4804 }
4805
4806 if (i >= tbl_size)
4807 table[tbl_size++] = super;
4808 else
4809 table[i] = super;
4810
4811 /* update/extend the merged list of imsm_disk records */
4812 for (j = 0; j < mpb->num_disks; j++) {
4813 struct imsm_disk *disk = __get_imsm_disk(mpb, j);
4814 struct intel_disk *idisk;
4815
4816 idisk = disk_list_get(disk->serial, *disk_list);
4817 if (idisk) {
4818 idisk->disk.status |= disk->status;
4819 if (is_configured(&idisk->disk) ||
4820 is_failed(&idisk->disk))
4821 idisk->disk.status &= ~(SPARE_DISK);
4822 } else {
4823 idisk = xcalloc(1, sizeof(*idisk));
4824 idisk->owner = IMSM_UNKNOWN_OWNER;
4825 idisk->disk = *disk;
4826 idisk->next = *disk_list;
4827 *disk_list = idisk;
4828 }
4829
4830 if (serialcmp(idisk->disk.serial, d->serial) == 0)
4831 idisk->owner = i;
4832 }
4833
4834 return tbl_size;
4835 }
4836
4837 static struct intel_super *
4838 validate_members(struct intel_super *super, struct intel_disk *disk_list,
4839 const int owner)
4840 {
4841 struct imsm_super *mpb = super->anchor;
4842 int ok_count = 0;
4843 int i;
4844
4845 for (i = 0; i < mpb->num_disks; i++) {
4846 struct imsm_disk *disk = __get_imsm_disk(mpb, i);
4847 struct intel_disk *idisk;
4848
4849 idisk = disk_list_get(disk->serial, disk_list);
4850 if (idisk) {
4851 if (idisk->owner == owner ||
4852 idisk->owner == IMSM_UNKNOWN_OWNER)
4853 ok_count++;
4854 else
4855 dprintf("'%.16s' owner %d != %d\n",
4856 disk->serial, idisk->owner,
4857 owner);
4858 } else {
4859 dprintf("unknown disk %x [%d]: %.16s\n",
4860 __le32_to_cpu(mpb->family_num), i,
4861 disk->serial);
4862 break;
4863 }
4864 }
4865
4866 if (ok_count == mpb->num_disks)
4867 return super;
4868 return NULL;
4869 }
4870
4871 static void show_conflicts(__u32 family_num, struct intel_super *super_list)
4872 {
4873 struct intel_super *s;
4874
4875 for (s = super_list; s; s = s->next) {
4876 if (family_num != s->anchor->family_num)
4877 continue;
4878 pr_err("Conflict, offlining family %#x on '%s'\n",
4879 __le32_to_cpu(family_num), s->disks->devname);
4880 }
4881 }
4882
4883 static struct intel_super *
4884 imsm_thunderdome(struct intel_super **super_list, int len)
4885 {
4886 struct intel_super *super_table[len];
4887 struct intel_disk *disk_list = NULL;
4888 struct intel_super *champion, *spare;
4889 struct intel_super *s, **del;
4890 int tbl_size = 0;
4891 int conflict;
4892 int i;
4893
4894 memset(super_table, 0, sizeof(super_table));
4895 for (s = *super_list; s; s = s->next)
4896 tbl_size = __prep_thunderdome(super_table, tbl_size, s, &disk_list);
4897
4898 for (i = 0; i < tbl_size; i++) {
4899 struct imsm_disk *d;
4900 struct intel_disk *idisk;
4901 struct imsm_super *mpb = super_table[i]->anchor;
4902
4903 s = super_table[i];
4904 d = &s->disks->disk;
4905
4906 /* 'd' must appear in merged disk list for its
4907 * configuration to be valid
4908 */
4909 idisk = disk_list_get(d->serial, disk_list);
4910 if (idisk && idisk->owner == i)
4911 s = validate_members(s, disk_list, i);
4912 else
4913 s = NULL;
4914
4915 if (!s)
4916 dprintf("marking family: %#x from %d:%d offline\n",
4917 mpb->family_num,
4918 super_table[i]->disks->major,
4919 super_table[i]->disks->minor);
4920 super_table[i] = s;
4921 }
4922
4923 /* This is where the mdadm implementation differs from the Windows
4924 * driver which has no strict concept of a container. We can only
4925 * assemble one family from a container, so when returning a prodigal
4926 * array member to this system the code will not be able to disambiguate
4927 * the container contents that should be assembled ("foreign" versus
4928 * "local"). It requires user intervention to set the orig_family_num
4929 * to a new value to establish a new container. The Windows driver in
4930 * this situation fixes up the volume name in place and manages the
4931 * foreign array as an independent entity.
4932 */
4933 s = NULL;
4934 spare = NULL;
4935 conflict = 0;
4936 for (i = 0; i < tbl_size; i++) {
4937 struct intel_super *tbl_ent = super_table[i];
4938 int is_spare = 0;
4939
4940 if (!tbl_ent)
4941 continue;
4942
4943 if (tbl_ent->anchor->num_raid_devs == 0) {
4944 spare = tbl_ent;
4945 is_spare = 1;
4946 }
4947
4948 if (s && !is_spare) {
4949 show_conflicts(tbl_ent->anchor->family_num, *super_list);
4950 conflict++;
4951 } else if (!s && !is_spare)
4952 s = tbl_ent;
4953 }
4954
4955 if (!s)
4956 s = spare;
4957 if (!s) {
4958 champion = NULL;
4959 goto out;
4960 }
4961 champion = s;
4962
4963 if (conflict)
4964 pr_err("Chose family %#x on '%s', assemble conflicts to new container with '--update=uuid'\n",
4965 __le32_to_cpu(s->anchor->family_num), s->disks->devname);
4966
4967 /* collect all dl's onto 'champion', and update them to
4968 * champion's version of the status
4969 */
4970 for (s = *super_list; s; s = s->next) {
4971 struct imsm_super *mpb = champion->anchor;
4972 struct dl *dl = s->disks;
4973
4974 if (s == champion)
4975 continue;
4976
4977 mpb->attributes |= s->anchor->attributes & MPB_ATTRIB_2TB_DISK;
4978
4979 for (i = 0; i < mpb->num_disks; i++) {
4980 struct imsm_disk *disk;
4981
4982 disk = __serial_to_disk(dl->serial, mpb, &dl->index);
4983 if (disk) {
4984 dl->disk = *disk;
4985 /* only set index on disks that are a member of
4986 * a populated container, i.e. one with
4987 * raid_devs
4988 */
4989 if (is_failed(&dl->disk))
4990 dl->index = -2;
4991 else if (is_spare(&dl->disk))
4992 dl->index = -1;
4993 break;
4994 }
4995 }
4996
4997 if (i >= mpb->num_disks) {
4998 struct intel_disk *idisk;
4999
5000 idisk = disk_list_get(dl->serial, disk_list);
5001 if (idisk && is_spare(&idisk->disk) &&
5002 !is_failed(&idisk->disk) && !is_configured(&idisk->disk))
5003 dl->index = -1;
5004 else {
5005 dl->index = -2;
5006 continue;
5007 }
5008 }
5009
5010 dl->next = champion->disks;
5011 champion->disks = dl;
5012 s->disks = NULL;
5013 }
5014
5015 /* delete 'champion' from super_list */
5016 for (del = super_list; *del; ) {
5017 if (*del == champion) {
5018 *del = (*del)->next;
5019 break;
5020 } else
5021 del = &(*del)->next;
5022 }
5023 champion->next = NULL;
5024
5025 out:
5026 while (disk_list) {
5027 struct intel_disk *idisk = disk_list;
5028
5029 disk_list = disk_list->next;
5030 free(idisk);
5031 }
5032
5033 return champion;
5034 }
5035
5036 static int
5037 get_sra_super_block(int fd, struct intel_super **super_list, char *devname, int *max, int keep_fd);
5038 static int get_super_block(struct intel_super **super_list, char *devnm, char *devname,
5039 int major, int minor, int keep_fd);
5040 static int
5041 get_devlist_super_block(struct md_list *devlist, struct intel_super **super_list,
5042 int *max, int keep_fd);
5043
5044 static int load_super_imsm_all(struct supertype *st, int fd, void **sbp,
5045 char *devname, struct md_list *devlist,
5046 int keep_fd)
5047 {
5048 struct intel_super *super_list = NULL;
5049 struct intel_super *super = NULL;
5050 int err = 0;
5051 int i = 0;
5052
5053 if (fd >= 0)
5054 /* 'fd' is an opened container */
5055 err = get_sra_super_block(fd, &super_list, devname, &i, keep_fd);
5056 else
5057 /* get super block from devlist devices */
5058 err = get_devlist_super_block(devlist, &super_list, &i, keep_fd);
5059 if (err)
5060 goto error;
5061 /* all mpbs enter, maybe one leaves */
5062 super = imsm_thunderdome(&super_list, i);
5063 if (!super) {
5064 err = 1;
5065 goto error;
5066 }
5067
5068 if (find_missing(super) != 0) {
5069 free_imsm(super);
5070 err = 2;
5071 goto error;
5072 }
5073
5074 /* load migration record */
5075 err = load_imsm_migr_rec(super);
5076 if (err == -1) {
5077 /* migration is in progress,
5078 * but migr_rec cannot be loaded
5079 */
5080 err = 4;
5081 goto error;
5082 }
5083
5084 /* Check migration compatibility */
5085 if (err == 0 && check_mpb_migr_compatibility(super) != 0) {
5086 pr_err("Unsupported migration detected");
5087 if (devname)
5088 fprintf(stderr, " on %s\n", devname);
5089 else
5090 fprintf(stderr, " (IMSM).\n");
5091
5092 err = 5;
5093 goto error;
5094 }
5095
5096 err = 0;
5097
5098 error:
5099 while (super_list) {
5100 struct intel_super *s = super_list;
5101
5102 super_list = super_list->next;
5103 free_imsm(s);
5104 }
5105
5106 if (err)
5107 return err;
5108
5109 *sbp = super;
5110 if (fd >= 0)
5111 strcpy(st->container_devnm, fd2devnm(fd));
5112 else
5113 st->container_devnm[0] = 0;
5114 if (err == 0 && st->ss == NULL) {
5115 st->ss = &super_imsm;
5116 st->minor_version = 0;
5117 st->max_devs = IMSM_MAX_DEVICES;
5118 }
5119 return 0;
5120 }
5121
5122 static int
5123 get_devlist_super_block(struct md_list *devlist, struct intel_super **super_list,
5124 int *max, int keep_fd)
5125 {
5126 struct md_list *tmpdev;
5127 int err = 0;
5128 int i = 0;
5129
5130 for (i = 0, tmpdev = devlist; tmpdev; tmpdev = tmpdev->next) {
5131 if (tmpdev->used != 1)
5132 continue;
5133 if (tmpdev->container == 1) {
5134 int lmax = 0;
5135 int fd = dev_open(tmpdev->devname, O_RDONLY|O_EXCL);
5136 if (fd < 0) {
5137 pr_err("cannot open device %s: %s\n",
5138 tmpdev->devname, strerror(errno));
5139 err = 8;
5140 goto error;
5141 }
5142 err = get_sra_super_block(fd, super_list,
5143 tmpdev->devname, &lmax,
5144 keep_fd);
5145 i += lmax;
5146 close(fd);
5147 if (err) {
5148 err = 7;
5149 goto error;
5150 }
5151 } else {
5152 int major = major(tmpdev->st_rdev);
5153 int minor = minor(tmpdev->st_rdev);
5154 err = get_super_block(super_list,
5155 NULL,
5156 tmpdev->devname,
5157 major, minor,
5158 keep_fd);
5159 i++;
5160 if (err) {
5161 err = 6;
5162 goto error;
5163 }
5164 }
5165 }
5166 error:
5167 *max = i;
5168 return err;
5169 }
5170
5171 static int get_super_block(struct intel_super **super_list, char *devnm, char *devname,
5172 int major, int minor, int keep_fd)
5173 {
5174 struct intel_super *s;
5175 char nm[32];
5176 int dfd = -1;
5177 int err = 0;
5178 int retry;
5179
5180 s = alloc_super();
5181 if (!s) {
5182 err = 1;
5183 goto error;
5184 }
5185
5186 sprintf(nm, "%d:%d", major, minor);
5187 dfd = dev_open(nm, O_RDWR);
5188 if (dfd < 0) {
5189 err = 2;
5190 goto error;
5191 }
5192
5193 get_dev_sector_size(dfd, NULL, &s->sector_size);
5194 find_intel_hba_capability(dfd, s, devname);
5195 err = load_and_parse_mpb(dfd, s, NULL, keep_fd);
5196
5197 /* retry the load if we might have raced against mdmon */
5198 if (err == 3 && devnm && mdmon_running(devnm))
5199 for (retry = 0; retry < 3; retry++) {
5200 usleep(3000);
5201 err = load_and_parse_mpb(dfd, s, NULL, keep_fd);
5202 if (err != 3)
5203 break;
5204 }
5205 error:
5206 if (!err) {
5207 s->next = *super_list;
5208 *super_list = s;
5209 } else {
5210 if (s)
5211 free_imsm(s);
5212 if (dfd >= 0)
5213 close(dfd);
5214 }
5215 if (dfd >= 0 && !keep_fd)
5216 close(dfd);
5217 return err;
5218
5219 }
5220
5221 static int
5222 get_sra_super_block(int fd, struct intel_super **super_list, char *devname, int *max, int keep_fd)
5223 {
5224 struct mdinfo *sra;
5225 char *devnm;
5226 struct mdinfo *sd;
5227 int err = 0;
5228 int i = 0;
5229 sra = sysfs_read(fd, NULL, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
5230 if (!sra)
5231 return 1;
5232
5233 if (sra->array.major_version != -1 ||
5234 sra->array.minor_version != -2 ||
5235 strcmp(sra->text_version, "imsm") != 0) {
5236 err = 1;
5237 goto error;
5238 }
5239 /* load all mpbs */
5240 devnm = fd2devnm(fd);
5241 for (sd = sra->devs, i = 0; sd; sd = sd->next, i++) {
5242 if (get_super_block(super_list, devnm, devname,
5243 sd->disk.major, sd->disk.minor, keep_fd) != 0) {
5244 err = 7;
5245 goto error;
5246 }
5247 }
5248 error:
5249 sysfs_free(sra);
5250 *max = i;
5251 return err;
5252 }
5253
5254 static int load_container_imsm(struct supertype *st, int fd, char *devname)
5255 {
5256 return load_super_imsm_all(st, fd, &st->sb, devname, NULL, 1);
5257 }
5258
5259 static int load_super_imsm(struct supertype *st, int fd, char *devname)
5260 {
5261 struct intel_super *super;
5262 int rv;
5263 int retry;
5264
5265 if (test_partition(fd))
5266 /* IMSM not allowed on partitions */
5267 return 1;
5268
5269 free_super_imsm(st);
5270
5271 super = alloc_super();
5272 if (!super)
5273 return 1;
5274 get_dev_sector_size(fd, NULL, &super->sector_size);
5275 /* Load hba and capabilities if they exist.
5276 * But do not preclude loading metadata in case capabilities or hba are
5277 * non-compliant and ignore_hw_compat is set.
5278 */
5279 rv = find_intel_hba_capability(fd, super, devname);
5280 /* no orom/efi or non-intel hba of the disk */
5281 if (rv != 0 && st->ignore_hw_compat == 0) {
5282 if (devname)
5283 pr_err("No OROM/EFI properties for %s\n", devname);
5284 free_imsm(super);
5285 return 2;
5286 }
5287 rv = load_and_parse_mpb(fd, super, devname, 0);
5288
5289 /* retry the load if we might have raced against mdmon */
5290 if (rv == 3) {
5291 struct mdstat_ent *mdstat = NULL;
5292 char *name = fd2kname(fd);
5293
5294 if (name)
5295 mdstat = mdstat_by_component(name);
5296
5297 if (mdstat && mdmon_running(mdstat->devnm) && getpid() != mdmon_pid(mdstat->devnm)) {
5298 for (retry = 0; retry < 3; retry++) {
5299 usleep(3000);
5300 rv = load_and_parse_mpb(fd, super, devname, 0);
5301 if (rv != 3)
5302 break;
5303 }
5304 }
5305
5306 free_mdstat(mdstat);
5307 }
5308
5309 if (rv) {
5310 if (devname)
5311 pr_err("Failed to load all information sections on %s\n", devname);
5312 free_imsm(super);
5313 return rv;
5314 }
5315
5316 st->sb = super;
5317 if (st->ss == NULL) {
5318 st->ss = &super_imsm;
5319 st->minor_version = 0;
5320 st->max_devs = IMSM_MAX_DEVICES;
5321 }
5322
5323 /* load migration record */
5324 if (load_imsm_migr_rec(super) == 0) {
5325 /* Check for unsupported migration features */
5326 if (check_mpb_migr_compatibility(super) != 0) {
5327 pr_err("Unsupported migration detected");
5328 if (devname)
5329 fprintf(stderr, " on %s\n", devname);
5330 else
5331 fprintf(stderr, " (IMSM).\n");
5332 return 3;
5333 }
5334 }
5335
5336 return 0;
5337 }
5338
5339 static __u16 info_to_blocks_per_strip(mdu_array_info_t *info)
5340 {
5341 if (info->level == 1)
5342 return 128;
5343 return info->chunk_size >> 9;
5344 }
5345
5346 static unsigned long long info_to_blocks_per_member(mdu_array_info_t *info,
5347 unsigned long long size)
5348 {
5349 if (info->level == 1)
5350 return size * 2;
5351 else
5352 return (size * 2) & ~(info_to_blocks_per_strip(info) - 1);
5353 }
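
/* Worked example: a 128 KiB chunk is 131072 >> 9 = 256 blocks per
 * strip, so a 100000 KiB member of a striped level is trimmed to
 * 200000 & ~255 = 199936 blocks; raid1 skips the rounding and always
 * reports 128 blocks per strip.
 */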
5354
5355 static void imsm_update_version_info(struct intel_super *super)
5356 {
5357 /* update the version and attributes */
5358 struct imsm_super *mpb = super->anchor;
5359 char *version;
5360 struct imsm_dev *dev;
5361 struct imsm_map *map;
5362 int i;
5363
5364 for (i = 0; i < mpb->num_raid_devs; i++) {
5365 dev = get_imsm_dev(super, i);
5366 map = get_imsm_map(dev, MAP_0);
5367 if (__le32_to_cpu(dev->size_high) > 0)
5368 mpb->attributes |= MPB_ATTRIB_2TB;
5369
5370 /* FIXME detect when an array spans a port multiplier */
5371 #if 0
5372 mpb->attributes |= MPB_ATTRIB_PM;
5373 #endif
5374
5375 if (mpb->num_raid_devs > 1 ||
5376 mpb->attributes != MPB_ATTRIB_CHECKSUM_VERIFY) {
5377 version = MPB_VERSION_ATTRIBS;
5378 switch (get_imsm_raid_level(map)) {
5379 case 0: mpb->attributes |= MPB_ATTRIB_RAID0; break;
5380 case 1: mpb->attributes |= MPB_ATTRIB_RAID1; break;
5381 case 10: mpb->attributes |= MPB_ATTRIB_RAID10; break;
5382 case 5: mpb->attributes |= MPB_ATTRIB_RAID5; break;
5383 }
5384 } else {
5385 if (map->num_members >= 5)
5386 version = MPB_VERSION_5OR6_DISK_ARRAY;
5387 else if (dev->status == DEV_CLONE_N_GO)
5388 version = MPB_VERSION_CNG;
5389 else if (get_imsm_raid_level(map) == 5)
5390 version = MPB_VERSION_RAID5;
5391 else if (map->num_members >= 3)
5392 version = MPB_VERSION_3OR4_DISK_ARRAY;
5393 else if (get_imsm_raid_level(map) == 1)
5394 version = MPB_VERSION_RAID1;
5395 else
5396 version = MPB_VERSION_RAID0;
5397 }
5398 strcpy(((char *) mpb->sig) + strlen(MPB_SIGNATURE), version);
5399 }
5400 }
5401
5402 static int check_name(struct intel_super *super, char *name, int quiet)
5403 {
5404 struct imsm_super *mpb = super->anchor;
5405 char *reason = NULL;
5406 char *start = name;
5407 size_t len = strlen(name);
5408 int i;
5409
5410 if (len > 0) {
5411 while (len && isspace(start[len - 1]))
5412 start[--len] = 0;
5413 while (*start && isspace(*start))
5414 ++start, --len;
5415 memmove(name, start, len + 1);
5416 }
5417
5418 if (len > MAX_RAID_SERIAL_LEN)
5419 reason = "must be 16 characters or less";
5420 else if (len == 0)
5421 reason = "must be a non-empty string";
5422
5423 for (i = 0; i < mpb->num_raid_devs; i++) {
5424 struct imsm_dev *dev = get_imsm_dev(super, i);
5425
5426 if (strncmp((char *) dev->volume, name, MAX_RAID_SERIAL_LEN) == 0) {
5427 reason = "already exists";
5428 break;
5429 }
5430 }
5431
5432 if (reason && !quiet)
5433 pr_err("imsm volume name %s\n", reason);
5434
5435 return !reason;
5436 }
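
/* Illustrative sketch (disabled): check_name() trims surrounding
 * whitespace in place and returns non-zero only when the trimmed name
 * is non-empty, at most MAX_RAID_SERIAL_LEN characters, and not
 * already used by a volume in this container.
 */
#if 0
	char name[] = "  vol0  ";

	if (check_name(super, name, 1)) {
		/* name is now "vol0" and safe to store in dev->volume */
	}
#endif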
5437
5438 static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
5439 struct shape *s, char *name,
5440 char *homehost, int *uuid,
5441 long long data_offset)
5442 {
5443 /* We are creating a volume inside a pre-existing container.
5444 * so st->sb is already set.
5445 */
5446 struct intel_super *super = st->sb;
5447 unsigned int sector_size = super->sector_size;
5448 struct imsm_super *mpb = super->anchor;
5449 struct intel_dev *dv;
5450 struct imsm_dev *dev;
5451 struct imsm_vol *vol;
5452 struct imsm_map *map;
5453 int idx = mpb->num_raid_devs;
5454 int i;
5455 int namelen;
5456 unsigned long long array_blocks;
5457 size_t size_old, size_new;
5458 unsigned long long num_data_stripes;
5459 unsigned int data_disks;
5460 unsigned long long size_per_member;
5461
5462 if (super->orom && mpb->num_raid_devs >= super->orom->vpa) {
5463 pr_err("This imsm-container already has the maximum of %d volumes\n", super->orom->vpa);
5464 return 0;
5465 }
5466
5467 /* ensure the mpb is large enough for the new data */
5468 size_old = __le32_to_cpu(mpb->mpb_size);
5469 size_new = disks_to_mpb_size(info->nr_disks);
5470 if (size_new > size_old) {
5471 void *mpb_new;
5472 size_t size_round = ROUND_UP(size_new, sector_size);
5473
5474 if (posix_memalign(&mpb_new, sector_size, size_round) != 0) {
5475 pr_err("could not allocate new mpb\n");
5476 return 0;
5477 }
5478 if (posix_memalign(&super->migr_rec_buf, MAX_SECTOR_SIZE,
5479 MIGR_REC_BUF_SECTORS*
5480 MAX_SECTOR_SIZE) != 0) {
5481 pr_err("could not allocate migr_rec buffer\n");
5482 free(super->buf);
5483 free(super);
5484 free(mpb_new);
5485 return 0;
5486 }
5487 memcpy(mpb_new, mpb, size_old);
5488 free(mpb);
5489 mpb = mpb_new;
5490 super->anchor = mpb_new;
5491 mpb->mpb_size = __cpu_to_le32(size_new);
5492 memset(mpb_new + size_old, 0, size_round - size_old);
5493 super->len = size_round;
5494 }
5495 super->current_vol = idx;
5496
5497 /* handle 'failed_disks' by either:
5498 * a) create dummy disk entries in the table if this is the first
5499 * volume in the array. We add them here as this is the only
5500 * opportunity to add them. add_to_super_imsm_volume()
5501 * handles the non-failed disks and continues incrementing
5502 * mpb->num_disks.
5503 * b) validate that 'failed_disks' matches the current number
5504 * of missing disks if the container is populated
5505 */
5506 if (super->current_vol == 0) {
5507 mpb->num_disks = 0;
5508 for (i = 0; i < info->failed_disks; i++) {
5509 struct imsm_disk *disk;
5510
5511 mpb->num_disks++;
5512 disk = __get_imsm_disk(mpb, i);
5513 disk->status = CONFIGURED_DISK | FAILED_DISK;
5514 disk->scsi_id = __cpu_to_le32(~(__u32)0);
5515 snprintf((char *) disk->serial, MAX_RAID_SERIAL_LEN,
5516 "missing:%d", (__u8)i);
5517 }
5518 find_missing(super);
5519 } else {
5520 int missing = 0;
5521 struct dl *d;
5522
5523 for (d = super->missing; d; d = d->next)
5524 missing++;
5525 if (info->failed_disks > missing) {
5526 pr_err("unable to add 'missing' disk to container\n");
5527 return 0;
5528 }
5529 }
5530
5531 if (!check_name(super, name, 0))
5532 return 0;
5533 dv = xmalloc(sizeof(*dv));
5534 dev = xcalloc(1, sizeof(*dev) + sizeof(__u32) * (info->raid_disks - 1));
5535 /*
5536 * Explicitly allow truncating to not confuse gcc's
5537 * -Werror=stringop-truncation
5538 */
5539 namelen = min((int) strlen(name), MAX_RAID_SERIAL_LEN);
5540 memcpy(dev->volume, name, namelen);
5541 array_blocks = calc_array_size(info->level, info->raid_disks,
5542 info->layout, info->chunk_size,
5543 s->size * BLOCKS_PER_KB);
5544 data_disks = get_data_disks(info->level, info->layout,
5545 info->raid_disks);
5546 array_blocks = round_size_to_mb(array_blocks, data_disks);
5547 size_per_member = array_blocks / data_disks;
5548
5549 set_imsm_dev_size(dev, array_blocks);
5550 dev->status = (DEV_READ_COALESCING | DEV_WRITE_COALESCING);
5551 vol = &dev->vol;
5552 vol->migr_state = 0;
5553 set_migr_type(dev, MIGR_INIT);
5554 vol->dirty = !info->state;
5555 vol->curr_migr_unit = 0;
5556 map = get_imsm_map(dev, MAP_0);
5557 set_pba_of_lba0(map, super->create_offset);
5558 map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info));
5559 map->failed_disk_num = ~0;
5560 if (info->level > 0)
5561 map->map_state = (info->state ? IMSM_T_STATE_NORMAL
5562 : IMSM_T_STATE_UNINITIALIZED);
5563 else
5564 map->map_state = info->failed_disks ? IMSM_T_STATE_FAILED :
5565 IMSM_T_STATE_NORMAL;
5566 map->ddf = 1;
5567
5568 if (info->level == 1 && info->raid_disks > 2) {
5569 free(dev);
5570 free(dv);
5571 pr_err("imsm does not support more than 2 disksin a raid1 volume\n");
5572 return 0;
5573 }
5574
5575 map->raid_level = info->level;
5576 if (info->level == 10) {
5577 map->raid_level = 1;
5578 map->num_domains = info->raid_disks / 2;
5579 } else if (info->level == 1)
5580 map->num_domains = info->raid_disks;
5581 else
5582 map->num_domains = 1;
5583
5584 /* info->size is only an int, so use the 'size' parameter instead */
5585 num_data_stripes = size_per_member / info_to_blocks_per_strip(info);
5586 num_data_stripes /= map->num_domains;
5587 set_num_data_stripes(map, num_data_stripes);
5588
5589 size_per_member += NUM_BLOCKS_DIRTY_STRIPE_REGION;
5590 set_blocks_per_member(map, info_to_blocks_per_member(info,
5591 size_per_member /
5592 BLOCKS_PER_KB));
5593
5594 map->num_members = info->raid_disks;
5595 for (i = 0; i < map->num_members; i++) {
5596 /* initialized in add_to_super */
5597 set_imsm_ord_tbl_ent(map, i, IMSM_ORD_REBUILD);
5598 }
5599 mpb->num_raid_devs++;
5600 mpb->num_raid_devs_created++;
5601 dev->my_vol_raid_dev_num = mpb->num_raid_devs_created;
5602
5603 if (s->consistency_policy <= CONSISTENCY_POLICY_RESYNC) {
5604 dev->rwh_policy = RWH_MULTIPLE_OFF;
5605 } else if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
5606 dev->rwh_policy = RWH_MULTIPLE_DISTRIBUTED;
5607 } else {
5608 free(dev);
5609 free(dv);
5610 pr_err("imsm does not support consistency policy %s\n",
5611 map_num(consistency_policies, s->consistency_policy));
5612 return 0;
5613 }
5614
5615 dv->dev = dev;
5616 dv->index = super->current_vol;
5617 dv->next = super->devlist;
5618 super->devlist = dv;
5619
5620 imsm_update_version_info(super);
5621
5622 return 1;
5623 }
5624
5625 static int init_super_imsm(struct supertype *st, mdu_array_info_t *info,
5626 struct shape *s, char *name,
5627 char *homehost, int *uuid,
5628 unsigned long long data_offset)
5629 {
5630 /* This is primarily called by Create when creating a new array.
5631 * We will then get add_to_super called for each component, and then
5632 * write_init_super called to write it out to each device.
5633 * For IMSM, Create can create on fresh devices or on a pre-existing
5634 * array.
5635 * To create on a pre-existing array a different method will be called.
5636 * This one is just for fresh drives.
5637 */
5638 struct intel_super *super;
5639 struct imsm_super *mpb;
5640 size_t mpb_size;
5641 char *version;
5642
5643 if (data_offset != INVALID_SECTORS) {
5644 pr_err("data-offset not supported by imsm\n");
5645 return 0;
5646 }
5647
5648 if (st->sb)
5649 return init_super_imsm_volume(st, info, s, name, homehost, uuid,
5650 data_offset);
5651
5652 if (info)
5653 mpb_size = disks_to_mpb_size(info->nr_disks);
5654 else
5655 mpb_size = MAX_SECTOR_SIZE;
5656
5657 super = alloc_super();
5658 if (super &&
5659 posix_memalign(&super->buf, MAX_SECTOR_SIZE, mpb_size) != 0) {
5660 free_imsm(super);
5661 super = NULL;
5662 }
5663 if (!super) {
5664 pr_err("could not allocate superblock\n");
5665 return 0;
5666 }
5667 if (posix_memalign(&super->migr_rec_buf, MAX_SECTOR_SIZE,
5668 MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE) != 0) {
5669 pr_err("could not allocate migr_rec buffer\n");
5670 free(super->buf);
5671 free_imsm(super);
5672 return 0;
5673 }
5674 memset(super->buf, 0, mpb_size);
5675 mpb = super->buf;
5676 mpb->mpb_size = __cpu_to_le32(mpb_size);
5677 st->sb = super;
5678
5679 if (info == NULL) {
5680 /* zeroing superblock */
5681 return 0;
5682 }
5683
5684 mpb->attributes = MPB_ATTRIB_CHECKSUM_VERIFY;
5685
5686 version = (char *) mpb->sig;
5687 strcpy(version, MPB_SIGNATURE);
5688 version += strlen(MPB_SIGNATURE);
5689 strcpy(version, MPB_VERSION_RAID0);
5690
5691 return 1;
5692 }
5693
5694 static int drive_validate_sector_size(struct intel_super *super, struct dl *dl)
5695 {
5696 unsigned int member_sector_size;
5697
5698 if (dl->fd < 0) {
5699 pr_err("Invalid file descriptor for %s\n", dl->devname);
5700 return 0;
5701 }
5702
5703 if (!get_dev_sector_size(dl->fd, dl->devname, &member_sector_size))
5704 return 0;
5705 if (member_sector_size != super->sector_size)
5706 return 0;
5707 return 1;
5708 }
5709
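/* add_to_super_imsm_volume()
 * Attach a container member disk to the volume currently being created
 * (super->current_vol). With fd == -1 the disk is located via the
 * raid_disk slot pre-assigned during autolayout, otherwise by
 * major/minor. Rejects spares, validates the sector size against the
 * container, and refreshes 'missing' placeholder slots in the ord table.
 * Returns 0 on success, 1 on error.
 */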
5710 static int add_to_super_imsm_volume(struct supertype *st, mdu_disk_info_t *dk,
5711 int fd, char *devname)
5712 {
5713 struct intel_super *super = st->sb;
5714 struct imsm_super *mpb = super->anchor;
5715 struct imsm_disk *_disk;
5716 struct imsm_dev *dev;
5717 struct imsm_map *map;
5718 struct dl *dl, *df;
5719 int slot;
5720
5721 dev = get_imsm_dev(super, super->current_vol);
5722 map = get_imsm_map(dev, MAP_0);
5723
5724 if (! (dk->state & (1<<MD_DISK_SYNC))) {
5725 pr_err("%s: Cannot add spare devices to IMSM volume\n",
5726 devname);
5727 return 1;
5728 }
5729
5730 if (fd == -1) {
5731 /* we're doing autolayout so grab the pre-marked (in
5732 * validate_geometry) raid_disk
5733 */
5734 for (dl = super->disks; dl; dl = dl->next)
5735 if (dl->raiddisk == dk->raid_disk)
5736 break;
5737 } else {
5738 for (dl = super->disks; dl ; dl = dl->next)
5739 if (dl->major == dk->major &&
5740 dl->minor == dk->minor)
5741 break;
5742 }
5743
5744 if (!dl) {
5745 pr_err("%s is not a member of the same container\n", devname);
5746 return 1;
5747 }
5748
5749 if (mpb->num_disks == 0)
5750 if (!get_dev_sector_size(dl->fd, dl->devname,
5751 &super->sector_size))
5752 return 1;
5753
5754 if (!drive_validate_sector_size(super, dl)) {
5755 pr_err("Combining drives of different sector size in one volume is not allowed\n");
5756 return 1;
5757 }
5758
5759 /* add a pristine spare to the metadata */
5760 if (dl->index < 0) {
5761 dl->index = super->anchor->num_disks;
5762 super->anchor->num_disks++;
5763 }
5764 /* Check the device has not already been added */
5765 slot = get_imsm_disk_slot(map, dl->index);
5766 if (slot >= 0 &&
5767 (get_imsm_ord_tbl_ent(dev, slot, MAP_X) & IMSM_ORD_REBUILD) == 0) {
5768 pr_err("%s has been included in this array twice\n",
5769 devname);
5770 return 1;
5771 }
5772 set_imsm_ord_tbl_ent(map, dk->raid_disk, dl->index);
5773 dl->disk.status = CONFIGURED_DISK;
5774
5775 /* update size of 'missing' disks to be at least as large as the
5776 * largest active member (we only have dummy missing disks when
5777 * creating the first volume)
5778 */
5779 if (super->current_vol == 0) {
5780 for (df = super->missing; df; df = df->next) {
5781 if (total_blocks(&dl->disk) > total_blocks(&df->disk))
5782 set_total_blocks(&df->disk, total_blocks(&dl->disk));
5783 _disk = __get_imsm_disk(mpb, df->index);
5784 *_disk = df->disk;
5785 }
5786 }
5787
5788 /* refresh unset/failed slots to point to valid 'missing' entries */
5789 for (df = super->missing; df; df = df->next)
5790 for (slot = 0; slot < mpb->num_disks; slot++) {
5791 __u32 ord = get_imsm_ord_tbl_ent(dev, slot, MAP_X);
5792
5793 if ((ord & IMSM_ORD_REBUILD) == 0)
5794 continue;
5795 set_imsm_ord_tbl_ent(map, slot, df->index | IMSM_ORD_REBUILD);
5796 if (is_gen_migration(dev)) {
5797 struct imsm_map *map2 = get_imsm_map(dev,
5798 MAP_1);
5799 int slot2 = get_imsm_disk_slot(map2, df->index);
5800 if (slot2 < map2->num_members && slot2 >= 0) {
5801 __u32 ord2 = get_imsm_ord_tbl_ent(dev,
5802 slot2,
5803 MAP_1);
5804 if ((unsigned)df->index ==
5805 ord_to_idx(ord2))
5806 set_imsm_ord_tbl_ent(map2,
5807 slot2,
5808 df->index |
5809 IMSM_ORD_REBUILD);
5810 }
5811 }
5812 dprintf("set slot:%d to missing disk:%d\n", slot, df->index);
5813 break;
5814 }
5815
5816 /* if we are creating the first raid device update the family number */
5817 if (super->current_vol == 0) {
5818 __u32 sum;
5819 struct imsm_dev *_dev = __get_imsm_dev(mpb, 0);
5820
5821 _disk = __get_imsm_disk(mpb, dl->index);
5822 if (!_dev || !_disk) {
5823 pr_err("BUG mpb setup error\n");
5824 return 1;
5825 }
5826 *_dev = *dev;
5827 *_disk = dl->disk;
5828 sum = random32();
5829 sum += __gen_imsm_checksum(mpb);
5830 mpb->family_num = __cpu_to_le32(sum);
5831 mpb->orig_family_num = mpb->family_num;
5832 mpb->creation_time = __cpu_to_le64((__u64)time(NULL));
5833 }
5834 super->current_disk = dl;
5835 return 0;
5836 }
5837
5838 /* mark_spare()
5839 * Marks the disk as a spare and restores the disk serial,
5840 * in case it was previously mangled by a takeover operation.
5841 * returns:
5842 * -1 : critical error
5843 * 0 : disk is marked as spare but serial is not set
5844 * 1 : success
5845 */
5846 int mark_spare(struct dl *disk)
5847 {
5848 __u8 serial[MAX_RAID_SERIAL_LEN];
5849 int ret_val = -1;
5850
5851 if (!disk)
5852 return ret_val;
5853
5854 ret_val = 0;
5855 if (!imsm_read_serial(disk->fd, NULL, serial, MAX_RAID_SERIAL_LEN)) {
5856 /* Restore the disk serial number, because takeover marks the
5857 * disk as failed and appends ':0' to the serial before it
5858 * becomes a spare disk.
5859 */
5860 serialcpy(disk->serial, serial);
5861 serialcpy(disk->disk.serial, serial);
5862 ret_val = 1;
5863 }
5864 disk->disk.status = SPARE_DISK;
5865 disk->index = -1;
5866
5867 return ret_val;
5868 }
5869
5870
5871 static int write_super_imsm_spare(struct intel_super *super, struct dl *d);
5872
5873 static int add_to_super_imsm(struct supertype *st, mdu_disk_info_t *dk,
5874 int fd, char *devname,
5875 unsigned long long data_offset)
5876 {
5877 struct intel_super *super = st->sb;
5878 struct dl *dd;
5879 unsigned long long size;
5880 unsigned int member_sector_size;
5881 __u32 id;
5882 int rv;
5883 struct stat stb;
5884
5885 /* If we are on a RAID-enabled platform, check that the disk is
5886 * attached to the raid controller.
5887 * We do not need to test disk attachment for container-based additions;
5888 * it was already tested when the container was created/assembled.
5889 */
5890 rv = find_intel_hba_capability(fd, super, devname);
5891 /* no orom/efi or non-intel hba of the disk */
5892 if (rv != 0) {
5893 dprintf("capability: %p fd: %d ret: %d\n",
5894 super->orom, fd, rv);
5895 return 1;
5896 }
5897
5898 if (super->current_vol >= 0)
5899 return add_to_super_imsm_volume(st, dk, fd, devname);
5900
5901 fstat(fd, &stb);
5902 dd = xcalloc(1, sizeof(*dd));
5903 dd->major = major(stb.st_rdev);
5904 dd->minor = minor(stb.st_rdev);
5905 dd->devname = devname ? xstrdup(devname) : NULL;
5906 dd->fd = fd;
5907 dd->e = NULL;
5908 dd->action = DISK_ADD;
5909 rv = imsm_read_serial(fd, devname, dd->serial, MAX_RAID_SERIAL_LEN);
5910 if (rv) {
5911 pr_err("failed to retrieve scsi serial, aborting\n");
5912 if (dd->devname)
5913 free(dd->devname);
5914 free(dd);
5915 abort();
5916 }
5917 if (super->hba && ((super->hba->type == SYS_DEV_NVME) ||
5918 (super->hba->type == SYS_DEV_VMD))) {
5919 int i;
5920 char *devpath = diskfd_to_devpath(fd);
5921 char controller_path[PATH_MAX];
5922
5923 if (!devpath) {
5924 pr_err("failed to get devpath, aborting\n");
5925 if (dd->devname)
5926 free(dd->devname);
5927 free(dd);
5928 return 1;
5929 }
5930
5931 snprintf(controller_path, PATH_MAX-1, "%s/device", devpath);
5932 free(devpath);
5933
5934 if (!imsm_is_nvme_supported(dd->fd, 1)) {
5935 if (dd->devname)
5936 free(dd->devname);
5937 free(dd);
5938 return 1;
5939 }
5940
5941 if (devpath_to_vendor(controller_path) == 0x8086) {
5942 /*
5943 * If Intel's NVMe drive has serial ended with
5944 * "-A","-B","-1" or "-2" it means that this is "x8"
5945 * device (double drive on single PCIe card).
5946 * User should be warned about potential data loss.
5947 */
5948 for (i = MAX_RAID_SERIAL_LEN-1; i > 0; i--) {
5949 /* Skip empty character at the end */
5950 if (dd->serial[i] == 0)
5951 continue;
5952
5953 if (((dd->serial[i] == 'A') ||
5954 (dd->serial[i] == 'B') ||
5955 (dd->serial[i] == '1') ||
5956 (dd->serial[i] == '2')) &&
5957 (dd->serial[i-1] == '-'))
5958 pr_err("\tThe action you are about to take may put your data at risk.\n"
5959 "\tPlease note that x8 devices may consist of two separate x4 devices "
5960 "located on a single PCIe port.\n"
5961 "\tRAID 0 is the only supported configuration for this type of x8 device.\n");
5962 break;
5963 }
5964 } else if (super->hba->type == SYS_DEV_VMD && super->orom &&
5965 !imsm_orom_has_tpv_support(super->orom)) {
5966 pr_err("\tPlatform configuration does not support non-Intel NVMe drives.\n"
5967 "\tPlease refer to Intel(R) RSTe/VROC user guide.\n");
5968 free(dd->devname);
5969 free(dd);
5970 return 1;
5971 }
5972 }
5973
5974 get_dev_size(fd, NULL, &size);
5975 get_dev_sector_size(fd, NULL, &member_sector_size);
5976
5977 if (super->sector_size == 0) {
5978 /* this is the first device, so sector_size is not set yet */
5979 super->sector_size = member_sector_size;
5980 }
5981
5982 /* clear migr_rec when adding disk to container */
5983 memset(super->migr_rec_buf, 0, MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE);
5984 if (lseek64(fd, size - MIGR_REC_SECTOR_POSITION*member_sector_size,
5985 SEEK_SET) >= 0) {
5986 if ((unsigned int)write(fd, super->migr_rec_buf,
5987 MIGR_REC_BUF_SECTORS*member_sector_size) !=
5988 MIGR_REC_BUF_SECTORS*member_sector_size)
5989 perror("Write migr_rec failed");
5990 }
5991
5992 size /= 512;
5993 serialcpy(dd->disk.serial, dd->serial);
5994 set_total_blocks(&dd->disk, size);
5995 if (__le32_to_cpu(dd->disk.total_blocks_hi) > 0) {
5996 struct imsm_super *mpb = super->anchor;
5997 mpb->attributes |= MPB_ATTRIB_2TB_DISK;
5998 }
5999 mark_spare(dd);
6000 if (sysfs_disk_to_scsi_id(fd, &id) == 0)
6001 dd->disk.scsi_id = __cpu_to_le32(id);
6002 else
6003 dd->disk.scsi_id = __cpu_to_le32(0);
6004
6005 if (st->update_tail) {
6006 dd->next = super->disk_mgmt_list;
6007 super->disk_mgmt_list = dd;
6008 } else {
6009 /* this is called outside of mdmon
6010 * write initial spare metadata
6011 * mdmon will overwrite it.
6012 */
6013 dd->next = super->disks;
6014 super->disks = dd;
6015 write_super_imsm_spare(super, dd);
6016 }
6017
6018 return 0;
6019 }
6020
6021 static int remove_from_super_imsm(struct supertype *st, mdu_disk_info_t *dk)
6022 {
6023 struct intel_super *super = st->sb;
6024 struct dl *dd;
6025
6026 /* remove from super works only in mdmon - for manager to
6027 * monitor communication. Check that the communication memory
6028 * buffer is prepared.
6029 */
6030 if (!st->update_tail) {
6031 pr_err("shall be used in mdmon context only\n");
6032 return 1;
6033 }
6034 dd = xcalloc(1, sizeof(*dd));
6035 dd->major = dk->major;
6036 dd->minor = dk->minor;
6037 dd->fd = -1;
6038 mark_spare(dd);
6039 dd->action = DISK_REMOVE;
6040
6041 dd->next = super->disk_mgmt_list;
6042 super->disk_mgmt_list = dd;
6043
6044 return 0;
6045 }
6046
6047 static int store_imsm_mpb(int fd, struct imsm_super *mpb);
6048
6049 static union {
6050 char buf[MAX_SECTOR_SIZE];
6051 struct imsm_super anchor;
6052 } spare_record __attribute__ ((aligned(MAX_SECTOR_SIZE)));
6053
6054
6055 static int write_super_imsm_spare(struct intel_super *super, struct dl *d)
6056 {
6057 struct imsm_super *mpb = super->anchor;
6058 struct imsm_super *spare = &spare_record.anchor;
6059 __u32 sum;
6060
6061 if (d->index != -1)
6062 return 1;
6063
6064 spare->mpb_size = __cpu_to_le32(sizeof(struct imsm_super));
6065 spare->generation_num = __cpu_to_le32(1UL);
6066 spare->attributes = MPB_ATTRIB_CHECKSUM_VERIFY;
6067 spare->num_disks = 1;
6068 spare->num_raid_devs = 0;
6069 spare->cache_size = mpb->cache_size;
6070 spare->pwr_cycle_count = __cpu_to_le32(1);
6071
6072 snprintf((char *) spare->sig, MAX_SIGNATURE_LENGTH,
6073 MPB_SIGNATURE MPB_VERSION_RAID0);
6074
6075 spare->disk[0] = d->disk;
6076 if (__le32_to_cpu(d->disk.total_blocks_hi) > 0)
6077 spare->attributes |= MPB_ATTRIB_2TB_DISK;
6078
6079 if (super->sector_size == 4096)
6080 convert_to_4k_imsm_disk(&spare->disk[0]);
6081
6082 sum = __gen_imsm_checksum(spare);
6083 spare->family_num = __cpu_to_le32(sum);
6084 spare->orig_family_num = 0;
6085 sum = __gen_imsm_checksum(spare);
6086 spare->check_sum = __cpu_to_le32(sum);
6087
6088 if (store_imsm_mpb(d->fd, spare)) {
6089 pr_err("failed for device %d:%d %s\n",
6090 d->major, d->minor, strerror(errno));
6091 return 1;
6092 }
6093
6094 return 0;
6095 }
6096 /* spare records have their own family number and do not have any defined raid
6097 * devices
6098 */
6099 static int write_super_imsm_spares(struct intel_super *super, int doclose)
6100 {
6101 struct dl *d;
6102
6103 for (d = super->disks; d; d = d->next) {
6104 if (d->index != -1)
6105 continue;
6106
6107 if (write_super_imsm_spare(super, d))
6108 return 1;
6109
6110 if (doclose) {
6111 close(d->fd);
6112 d->fd = -1;
6113 }
6114 }
6115
6116 return 0;
6117 }
6118
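/* write_super_imsm()
 * Flush the in-memory metadata to all member disks: bump the generation
 * number, rebuild the disk and raid-device tables in the anchor, append
 * the BBM log when present, recompute the checksum and store the MPB on
 * every non-failed member (optionally clearing the on-disk migration
 * record). Spares are handled separately by write_super_imsm_spares().
 */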
6119 static int write_super_imsm(struct supertype *st, int doclose)
6120 {
6121 struct intel_super *super = st->sb;
6122 unsigned int sector_size = super->sector_size;
6123 struct imsm_super *mpb = super->anchor;
6124 struct dl *d;
6125 __u32 generation;
6126 __u32 sum;
6127 int spares = 0;
6128 int i;
6129 __u32 mpb_size = sizeof(struct imsm_super) - sizeof(struct imsm_disk);
6130 int num_disks = 0;
6131 int clear_migration_record = 1;
6132 __u32 bbm_log_size;
6133
6134 /* 'generation' is incremented every time the metadata is written */
6135 generation = __le32_to_cpu(mpb->generation_num);
6136 generation++;
6137 mpb->generation_num = __cpu_to_le32(generation);
6138
6139 /* fix up cases where previous mdadm releases failed to set
6140 * orig_family_num
6141 */
6142 if (mpb->orig_family_num == 0)
6143 mpb->orig_family_num = mpb->family_num;
6144
6145 for (d = super->disks; d; d = d->next) {
6146 if (d->index == -1)
6147 spares++;
6148 else {
6149 mpb->disk[d->index] = d->disk;
6150 num_disks++;
6151 }
6152 }
6153 for (d = super->missing; d; d = d->next) {
6154 mpb->disk[d->index] = d->disk;
6155 num_disks++;
6156 }
6157 mpb->num_disks = num_disks;
6158 mpb_size += sizeof(struct imsm_disk) * mpb->num_disks;
6159
6160 for (i = 0; i < mpb->num_raid_devs; i++) {
6161 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
6162 struct imsm_dev *dev2 = get_imsm_dev(super, i);
6163 if (dev && dev2) {
6164 imsm_copy_dev(dev, dev2);
6165 mpb_size += sizeof_imsm_dev(dev, 0);
6166 }
6167 if (is_gen_migration(dev2))
6168 clear_migration_record = 0;
6169 }
6170
6171 bbm_log_size = get_imsm_bbm_log_size(super->bbm_log);
6172
6173 if (bbm_log_size) {
6174 memcpy((void *)mpb + mpb_size, super->bbm_log, bbm_log_size);
6175 mpb->attributes |= MPB_ATTRIB_BBM;
6176 } else
6177 mpb->attributes &= ~MPB_ATTRIB_BBM;
6178
6179 super->anchor->bbm_log_size = __cpu_to_le32(bbm_log_size);
6180 mpb_size += bbm_log_size;
6181 mpb->mpb_size = __cpu_to_le32(mpb_size);
6182
6183 #ifdef DEBUG
6184 assert(super->len == 0 || mpb_size <= super->len);
6185 #endif
6186
6187 /* recalculate checksum */
6188 sum = __gen_imsm_checksum(mpb);
6189 mpb->check_sum = __cpu_to_le32(sum);
6190
6191 if (super->clean_migration_record_by_mdmon) {
6192 clear_migration_record = 1;
6193 super->clean_migration_record_by_mdmon = 0;
6194 }
6195 if (clear_migration_record)
6196 memset(super->migr_rec_buf, 0,
6197 MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE);
6198
6199 if (sector_size == 4096)
6200 convert_to_4k(super);
6201
6202 /* write the mpb for disks that compose raid devices */
6203 for (d = super->disks; d ; d = d->next) {
6204 if (d->index < 0 || is_failed(&d->disk))
6205 continue;
6206
6207 if (clear_migration_record) {
6208 unsigned long long dsize;
6209
6210 get_dev_size(d->fd, NULL, &dsize);
6211 if (lseek64(d->fd, dsize - sector_size,
6212 SEEK_SET) >= 0) {
6213 if ((unsigned int)write(d->fd,
6214 super->migr_rec_buf,
6215 MIGR_REC_BUF_SECTORS*sector_size) !=
6216 MIGR_REC_BUF_SECTORS*sector_size)
6217 perror("Write migr_rec failed");
6218 }
6219 }
6220
6221 if (store_imsm_mpb(d->fd, mpb))
6222 fprintf(stderr,
6223 "failed for device %d:%d (fd: %d)%s\n",
6224 d->major, d->minor,
6225 d->fd, strerror(errno));
6226
6227 if (doclose) {
6228 close(d->fd);
6229 d->fd = -1;
6230 }
6231 }
6232
6233 if (spares)
6234 return write_super_imsm_spares(super, doclose);
6235
6236 return 0;
6237 }
6238
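/* create_array()
 * Queue an update_create_array metadata update for mdmon, carrying a
 * copy of the new imsm_dev at dev_idx plus a disk_info entry (serial)
 * for every member of its first map.
 */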
6239 static int create_array(struct supertype *st, int dev_idx)
6240 {
6241 size_t len;
6242 struct imsm_update_create_array *u;
6243 struct intel_super *super = st->sb;
6244 struct imsm_dev *dev = get_imsm_dev(super, dev_idx);
6245 struct imsm_map *map = get_imsm_map(dev, MAP_0);
6246 struct disk_info *inf;
6247 struct imsm_disk *disk;
6248 int i;
6249
6250 len = sizeof(*u) - sizeof(*dev) + sizeof_imsm_dev(dev, 0) +
6251 sizeof(*inf) * map->num_members;
6252 u = xmalloc(len);
6253 u->type = update_create_array;
6254 u->dev_idx = dev_idx;
6255 imsm_copy_dev(&u->dev, dev);
6256 inf = get_disk_info(u);
6257 for (i = 0; i < map->num_members; i++) {
6258 int idx = get_imsm_disk_idx(dev, i, MAP_X);
6259
6260 disk = get_imsm_disk(super, idx);
6261 if (!disk)
6262 disk = get_imsm_missing(super, idx);
6263 serialcpy(inf[i].serial, disk->serial);
6264 }
6265 append_metadata_update(st, u, len);
6266
6267 return 0;
6268 }
6269
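/* mgmt_disk()
 * Queue an update_add_remove_disk metadata update, signalling mdmon
 * that disk add/remove requests are pending on super->disk_mgmt_list.
 */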
6270 static int mgmt_disk(struct supertype *st)
6271 {
6272 struct intel_super *super = st->sb;
6273 size_t len;
6274 struct imsm_update_add_remove_disk *u;
6275
6276 if (!super->disk_mgmt_list)
6277 return 0;
6278
6279 len = sizeof(*u);
6280 u = xmalloc(len);
6281 u->type = update_add_remove_disk;
6282 append_metadata_update(st, u, len);
6283
6284 return 0;
6285 }
6286
6287 __u32 crc32c_le(__u32 crc, unsigned char const *p, size_t len);
6288
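/* write_ppl_header()
 * Checksum and write one PPL (partial parity log) header at ppl_sector
 * (in 512-byte units). The caller provides buf holding PPL_HEADER_SIZE
 * bytes with a zeroed checksum field; the CRC32C is computed over the
 * whole header before it is written and fsync'd.
 */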
6289 static int write_ppl_header(unsigned long long ppl_sector, int fd, void *buf)
6290 {
6291 struct ppl_header *ppl_hdr = buf;
6292 int ret;
6293
6294 ppl_hdr->checksum = __cpu_to_le32(~crc32c_le(~0, buf, PPL_HEADER_SIZE));
6295
6296 if (lseek64(fd, ppl_sector * 512, SEEK_SET) < 0) {
6297 ret = -errno;
6298 perror("Failed to seek to PPL header location");
6299 return ret;
6300 }
6301
6302 if (write(fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
6303 ret = -errno;
6304 perror("Write PPL header failed");
6305 return ret;
6306 }
6307
6308 fsync(fd);
6309
6310 return 0;
6311 }
6312
6313 static int write_init_ppl_imsm(struct supertype *st, struct mdinfo *info, int fd)
6314 {
6315 struct intel_super *super = st->sb;
6316 void *buf;
6317 struct ppl_header *ppl_hdr;
6318 int ret;
6319
6320 /* first clear entire ppl space */
6321 ret = zero_disk_range(fd, info->ppl_sector, info->ppl_size);
6322 if (ret)
6323 return ret;
6324
6325 ret = posix_memalign(&buf, MAX_SECTOR_SIZE, PPL_HEADER_SIZE);
6326 if (ret) {
6327 pr_err("Failed to allocate PPL header buffer\n");
6328 return -ret;
6329 }
6330
6331 memset(buf, 0, PPL_HEADER_SIZE);
6332 ppl_hdr = buf;
6333 memset(ppl_hdr->reserved, 0xff, PPL_HDR_RESERVED);
6334 ppl_hdr->signature = __cpu_to_le32(super->anchor->orig_family_num);
6335
6336 if (info->mismatch_cnt) {
6337 /*
6338 * We are overwriting an invalid ppl. Make one entry with wrong
6339 * checksum to prevent the kernel from skipping resync.
6340 */
6341 ppl_hdr->entries_count = __cpu_to_le32(1);
6342 ppl_hdr->entries[0].checksum = ~0;
6343 }
6344
6345 ret = write_ppl_header(info->ppl_sector, fd, buf);
6346
6347 free(buf);
6348 return ret;
6349 }
6350
6351 static int is_rebuilding(struct imsm_dev *dev);
6352
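/* validate_ppl_imsm()
 * Scan the PPL area of the member backing 'disk' for the most recent
 * valid header, checking checksum, signature (orig_family_num) and
 * generation number. Also migrates older metadata to the 1MB
 * multiple-PPLs area. Returns 0 when a valid log is found; on an
 * invalid log the header is reinitialized or info->mismatch_cnt is
 * bumped; negative on I/O error.
 */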
6353 static int validate_ppl_imsm(struct supertype *st, struct mdinfo *info,
6354 struct mdinfo *disk)
6355 {
6356 struct intel_super *super = st->sb;
6357 struct dl *d;
6358 void *buf_orig, *buf, *buf_prev = NULL;
6359 int ret = 0;
6360 struct ppl_header *ppl_hdr = NULL;
6361 __u32 crc;
6362 struct imsm_dev *dev;
6363 __u32 idx;
6364 unsigned int i;
6365 unsigned long long ppl_offset = 0;
6366 unsigned long long prev_gen_num = 0;
6367
6368 if (disk->disk.raid_disk < 0)
6369 return 0;
6370
6371 dev = get_imsm_dev(super, info->container_member);
6372 idx = get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_0);
6373 d = get_imsm_dl_disk(super, idx);
6374
6375 if (!d || d->index < 0 || is_failed(&d->disk))
6376 return 0;
6377
6378 if (posix_memalign(&buf_orig, MAX_SECTOR_SIZE, PPL_HEADER_SIZE * 2)) {
6379 pr_err("Failed to allocate PPL header buffer\n");
6380 return -1;
6381 }
6382 buf = buf_orig;
6383
6384 ret = 1;
6385 while (ppl_offset < MULTIPLE_PPL_AREA_SIZE_IMSM) {
6386 void *tmp;
6387
6388 dprintf("Checking potential PPL at offset: %llu\n", ppl_offset);
6389
6390 if (lseek64(d->fd, info->ppl_sector * 512 + ppl_offset,
6391 SEEK_SET) < 0) {
6392 perror("Failed to seek to PPL header location");
6393 ret = -1;
6394 break;
6395 }
6396
6397 if (read(d->fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
6398 perror("Read PPL header failed");
6399 ret = -1;
6400 break;
6401 }
6402
6403 ppl_hdr = buf;
6404
6405 crc = __le32_to_cpu(ppl_hdr->checksum);
6406 ppl_hdr->checksum = 0;
6407
6408 if (crc != ~crc32c_le(~0, buf, PPL_HEADER_SIZE)) {
6409 dprintf("Wrong PPL header checksum on %s\n",
6410 d->devname);
6411 break;
6412 }
6413
6414 if (prev_gen_num > __le64_to_cpu(ppl_hdr->generation)) {
6415 /* previous was newest, it was already checked */
6416 break;
6417 }
6418
6419 if ((__le32_to_cpu(ppl_hdr->signature) !=
6420 super->anchor->orig_family_num)) {
6421 dprintf("Wrong PPL header signature on %s\n",
6422 d->devname);
6423 ret = 1;
6424 break;
6425 }
6426
6427 ret = 0;
6428 prev_gen_num = __le64_to_cpu(ppl_hdr->generation);
6429
6430 ppl_offset += PPL_HEADER_SIZE;
6431 for (i = 0; i < __le32_to_cpu(ppl_hdr->entries_count); i++)
6432 ppl_offset +=
6433 __le32_to_cpu(ppl_hdr->entries[i].pp_size);
6434
6435 if (!buf_prev)
6436 buf_prev = buf + PPL_HEADER_SIZE;
6437 tmp = buf_prev;
6438 buf_prev = buf;
6439 buf = tmp;
6440 }
6441
6442 if (buf_prev) {
6443 buf = buf_prev;
6444 ppl_hdr = buf_prev;
6445 }
6446
6447 /*
6448 * Update metadata to use the multiple-PPLs area (1MB).
6449 * This is done once for all RAID members.
6450 */
6451 if (info->consistency_policy == CONSISTENCY_POLICY_PPL &&
6452 info->ppl_size != (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9)) {
6453 char subarray[20];
6454 struct mdinfo *member_dev;
6455
6456 sprintf(subarray, "%d", info->container_member);
6457
6458 if (mdmon_running(st->container_devnm))
6459 st->update_tail = &st->updates;
6460
6461 if (st->ss->update_subarray(st, subarray, "ppl", NULL)) {
6462 pr_err("Failed to update subarray %s\n",
6463 subarray);
6464 } else {
6465 if (st->update_tail)
6466 flush_metadata_updates(st);
6467 else
6468 st->ss->sync_metadata(st);
6469 info->ppl_size = (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9);
6470 for (member_dev = info->devs; member_dev;
6471 member_dev = member_dev->next)
6472 member_dev->ppl_size =
6473 (MULTIPLE_PPL_AREA_SIZE_IMSM >> 9);
6474 }
6475 }
6476
6477 if (ret == 1) {
6478 struct imsm_map *map = get_imsm_map(dev, MAP_X);
6479
6480 if (map->map_state == IMSM_T_STATE_UNINITIALIZED ||
6481 (map->map_state == IMSM_T_STATE_NORMAL &&
6482 !(dev->vol.dirty & RAIDVOL_DIRTY)) ||
6483 (is_rebuilding(dev) &&
6484 dev->vol.curr_migr_unit == 0 &&
6485 get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_1) != idx))
6486 ret = st->ss->write_init_ppl(st, info, d->fd);
6487 else
6488 info->mismatch_cnt++;
6489 } else if (ret == 0 &&
6490 ppl_hdr->entries_count == 0 &&
6491 is_rebuilding(dev) &&
6492 info->resync_start == 0) {
6493 /*
6494 * The header has no entries - add a single empty entry and
6495 * rewrite the header to prevent the kernel from going into
6496 * resync after an interrupted rebuild.
6497 */
6498 ppl_hdr->entries_count = __cpu_to_le32(1);
6499 ret = write_ppl_header(info->ppl_sector, d->fd, buf);
6500 }
6501
6502 free(buf_orig);
6503
6504 return ret;
6505 }
6506
6507 static int write_init_ppl_imsm_all(struct supertype *st, struct mdinfo *info)
6508 {
6509 struct intel_super *super = st->sb;
6510 struct dl *d;
6511 int ret = 0;
6512
6513 if (info->consistency_policy != CONSISTENCY_POLICY_PPL ||
6514 info->array.level != 5)
6515 return 0;
6516
6517 for (d = super->disks; d ; d = d->next) {
6518 if (d->index < 0 || is_failed(&d->disk))
6519 continue;
6520
6521 ret = st->ss->write_init_ppl(st, info, d->fd);
6522 if (ret)
6523 break;
6524 }
6525
6526 return ret;
6527 }
6528
6529 /*******************************************************************************
6530 * Function: write_init_bitmap_imsm_vol
6531 * Description: Writes a bitmap header and prepares the area for the bitmap.
6532 * Parameters:
6533 * st : supertype information
6534 * vol_idx : the volume index to use
6535 *
6536 * Returns:
6537 * 0 : success
6538 * -1 : fail
6539 ******************************************************************************/
6540 static int write_init_bitmap_imsm_vol(struct supertype *st, int vol_idx)
6541 {
6542 struct intel_super *super = st->sb;
6543 int prev_current_vol = super->current_vol;
6544 struct dl *d;
6545 int ret = 0;
6546
6547 super->current_vol = vol_idx;
6548 for (d = super->disks; d; d = d->next) {
6549 if (d->index < 0 || is_failed(&d->disk))
6550 continue;
6551 ret = st->ss->write_bitmap(st, d->fd, NoUpdate);
6552 if (ret)
6553 break;
6554 }
6555 super->current_vol = prev_current_vol;
6556 return ret;
6557 }
6558
6559 /*******************************************************************************
6560 * Function: write_init_bitmap_imsm_all
6561 * Description: Writes a bitmap header and prepares the area for the bitmap.
6562 * Operation is executed for volumes with CONSISTENCY_POLICY_BITMAP.
6563 * Parameters:
6564 * st : supertype information
6565 * info : info about the volume where the bitmap should be written
6566 * vol_idx : the volume index to use
6567 *
6568 * Returns:
6569 * 0 : success
6570 * -1 : fail
6571 ******************************************************************************/
6572 static int write_init_bitmap_imsm_all(struct supertype *st, struct mdinfo *info,
6573 int vol_idx)
6574 {
6575 int ret = 0;
6576
6577 if (info && (info->consistency_policy == CONSISTENCY_POLICY_BITMAP))
6578 ret = write_init_bitmap_imsm_vol(st, vol_idx);
6579
6580 return ret;
6581 }
6582
6583 static int write_init_super_imsm(struct supertype *st)
6584 {
6585 struct intel_super *super = st->sb;
6586 int current_vol = super->current_vol;
6587 int rv = 0;
6588 struct mdinfo info;
6589
6590 getinfo_super_imsm(st, &info, NULL);
6591
6592 /* we are done with current_vol reset it to point st at the container */
6593 super->current_vol = -1;
6594
6595 if (st->update_tail) {
6596 /* queue the recently created array / added disk
6597 * as a metadata update */
6598
6599 /* determine if we are creating a volume or adding a disk */
6600 if (current_vol < 0) {
6601 /* in the mgmt (add/remove) disk case we are running
6602 * in mdmon context, so don't close fd's
6603 */
6604 rv = mgmt_disk(st);
6605 } else {
6606 /* adding the second volume to the array */
6607 rv = write_init_ppl_imsm_all(st, &info);
6608 if (!rv)
6609 rv = write_init_bitmap_imsm_all(st, &info, current_vol);
6610 if (!rv)
6611 rv = create_array(st, current_vol);
6612 }
6613 } else {
6614 struct dl *d;
6615 for (d = super->disks; d; d = d->next)
6616 Kill(d->devname, NULL, 0, -1, 1);
6617 if (current_vol >= 0) {
6618 rv = write_init_ppl_imsm_all(st, &info);
6619 if (!rv)
6620 rv = write_init_bitmap_imsm_all(st, &info, current_vol);
6621 }
6622
6623 if (!rv)
6624 rv = write_super_imsm(st, 1);
6625 }
6626
6627 return rv;
6628 }
6629
6630 static int store_super_imsm(struct supertype *st, int fd)
6631 {
6632 struct intel_super *super = st->sb;
6633 struct imsm_super *mpb = super ? super->anchor : NULL;
6634
6635 if (!mpb)
6636 return 1;
6637
6638 if (super->sector_size == 4096)
6639 convert_to_4k(super);
6640 return store_imsm_mpb(fd, mpb);
6641 }
6642
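/* validate_geometry_imsm_container()
 * Check that a bare device can seed a new IMSM container: it must open
 * O_EXCL and sit behind a supported Intel HBA/OROM, and the platform
 * limits on disk count and (without the 2TB attribute) disk size must
 * hold. On success the usable size is returned via *freesize.
 */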
6643 static int validate_geometry_imsm_container(struct supertype *st, int level,
6644 int layout, int raiddisks, int chunk,
6645 unsigned long long size,
6646 unsigned long long data_offset,
6647 char *dev,
6648 unsigned long long *freesize,
6649 int verbose)
6650 {
6651 int fd;
6652 unsigned long long ldsize;
6653 struct intel_super *super;
6654 int rv = 0;
6655
6656 if (level != LEVEL_CONTAINER)
6657 return 0;
6658 if (!dev)
6659 return 1;
6660
6661 fd = open(dev, O_RDONLY|O_EXCL, 0);
6662 if (fd < 0) {
6663 if (verbose > 0)
6664 pr_err("imsm: Cannot open %s: %s\n",
6665 dev, strerror(errno));
6666 return 0;
6667 }
6668 if (!get_dev_size(fd, dev, &ldsize)) {
6669 close(fd);
6670 return 0;
6671 }
6672
6673 /* retrieving capabilities may be possible;
6674 * note that there is no fd for the disks in the array.
6675 */
6676 super = alloc_super();
6677 if (!super) {
6678 close(fd);
6679 return 0;
6680 }
6681 if (!get_dev_sector_size(fd, NULL, &super->sector_size)) {
6682 close(fd);
6683 free_imsm(super);
6684 return 0;
6685 }
6686
6687 rv = find_intel_hba_capability(fd, super, verbose > 0 ? dev : NULL);
6688 if (rv != 0) {
6689 #if DEBUG
6690 char str[256];
6691 fd2devname(fd, str);
6692 dprintf("fd: %d %s orom: %p rv: %d raiddisk: %d\n",
6693 fd, str, super->orom, rv, raiddisks);
6694 #endif
6695 /* no orom/efi or non-intel hba of the disk */
6696 close(fd);
6697 free_imsm(super);
6698 return 0;
6699 }
6700 close(fd);
6701 if (super->orom) {
6702 if (raiddisks > super->orom->tds) {
6703 if (verbose)
6704 pr_err("%d exceeds maximum number of platform supported disks: %d\n",
6705 raiddisks, super->orom->tds);
6706 free_imsm(super);
6707 return 0;
6708 }
6709 if ((super->orom->attr & IMSM_OROM_ATTR_2TB_DISK) == 0 &&
6710 (ldsize >> 9) >> 32 > 0) {
6711 if (verbose)
6712 pr_err("%s exceeds maximum platform supported size\n", dev);
6713 free_imsm(super);
6714 return 0;
6715 }
6716 }
6717
6718 *freesize = avail_size_imsm(st, ldsize >> 9, data_offset);
6719 free_imsm(super);
6720
6721 return 1;
6722 }
6723
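/* find_size()
 * Starting from extent *idx, merge every following extent that overlaps
 * or abuts the growing region and return the merged length; *idx is
 * advanced to the first extent past the region. A zero-size extent acts
 * as the list terminator and yields 0.
 */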
6724 static unsigned long long find_size(struct extent *e, int *idx, int num_extents)
6725 {
6726 const unsigned long long base_start = e[*idx].start;
6727 unsigned long long end = base_start + e[*idx].size;
6728 int i;
6729
6730 if (base_start == end)
6731 return 0;
6732
6733 *idx = *idx + 1;
6734 for (i = *idx; i < num_extents; i++) {
6735 /* extend overlapping extents */
6736 if (e[i].start >= base_start &&
6737 e[i].start <= end) {
6738 if (e[i].size == 0)
6739 return 0;
6740 if (e[i].start + e[i].size > end)
6741 end = e[i].start + e[i].size;
6742 } else if (e[i].start > end) {
6743 *idx = i;
6744 break;
6745 }
6746 }
6747
6748 return end - base_start;
6749 }
6750
6751 static unsigned long long merge_extents(struct intel_super *super, int sum_extents)
6752 {
6753 /* build a composite disk with all known extents and generate a new
6754 * 'maxsize' given the "all disks in an array must share a common start
6755 * offset" constraint
6756 */
6757 struct extent *e = xcalloc(sum_extents, sizeof(*e));
6758 struct dl *dl;
6759 int i, j;
6760 int start_extent;
6761 unsigned long long pos;
6762 unsigned long long start = 0;
6763 unsigned long long maxsize;
6764 unsigned long reserve;
6765
6766 /* coalesce and sort all extents. also, check to see if we need to
6767 * reserve space between member arrays
6768 */
6769 j = 0;
6770 for (dl = super->disks; dl; dl = dl->next) {
6771 if (!dl->e)
6772 continue;
6773 for (i = 0; i < dl->extent_cnt; i++)
6774 e[j++] = dl->e[i];
6775 }
6776 qsort(e, sum_extents, sizeof(*e), cmp_extent);
6777
6778 /* merge extents */
6779 i = 0;
6780 j = 0;
6781 while (i < sum_extents) {
6782 e[j].start = e[i].start;
6783 e[j].size = find_size(e, &i, sum_extents);
6784 j++;
6785 if (e[j-1].size == 0)
6786 break;
6787 }
6788
6789 pos = 0;
6790 maxsize = 0;
6791 start_extent = 0;
6792 i = 0;
6793 do {
6794 unsigned long long esize;
6795
6796 esize = e[i].start - pos;
6797 if (esize >= maxsize) {
6798 maxsize = esize;
6799 start = pos;
6800 start_extent = i;
6801 }
6802 pos = e[i].start + e[i].size;
6803 i++;
6804 } while (e[i-1].size);
6805 free(e);
6806
6807 if (maxsize == 0)
6808 return 0;
6809
6810 /* FIXME assumes volume at offset 0 is the first volume in a
6811 * container
6812 */
6813 if (start_extent > 0)
6814 reserve = IMSM_RESERVED_SECTORS; /* gap between raid regions */
6815 else
6816 reserve = 0;
6817
6818 if (maxsize < reserve)
6819 return 0;
6820
6821 super->create_offset = ~((unsigned long long) 0);
6822 if (start + reserve > super->create_offset)
6823 return 0; /* start overflows create_offset */
6824 super->create_offset = start + reserve;
6825
6826 return maxsize - reserve;
6827 }
6828
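/* is_raid_level_supported()
 * Filter the requested raid level against what the platform OROM
 * advertises; levels 4 and 6 are never supported. RAID1 with more than
 * two disks requires RAID1E support, and RAID10 is limited to exactly
 * four disks. Without an OROM (no Intel RAID platform) anything goes.
 */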
6829 static int is_raid_level_supported(const struct imsm_orom *orom, int level, int raiddisks)
6830 {
6831 if (level < 0 || level == 6 || level == 4)
6832 return 0;
6833
6834 /* if we have an orom prevent invalid raid levels */
6835 if (orom)
6836 switch (level) {
6837 case 0: return imsm_orom_has_raid0(orom);
6838 case 1:
6839 if (raiddisks > 2)
6840 return imsm_orom_has_raid1e(orom);
6841 return imsm_orom_has_raid1(orom) && raiddisks == 2;
6842 case 10: return imsm_orom_has_raid10(orom) && raiddisks == 4;
6843 case 5: return imsm_orom_has_raid5(orom) && raiddisks > 2;
6844 }
6845 else
6846 return 1; /* not on an Intel RAID platform so anything goes */
6847
6848 return 0;
6849 }
6850
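/* active_arrays_by_format()
 * Count active md volumes living in containers of the given metadata
 * format whose member disks are attached to 'hba'. Containers holding
 * fewer than dpa (disks-per-array) active volumes may be appended to
 * *devlist as candidates for further volume creation.
 */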
6851 static int
6852 active_arrays_by_format(char *name, char* hba, struct md_list **devlist,
6853 int dpa, int verbose)
6854 {
6855 struct mdstat_ent *mdstat = mdstat_read(0, 0);
6856 struct mdstat_ent *memb;
6857 int count = 0;
6858 int num = 0;
6859 struct md_list *dv;
6860 int found;
6861
6862 for (memb = mdstat ; memb ; memb = memb->next) {
6863 if (memb->metadata_version &&
6864 (strncmp(memb->metadata_version, "external:", 9) == 0) &&
6865 (strcmp(&memb->metadata_version[9], name) == 0) &&
6866 !is_subarray(memb->metadata_version+9) &&
6867 memb->members) {
6868 struct dev_member *dev = memb->members;
6869 int fd = -1;
6870 while(dev && (fd < 0)) {
6871 char *path = xmalloc(strlen(dev->name) + strlen("/dev/") + 1);
6872 num = sprintf(path, "%s%s", "/dev/", dev->name);
6873 if (num > 0)
6874 fd = open(path, O_RDONLY, 0);
6875 if (num <= 0 || fd < 0) {
6876 pr_vrb("Cannot open %s: %s\n",
6877 dev->name, strerror(errno));
6878 }
6879 free(path);
6880 dev = dev->next;
6881 }
6882 found = 0;
6883 if (fd >= 0 && disk_attached_to_hba(fd, hba)) {
6884 struct mdstat_ent *vol;
6885 for (vol = mdstat ; vol ; vol = vol->next) {
6886 if (vol->active > 0 &&
6887 vol->metadata_version &&
6888 is_container_member(vol, memb->devnm)) {
6889 found++;
6890 count++;
6891 }
6892 }
6893 if (*devlist && (found < dpa)) {
6894 dv = xcalloc(1, sizeof(*dv));
6895 dv->devname = xmalloc(strlen(memb->devnm) + strlen("/dev/") + 1);
6896 sprintf(dv->devname, "%s%s", "/dev/", memb->devnm);
6897 dv->found = found;
6898 dv->used = 0;
6899 dv->next = *devlist;
6900 *devlist = dv;
6901 }
6902 }
6903 if (fd >= 0)
6904 close(fd);
6905 }
6906 }
6907 free_mdstat(mdstat);
6908 return count;
6909 }
6910
6911 #ifdef DEBUG_LOOP
6912 static struct md_list*
6913 get_loop_devices(void)
6914 {
6915 int i;
6916 struct md_list *devlist = NULL;
6917 struct md_list *dv;
6918
6919 for (i = 0; i < 12; i++) {
6920 dv = xcalloc(1, sizeof(*dv));
6921 dv->devname = xmalloc(40);
6922 sprintf(dv->devname, "/dev/loop%d", i);
6923 dv->next = devlist;
6924 devlist = dv;
6925 }
6926 return devlist;
6927 }
6928 #endif
6929
6930 static struct md_list*
6931 get_devices(const char *hba_path)
6932 {
6933 struct md_list *devlist = NULL;
6934 struct md_list *dv;
6935 struct dirent *ent;
6936 DIR *dir;
6937 int err = 0;
6938
6939 #if DEBUG_LOOP
6940 devlist = get_loop_devices();
6941 return devlist;
6942 #endif
6943 /* scan through /sys/dev/block looking for devices attached to
6944 * this hba
6945 */
6946 dir = opendir("/sys/dev/block");
6947 for (ent = dir ? readdir(dir) : NULL; ent; ent = readdir(dir)) {
6948 int fd;
6949 char buf[1024];
6950 int major, minor;
6951 char *path = NULL;
6952 if (sscanf(ent->d_name, "%d:%d", &major, &minor) != 2)
6953 continue;
6954 path = devt_to_devpath(makedev(major, minor));
6955 if (!path)
6956 continue;
6957 if (!path_attached_to_hba(path, hba_path)) {
6958 free(path);
6959 path = NULL;
6960 continue;
6961 }
6962 free(path);
6963 path = NULL;
6964 fd = dev_open(ent->d_name, O_RDONLY);
6965 if (fd >= 0) {
6966 fd2devname(fd, buf);
6967 close(fd);
6968 } else {
6969 pr_err("cannot open device: %s\n",
6970 ent->d_name);
6971 continue;
6972 }
6973
6974 dv = xcalloc(1, sizeof(*dv));
6975 dv->devname = xstrdup(buf);
6976 dv->next = devlist;
6977 devlist = dv;
6978 }
6979 if (err) {
6980 while (devlist) {
6981 dv = devlist;
6982 devlist = devlist->next;
6983 free(dv->devname);
6984 free(dv);
6985 }
6986 }
6987 closedir(dir);
6988 return devlist;
6989 }
6990
6991 static int
6992 count_volumes_list(struct md_list *devlist, char *homehost,
6993 int verbose, int *found)
6994 {
6995 struct md_list *tmpdev;
6996 int count = 0;
6997 struct supertype *st;
6998
6999 /* first walk the list of devices to find a consistent set
7000 * that matches the criteria, if that is possible.
7001 * We flag the ones we like with 'used'.
7002 */
7003 *found = 0;
7004 st = match_metadata_desc_imsm("imsm");
7005 if (st == NULL) {
7006 pr_vrb("cannot allocate memory for imsm supertype\n");
7007 return 0;
7008 }
7009
7010 for (tmpdev = devlist; tmpdev; tmpdev = tmpdev->next) {
7011 char *devname = tmpdev->devname;
7012 dev_t rdev;
7013 struct supertype *tst;
7014 int dfd;
7015 if (tmpdev->used > 1)
7016 continue;
7017 tst = dup_super(st);
7018 if (tst == NULL) {
7019 pr_vrb("cannot allocate memory for imsm supertype\n");
7020 goto err_1;
7021 }
7022 tmpdev->container = 0;
7023 dfd = dev_open(devname, O_RDONLY|O_EXCL);
7024 if (dfd < 0) {
7025 dprintf("cannot open device %s: %s\n",
7026 devname, strerror(errno));
7027 tmpdev->used = 2;
7028 } else if (!fstat_is_blkdev(dfd, devname, &rdev)) {
7029 tmpdev->used = 2;
7030 } else if (must_be_container(dfd)) {
7031 struct supertype *cst;
7032 cst = super_by_fd(dfd, NULL);
7033 if (cst == NULL) {
7034 dprintf("cannot recognize container type %s\n",
7035 devname);
7036 tmpdev->used = 2;
7037 } else if (tst->ss != st->ss) {
7038 dprintf("non-imsm container - ignore it: %s\n",
7039 devname);
7040 tmpdev->used = 2;
7041 } else if (!tst->ss->load_container ||
7042 tst->ss->load_container(tst, dfd, NULL))
7043 tmpdev->used = 2;
7044 else {
7045 tmpdev->container = 1;
7046 }
7047 if (cst)
7048 cst->ss->free_super(cst);
7049 } else {
7050 tmpdev->st_rdev = rdev;
7051 if (tst->ss->load_super(tst, dfd, NULL)) {
7052 dprintf("no RAID superblock on %s\n",
7053 devname);
7054 tmpdev->used = 2;
7055 } else if (tst->ss->compare_super == NULL) {
7056 dprintf("Cannot assemble %s metadata on %s\n",
7057 tst->ss->name, devname);
7058 tmpdev->used = 2;
7059 }
7060 }
7061 if (dfd >= 0)
7062 close(dfd);
7063 if (tmpdev->used == 2 || tmpdev->used == 4) {
7064 /* Ignore unrecognised devices during auto-assembly */
7065 goto loop;
7066 }
7067 else {
7068 struct mdinfo info;
7069 tst->ss->getinfo_super(tst, &info, NULL);
7070
7071 if (st->minor_version == -1)
7072 st->minor_version = tst->minor_version;
7073
7074 if (memcmp(info.uuid, uuid_zero,
7075 sizeof(int[4])) == 0) {
7076 /* this is a floating spare. It cannot define
7077 * an array unless there are no more arrays of
7078 * this type to be found. It can be included
7079 * in an array of this type though.
7080 */
7081 tmpdev->used = 3;
7082 goto loop;
7083 }
7084
7085 if (st->ss != tst->ss ||
7086 st->minor_version != tst->minor_version ||
7087 st->ss->compare_super(st, tst, 1) != 0) {
7088 /* Some mismatch. If exactly one array matches this host,
7089 * we can resolve on that one.
7090 * Or, if we are auto assembling, we just ignore the second
7091 * for now.
7092 */
7093 dprintf("superblock on %s doesn't match others - assembly aborted\n",
7094 devname);
7095 goto loop;
7096 }
7097 tmpdev->used = 1;
7098 *found = 1;
7099 dprintf("found: devname: %s\n", devname);
7100 }
7101 loop:
7102 if (tst)
7103 tst->ss->free_super(tst);
7104 }
7105 if (*found != 0) {
7106 int err;
7107 if ((err = load_super_imsm_all(st, -1, &st->sb, NULL, devlist, 0)) == 0) {
7108 struct mdinfo *iter, *head = st->ss->container_content(st, NULL);
7109 for (iter = head; iter; iter = iter->next) {
7110 dprintf("content->text_version: %s vol\n",
7111 iter->text_version);
7112 if (iter->array.state & (1<<MD_SB_BLOCK_VOLUME)) {
7113 /* do not assemble arrays with unsupported
7114 * configurations */
7115 dprintf("Cannot activate member %s.\n",
7116 iter->text_version);
7117 } else
7118 count++;
7119 }
7120 sysfs_free(head);
7121
7122 } else {
7123 dprintf("No valid super block on device list: err: %d %p\n",
7124 err, st->sb);
7125 }
7126 } else {
7127 dprintf("no more devices to examine\n");
7128 }
7129
7130 for (tmpdev = devlist; tmpdev; tmpdev = tmpdev->next) {
7131 if (tmpdev->used == 1 && tmpdev->found) {
7132 if (count) {
7133 if (count < tmpdev->found)
7134 count = 0;
7135 else
7136 count -= tmpdev->found;
7137 }
7138 }
7139 if (tmpdev->used == 1)
7140 tmpdev->used = 4;
7141 }
7142 err_1:
7143 if (st)
7144 st->ss->free_super(st);
7145 return count;
7146 }
7147
7148 static int __count_volumes(char *hba_path, int dpa, int verbose,
7149 int cmp_hba_path)
7150 {
7151 struct sys_dev *idev, *intel_devices = find_intel_devices();
7152 int count = 0;
7153 const struct orom_entry *entry;
7154 struct devid_list *dv, *devid_list;
7155
7156 if (!hba_path)
7157 return 0;
7158
7159 for (idev = intel_devices; idev; idev = idev->next) {
7160 if (strstr(idev->path, hba_path))
7161 break;
7162 }
7163
7164 if (!idev || !idev->dev_id)
7165 return 0;
7166
7167 entry = get_orom_entry_by_device_id(idev->dev_id);
7168
7169 if (!entry || !entry->devid_list)
7170 return 0;
7171
7172 devid_list = entry->devid_list;
7173 for (dv = devid_list; dv; dv = dv->next) {
7174 struct md_list *devlist;
7175 struct sys_dev *device = NULL;
7176 char *hpath;
7177 int found = 0;
7178
7179 if (cmp_hba_path)
7180 device = device_by_id_and_path(dv->devid, hba_path);
7181 else
7182 device = device_by_id(dv->devid);
7183
7184 if (device)
7185 hpath = device->path;
7186 else
7187 return 0;
7188
7189 devlist = get_devices(hpath);
7190 /* if no intel devices return zero volumes */
7191 if (devlist == NULL)
7192 return 0;
7193
7194 count += active_arrays_by_format("imsm", hpath, &devlist, dpa,
7195 verbose);
7196 dprintf("path: %s active arrays: %d\n", hpath, count);
7197 if (devlist == NULL)
7198 return 0;
7199 do {
7200 found = 0;
7201 count += count_volumes_list(devlist,
7202 NULL,
7203 verbose,
7204 &found);
7205 dprintf("found %d count: %d\n", found, count);
7206 } while (found);
7207
7208 dprintf("path: %s total number of volumes: %d\n", hpath, count);
7209
7210 while (devlist) {
7211 struct md_list *dv = devlist;
7212 devlist = devlist->next;
7213 free(dv->devname);
7214 free(dv);
7215 }
7216 }
7217 return count;
7218 }
7219
7220 static int count_volumes(struct intel_hba *hba, int dpa, int verbose)
7221 {
7222 if (!hba)
7223 return 0;
7224 if (hba->type == SYS_DEV_VMD) {
7225 struct sys_dev *dev;
7226 int count = 0;
7227
7228 for (dev = find_intel_devices(); dev; dev = dev->next) {
7229 if (dev->type == SYS_DEV_VMD)
7230 count += __count_volumes(dev->path, dpa,
7231 verbose, 1);
7232 }
7233 return count;
7234 }
7235 return __count_volumes(hba->path, dpa, verbose, 0);
7236 }
7237
7238 static int imsm_default_chunk(const struct imsm_orom *orom)
7239 {
7240 /* up to 512 if the platform supports it, otherwise the platform max;
7241 * 128 if no platform detected.
7242 */
7243 int fs = max(7, orom ? fls(orom->sss) : 0);
7244
7245 return min(512, (1 << fs));
7246 }
7247
7248 static int
7249 validate_geometry_imsm_orom(struct intel_super *super, int level, int layout,
7250 int raiddisks, int *chunk, unsigned long long size, int verbose)
7251 {
7252 /* check/set platform and metadata limits/defaults */
7253 if (super->orom && raiddisks > super->orom->dpa) {
7254 pr_vrb("platform supports a maximum of %d disks per array\n",
7255 super->orom->dpa);
7256 return 0;
7257 }
7258
7259 /* capabilities of OROM tested - copied from validate_geometry_imsm_volume */
7260 if (!is_raid_level_supported(super->orom, level, raiddisks)) {
7261 pr_vrb("platform does not support raid%d with %d disk%s\n",
7262 level, raiddisks, raiddisks > 1 ? "s" : "");
7263 return 0;
7264 }
7265
7266 if (*chunk == 0 || *chunk == UnSet)
7267 *chunk = imsm_default_chunk(super->orom);
7268
7269 if (super->orom && !imsm_orom_has_chunk(super->orom, *chunk)) {
7270 pr_vrb("platform does not support a chunk size of: %d\n", *chunk);
7271 return 0;
7272 }
7273
7274 if (layout != imsm_level_to_layout(level)) {
7275 if (level == 5)
7276 pr_vrb("imsm raid 5 only supports the left-asymmetric layout\n");
7277 else if (level == 10)
7278 pr_vrb("imsm raid 10 only supports the n2 layout\n");
7279 else
7280 pr_vrb("imsm unknown layout %#x for this raid level %d\n",
7281 layout, level);
7282 return 0;
7283 }
7284
7285 if (super->orom && (super->orom->attr & IMSM_OROM_ATTR_2TB) == 0 &&
7286 (calc_array_size(level, raiddisks, layout, *chunk, size) >> 32) > 0) {
7287 pr_vrb("platform does not support a volume size over 2TB\n");
7288 return 0;
7289 }
7290
7291 return 1;
7292 }
7293
7294 /* validate_geometry_imsm_volume - lifted from validate_geometry_ddf_bvd
7295 * FIX ME add ahci details
7296 */
7297 static int validate_geometry_imsm_volume(struct supertype *st, int level,
7298 int layout, int raiddisks, int *chunk,
7299 unsigned long long size,
7300 unsigned long long data_offset,
7301 char *dev,
7302 unsigned long long *freesize,
7303 int verbose)
7304 {
7305 dev_t rdev;
7306 struct intel_super *super = st->sb;
7307 struct imsm_super *mpb;
7308 struct dl *dl;
7309 unsigned long long pos = 0;
7310 unsigned long long maxsize;
7311 struct extent *e;
7312 int i;
7313
7314 /* We must have the container info already read in. */
7315 if (!super)
7316 return 0;
7317
7318 mpb = super->anchor;
7319
7320 if (!validate_geometry_imsm_orom(super, level, layout, raiddisks, chunk, size, verbose)) {
7321 pr_err("RAID geometry validation failed. Cannot proceed with the action(s).\n");
7322 return 0;
7323 }
7324 if (!dev) {
7325 /* General test: make sure there is space for
7326 * 'raiddisks' device extents of size 'size' at a given
7327 * offset
7328 */
7329 unsigned long long minsize = size;
7330 unsigned long long start_offset = MaxSector;
7331 int dcnt = 0;
7332 if (minsize == 0)
7333 minsize = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
7334 for (dl = super->disks; dl ; dl = dl->next) {
7335 int found = 0;
7336
7337 pos = 0;
7338 i = 0;
7339 e = get_extents(super, dl, 0);
7340 if (!e) continue;
7341 do {
7342 unsigned long long esize;
7343 esize = e[i].start - pos;
7344 if (esize >= minsize)
7345 found = 1;
7346 if (found && start_offset == MaxSector) {
7347 start_offset = pos;
7348 break;
7349 } else if (found && pos != start_offset) {
7350 found = 0;
7351 break;
7352 }
7353 pos = e[i].start + e[i].size;
7354 i++;
7355 } while (e[i-1].size);
7356 if (found)
7357 dcnt++;
7358 free(e);
7359 }
7360 if (dcnt < raiddisks) {
7361 if (verbose)
7362 pr_err("imsm: Not enough devices with space for this array (%d < %d)\n",
7363 dcnt, raiddisks);
7364 return 0;
7365 }
7366 return 1;
7367 }
7368
7369 /* This device must be a member of the set */
7370 if (!stat_is_blkdev(dev, &rdev))
7371 return 0;
7372 for (dl = super->disks ; dl ; dl = dl->next) {
7373 if (dl->major == (int)major(rdev) &&
7374 dl->minor == (int)minor(rdev))
7375 break;
7376 }
7377 if (!dl) {
7378 if (verbose)
7379 pr_err("%s is not in the same imsm set\n", dev);
7380 return 0;
7381 } else if (super->orom && dl->index < 0 && mpb->num_raid_devs) {
7382 /* If a volume is present then the current creation attempt
7383 * cannot incorporate new spares because the orom may not
7384 * understand this configuration (all member disks must be
7385 * members of each array in the container).
7386 */
7387 pr_err("%s is a spare and a volume is already defined for this container\n", dev);
7388 pr_err("The option-rom requires all member disks to be a member of all volumes\n");
7389 return 0;
7390 } else if (super->orom && mpb->num_raid_devs > 0 &&
7391 mpb->num_disks != raiddisks) {
7392 pr_err("The option-rom requires all member disks to be a member of all volumes\n");
7393 return 0;
7394 }
7395
7396 /* retrieve the largest free space block */
7397 e = get_extents(super, dl, 0);
7398 maxsize = 0;
7399 i = 0;
7400 if (e) {
7401 do {
7402 unsigned long long esize;
7403
7404 esize = e[i].start - pos;
7405 if (esize >= maxsize)
7406 maxsize = esize;
7407 pos = e[i].start + e[i].size;
7408 i++;
7409 } while (e[i-1].size);
7410 dl->e = e;
7411 dl->extent_cnt = i;
7412 } else {
7413 if (verbose)
7414 pr_err("unable to determine free space for: %s\n",
7415 dev);
7416 return 0;
7417 }
7418 if (maxsize < size) {
7419 if (verbose)
7420 pr_err("%s not enough space (%llu < %llu)\n",
7421 dev, maxsize, size);
7422 return 0;
7423 }
7424
7425 /* count total number of extents for merge */
7426 i = 0;
7427 for (dl = super->disks; dl; dl = dl->next)
7428 if (dl->e)
7429 i += dl->extent_cnt;
7430
7431 maxsize = merge_extents(super, i);
7432
7433 if (mpb->num_raid_devs > 0 && size && size != maxsize)
7434 pr_err("attempting to create a second volume with size less then remaining space.\n");
7435
7436 if (maxsize < size || maxsize == 0) {
7437 if (verbose) {
7438 if (maxsize == 0)
7439 pr_err("no free space left on device. Aborting...\n");
7440 else
7441 pr_err("not enough space to create volume of given size (%llu < %llu). Aborting...\n",
7442 maxsize, size);
7443 }
7444 return 0;
7445 }
7446
7447 *freesize = maxsize;
7448
7449 if (super->orom) {
7450 int count = count_volumes(super->hba,
7451 super->orom->dpa, verbose);
7452 if (super->orom->vphba <= count) {
7453 pr_vrb("platform does not support more than %d raid volumes.\n",
7454 super->orom->vphba);
7455 return 0;
7456 }
7457 }
7458 return 1;
7459 }
7460
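/* imsm_get_free_size()
 * Find the largest free region sharing a common start offset across all
 * usable container disks (recording each disk's extents), round an
 * auto-selected size down to a whole number of chunks, and pre-assign
 * dl->raiddisk slots for autolayout. Returns 1 and sets *freesize on
 * success, 0 when the request cannot be satisfied.
 */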
7461 static int imsm_get_free_size(struct supertype *st, int raiddisks,
7462 unsigned long long size, int chunk,
7463 unsigned long long *freesize)
7464 {
7465 struct intel_super *super = st->sb;
7466 struct imsm_super *mpb = super->anchor;
7467 struct dl *dl;
7468 int i;
7469 int extent_cnt;
7470 struct extent *e;
7471 unsigned long long maxsize;
7472 unsigned long long minsize;
7473 int cnt;
7474 int used;
7475
7476 /* find the largest free region with a common start offset across the possible disks */
7477 used = 0;
7478 extent_cnt = 0;
7479 cnt = 0;
7480 for (dl = super->disks; dl; dl = dl->next) {
7481 dl->raiddisk = -1;
7482
7483 if (dl->index >= 0)
7484 used++;
7485
7486 /* don't activate new spares if we are orom constrained
7487 * and there is already a volume active in the container
7488 */
7489 if (super->orom && dl->index < 0 && mpb->num_raid_devs)
7490 continue;
7491
7492 e = get_extents(super, dl, 0);
7493 if (!e)
7494 continue;
7495 for (i = 1; e[i-1].size; i++)
7496 ;
7497 dl->e = e;
7498 dl->extent_cnt = i;
7499 extent_cnt += i;
7500 cnt++;
7501 }
7502
7503 maxsize = merge_extents(super, extent_cnt);
7504 minsize = size;
7505 if (size == 0)
7506 /* chunk is in K */
7507 minsize = chunk * 2;
7508
7509 if (cnt < raiddisks ||
7510 (super->orom && used && used != raiddisks) ||
7511 maxsize < minsize ||
7512 maxsize == 0) {
7513 pr_err("not enough devices with space to create array.\n");
7514 return 0; /* no free region large enough */
7515 }
7516
7517 if (size == 0) {
7518 size = maxsize;
7519 if (chunk) {
7520 size /= 2 * chunk;
7521 size *= 2 * chunk;
7522 }
7523 maxsize = size;
7524 }
7525 if (mpb->num_raid_devs > 0 && size && size != maxsize)
7526 pr_err("attempting to create a second volume with size less then remaining space.\n");
7527 cnt = 0;
7528 for (dl = super->disks; dl; dl = dl->next)
7529 if (dl->e)
7530 dl->raiddisk = cnt++;
7531
7532 *freesize = size;
7533
7534 dprintf("imsm: imsm_get_free_size() returns : %llu\n", size);
7535
7536 return 1;
7537 }
7538
7539 static int reserve_space(struct supertype *st, int raiddisks,
7540 unsigned long long size, int chunk,
7541 unsigned long long *freesize)
7542 {
7543 struct intel_super *super = st->sb;
7544 struct dl *dl;
7545 int cnt;
7546 int rv = 0;
7547
7548 rv = imsm_get_free_size(st, raiddisks, size, chunk, freesize);
7549 if (rv) {
7550 cnt = 0;
7551 for (dl = super->disks; dl; dl = dl->next)
7552 if (dl->e)
7553 dl->raiddisk = cnt++;
7554 rv = 1;
7555 }
7556
7557 return rv;
7558 }
7559
7560 static int validate_geometry_imsm(struct supertype *st, int level, int layout,
7561 int raiddisks, int *chunk, unsigned long long size,
7562 unsigned long long data_offset,
7563 char *dev, unsigned long long *freesize,
7564 int consistency_policy, int verbose)
7565 {
7566 int fd, cfd;
7567 struct mdinfo *sra;
7568 int is_member = 0;
7569
7570 /* load capability
7571 * if given unused devices, create a container
7572 * if given devices in a container, create a member volume
7573 */
7574 if (level == LEVEL_CONTAINER) {
7575 /* Must be a fresh device to add to a container */
7576 return validate_geometry_imsm_container(st, level, layout,
7577 raiddisks,
7578 *chunk,
7579 size, data_offset,
7580 dev, freesize,
7581 verbose);
7582 }
7583
7584 /*
7585 * Size is given in sectors.
7586 */
7587 if (size && (size < 2048)) {
7588 pr_err("Given size must be greater than 1M.\n");
7589 /* Depends on the algorithm in Create.c:
7590 * if a container was given (dev == NULL), return -1,
7591 * if a block device was given (dev != NULL), return 0.
7592 */
7593 return dev ? -1 : 0;
7594 }
7595
7596 if (!dev) {
7597 if (st->sb) {
7598 struct intel_super *super = st->sb;
7599 if (!validate_geometry_imsm_orom(st->sb, level, layout,
7600 raiddisks, chunk, size,
7601 verbose))
7602 return 0;
7603 /* we are being asked to automatically lay out a
7604 * new volume based on the current contents of
7605 * the container. If the parameters can be
7606 * satisfied, reserve_space will record the disks,
7607 * start offset, and size of the volume to be
7608 * created. add_to_super and getinfo_super
7609 * detect when autolayout is in progress.
7610 */
7611 /* assuming that freesize is always given when the array is
7612 * created */
7613 if (super->orom && freesize) {
7614 int count;
7615 count = count_volumes(super->hba,
7616 super->orom->dpa, verbose);
7617 if (super->orom->vphba <= count) {
7618 pr_vrb("platform does not support more than %d raid volumes.\n",
7619 super->orom->vphba);
7620 return 0;
7621 }
7622 }
7623 if (freesize)
7624 return reserve_space(st, raiddisks, size,
7625 *chunk, freesize);
7626 }
7627 return 1;
7628 }
7629 if (st->sb) {
7630 /* creating in a given container */
7631 return validate_geometry_imsm_volume(st, level, layout,
7632 raiddisks, chunk, size,
7633 data_offset,
7634 dev, freesize, verbose);
7635 }
7636
7637 /* This device needs to be a device in an 'imsm' container */
7638 fd = open(dev, O_RDONLY|O_EXCL, 0);
7639 if (fd >= 0) {
7640 if (verbose)
7641 pr_err("Cannot create this array on device %s\n",
7642 dev);
7643 close(fd);
7644 return 0;
7645 }
7646 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
7647 if (verbose)
7648 pr_err("Cannot open %s: %s\n",
7649 dev, strerror(errno));
7650 return 0;
7651 }
7652 /* Well, it is in use by someone, maybe an 'imsm' container. */
7653 cfd = open_container(fd);
7654 close(fd);
7655 if (cfd < 0) {
7656 if (verbose)
7657 pr_err("Cannot use %s: It is busy\n",
7658 dev);
7659 return 0;
7660 }
7661 sra = sysfs_read(cfd, NULL, GET_VERSION);
7662 if (sra && sra->array.major_version == -1 &&
7663 strcmp(sra->text_version, "imsm") == 0)
7664 is_member = 1;
7665 sysfs_free(sra);
7666 if (is_member) {
7667 /* This is a member of an imsm container. Load the container
7668 * and try to create a volume.
7669 */
7670 struct intel_super *super;
7671
7672 if (load_super_imsm_all(st, cfd, (void **) &super, NULL, NULL, 1) == 0) {
7673 st->sb = super;
7674 strcpy(st->container_devnm, fd2devnm(cfd));
7675 close(cfd);
7676 return validate_geometry_imsm_volume(st, level, layout,
7677 raiddisks, chunk,
7678 size, data_offset, dev,
7679 freesize, 1)
7680 ? 1 : -1;
7681 }
7682 }
7683
7684 if (verbose)
7685 pr_err("failed container membership check\n");
7686
7687 close(cfd);
7688 return 0;
7689 }
7690
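/* default_geometry_imsm()
 * Fill in create-time defaults that were left unset: level falls back
 * to a container, layout to the imsm default for the level, and chunk
 * to the platform default.
 */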
7691 static void default_geometry_imsm(struct supertype *st, int *level, int *layout, int *chunk)
7692 {
7693 struct intel_super *super = st->sb;
7694
7695 if (level && *level == UnSet)
7696 *level = LEVEL_CONTAINER;
7697
7698 if (level && layout && *layout == UnSet)
7699 *layout = imsm_level_to_layout(*level);
7700
7701 if (chunk && (*chunk == UnSet || *chunk == 0))
7702 *chunk = imsm_default_chunk(super->orom);
7703 }
7704
7705 static void handle_missing(struct intel_super *super, struct imsm_dev *dev);
7706
7707 static int kill_subarray_imsm(struct supertype *st, char *subarray_id)
7708 {
7709 /* remove the subarray currently referenced by subarray_id */
7710 __u8 i;
7711 struct intel_dev **dp;
7712 struct intel_super *super = st->sb;
7713 __u8 current_vol = strtoul(subarray_id, NULL, 10);
7714 struct imsm_super *mpb = super->anchor;
7715
7716 if (mpb->num_raid_devs == 0)
7717 return 2;
7718
7719 /* block deletions that would change the uuid of active subarrays
7720 *
7721 * FIXME when immutable ids are available, but note that we'll
7722 * also need to fixup the invalidated/active subarray indexes in
7723 * mdstat
7724 */
7725 for (i = 0; i < mpb->num_raid_devs; i++) {
7726 char subarray[4];
7727
7728 if (i < current_vol)
7729 continue;
7730 sprintf(subarray, "%u", i);
7731 if (is_subarray_active(subarray, st->devnm)) {
7732 pr_err("deleting subarray-%d would change the UUID of active subarray-%d, aborting\n",
7733 current_vol, i);
7734
7735 return 2;
7736 }
7737 }
7738
7739 if (st->update_tail) {
7740 struct imsm_update_kill_array *u = xmalloc(sizeof(*u));
7741
7742 u->type = update_kill_array;
7743 u->dev_idx = current_vol;
7744 append_metadata_update(st, u, sizeof(*u));
7745
7746 return 0;
7747 }
7748
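/* unlink the victim volume; later volumes slide down one index
 * and are re-checked for missing/failed member disks */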
7749 for (dp = &super->devlist; *dp;)
7750 if ((*dp)->index == current_vol) {
7751 *dp = (*dp)->next;
7752 } else {
7753 handle_missing(super, (*dp)->dev);
7754 if ((*dp)->index > current_vol)
7755 (*dp)->index--;
7756 dp = &(*dp)->next;
7757 }
7758
7759 /* no more raid devices, all active components are now spares,
7760 * but of course failed are still failed
7761 */
7762 if (--mpb->num_raid_devs == 0) {
7763 struct dl *d;
7764
7765 for (d = super->disks; d; d = d->next)
7766 if (d->index > -2)
7767 mark_spare(d);
7768 }
7769
7770 super->updates_pending++;
7771
7772 return 0;
7773 }
7774
7775 static int update_subarray_imsm(struct supertype *st, char *subarray,
7776 char *update, struct mddev_ident *ident)
7777 {
7778 /* update the subarray currently referenced by ->current_vol */
7779 struct intel_super *super = st->sb;
7780 struct imsm_super *mpb = super->anchor;
7781
7782 if (strcmp(update, "name") == 0) {
7783 char *name = ident->name;
7784 char *ep;
7785 int vol;
7786
7787 if (is_subarray_active(subarray, st->devnm)) {
7788 pr_err("Unable to update name of active subarray\n");
7789 return 2;
7790 }
7791
7792 if (!check_name(super, name, 0))
7793 return 2;
7794
7795 vol = strtoul(subarray, &ep, 10);
7796 if (*ep != '\0' || vol >= super->anchor->num_raid_devs)
7797 return 2;
7798
7799 if (st->update_tail) {
7800 struct imsm_update_rename_array *u = xmalloc(sizeof(*u));
7801
7802 u->type = update_rename_array;
7803 u->dev_idx = vol;
7804 strncpy((char *) u->name, name, MAX_RAID_SERIAL_LEN);
7805 u->name[MAX_RAID_SERIAL_LEN-1] = '\0';
7806 append_metadata_update(st, u, sizeof(*u));
7807 } else {
7808 struct imsm_dev *dev;
7809 int i, namelen;
7810
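/* apply the rename directly; the name is truncated to
 * MAX_RAID_SERIAL_LEN bytes to fit the metadata field */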
7811 dev = get_imsm_dev(super, vol);
7812 memset(dev->volume, '\0', MAX_RAID_SERIAL_LEN);
7813 namelen = min((int)strlen(name), MAX_RAID_SERIAL_LEN);
7814 memcpy(dev->volume, name, namelen);
7815 for (i = 0; i < mpb->num_raid_devs; i++) {
7816 dev = get_imsm_dev(super, i);
7817 handle_missing(super, dev);
7818 }
7819 super->updates_pending++;
7820 }
7821 } else if (strcmp(update, "ppl") == 0 ||
7822 strcmp(update, "no-ppl") == 0) {
7823 int new_policy;
7824 char *ep;
7825 int vol = strtoul(subarray, &ep, 10);
7826
7827 if (*ep != '\0' || vol >= super->anchor->num_raid_devs)
7828 return 2;
7829
7830 if (strcmp(update, "ppl") == 0)
7831 new_policy = RWH_MULTIPLE_DISTRIBUTED;
7832 else
7833 new_policy = RWH_MULTIPLE_OFF;
7834
7835 if (st->update_tail) {
7836 struct imsm_update_rwh_policy *u = xmalloc(sizeof(*u));
7837
7838 u->type = update_rwh_policy;
7839 u->dev_idx = vol;
7840 u->new_policy = new_policy;
7841 append_metadata_update(st, u, sizeof(*u));
7842 } else {
7843 struct imsm_dev *dev;
7844
7845 dev = get_imsm_dev(super, vol);
7846 dev->rwh_policy = new_policy;
7847 super->updates_pending++;
7848 }
7849 } else
7850 return 2;
7851
7852 return 0;
7853 }
7854
7855 static int is_gen_migration(struct imsm_dev *dev)
7856 {
7857 if (dev == NULL)
7858 return 0;
7859
7860 if (!dev->vol.migr_state)
7861 return 0;
7862
7863 if (migr_type(dev) == MIGR_GEN_MIGR)
7864 return 1;
7865
7866 return 0;
7867 }
7868
7869 static int is_rebuilding(struct imsm_dev *dev)
7870 {
7871 struct imsm_map *migr_map;
7872
7873 if (!dev->vol.migr_state)
7874 return 0;
7875
7876 if (migr_type(dev) != MIGR_REBUILD)
7877 return 0;
7878
7879 migr_map = get_imsm_map(dev, MAP_1);
7880
7881 if (migr_map->map_state == IMSM_T_STATE_DEGRADED)
7882 return 1;
7883 else
7884 return 0;
7885 }
7886
7887 static int is_initializing(struct imsm_dev *dev)
7888 {
7889 struct imsm_map *migr_map;
7890
7891 if (!dev->vol.migr_state)
7892 return 0;
7893
7894 if (migr_type(dev) != MIGR_INIT)
7895 return 0;
7896
7897 migr_map = get_imsm_map(dev, MAP_1);
7898
7899 if (migr_map->map_state == IMSM_T_STATE_UNINITIALIZED)
7900 return 1;
7901
7902 return 0;
7903 }
7904
7905 static void update_recovery_start(struct intel_super *super,
7906 struct imsm_dev *dev,
7907 struct mdinfo *array)
7908 {
7909 struct mdinfo *rebuild = NULL;
7910 struct mdinfo *d;
7911 __u32 units;
7912
7913 if (!is_rebuilding(dev))
7914 return;
7915
7916 /* Find the rebuild target, but punt on the dual rebuild case */
7917 for (d = array->devs; d; d = d->next)
7918 if (d->recovery_start == 0) {
7919 if (rebuild)
7920 return;
7921 rebuild = d;
7922 }
7923
7924 if (!rebuild) {
7925 /* (?) none of the disks are marked with
7926 * IMSM_ORD_REBUILD, so assume they are missing and the
7927 * disk_ord_tbl was not correctly updated
7928 */
7929 dprintf("failed to locate out-of-sync disk\n");
7930 return;
7931 }
7932
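/* the checkpoint is stored in migration units; convert it
 * back to sectors for the recovery offset */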
7933 units = __le32_to_cpu(dev->vol.curr_migr_unit);
7934 rebuild->recovery_start = units * blocks_per_migr_unit(super, dev);
7935 }
7936
7937 static int recover_backup_imsm(struct supertype *st, struct mdinfo *info);
7938
7939 static struct mdinfo *container_content_imsm(struct supertype *st, char *subarray)
7940 {
7941 /* Given a container loaded by load_super_imsm_all,
7942 * extract information about all the arrays into
7943 * an mdinfo tree.
7944 * If 'subarray' is given, just extract info about that array.
7945 *
7946 * For each imsm_dev create an mdinfo, fill it in,
7947 * then look for matching devices in super->disks
7948 * and create appropriate device mdinfo.
7949 */
7950 struct intel_super *super = st->sb;
7951 struct imsm_super *mpb = super->anchor;
7952 struct mdinfo *rest = NULL;
7953 unsigned int i;
7954 int sb_errors = 0;
7955 struct dl *d;
7956 int spare_disks = 0;
7957 int current_vol = super->current_vol;
7958
7959 /* do not assemble arrays when not all attributes are supported */
7960 if (imsm_check_attributes(mpb->attributes) == 0) {
7961 sb_errors = 1;
7962 pr_err("Unsupported attributes in IMSM metadata.Arrays activation is blocked.\n");
7963 }
7964
7965 /* count spare devices, not used in maps
7966 */
7967 for (d = super->disks; d; d = d->next)
7968 if (d->index == -1)
7969 spare_disks++;
7970
7971 for (i = 0; i < mpb->num_raid_devs; i++) {
7972 struct imsm_dev *dev;
7973 struct imsm_map *map;
7974 struct imsm_map *map2;
7975 struct mdinfo *this;
7976 int slot;
7977 int chunk;
7978 char *ep;
7979 int level;
7980
7981 if (subarray &&
7982 (i != strtoul(subarray, &ep, 10) || *ep != '\0'))
7983 continue;
7984
7985 dev = get_imsm_dev(super, i);
7986 map = get_imsm_map(dev, MAP_0);
7987 map2 = get_imsm_map(dev, MAP_1);
7988 level = get_imsm_raid_level(map);
7989
7990 /* do not publish arrays that are in the middle of an
7991 * unsupported migration
7992 */
7993 if (dev->vol.migr_state &&
7994 (migr_type(dev) == MIGR_STATE_CHANGE)) {
7995 pr_err("cannot assemble volume '%.16s': unsupported migration in progress\n",
7996 dev->volume);
7997 continue;
7998 }
7999 /* do not publish arrays that are not supported by the
8000 * controller's OROM/EFI
8001 */
8002
8003 this = xmalloc(sizeof(*this));
8004
8005 super->current_vol = i;
8006 getinfo_super_imsm_volume(st, this, NULL);
8007 this->next = rest;
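/* blocks_per_strip counts 512-byte sectors; >> 1 yields KiB */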
8008 chunk = __le16_to_cpu(map->blocks_per_strip) >> 1;
8009 /* mdadm does not support all metadata features - set the blocking bits in the array state */
8010 if (!validate_geometry_imsm_orom(super,
8011 level, /* RAID level */
8012 imsm_level_to_layout(level),
8013 map->num_members, /* raid disks */
8014 &chunk, imsm_dev_size(dev),
8015 1 /* verbose */)) {
8016 pr_err("IMSM RAID geometry validation failed. Array %s activation is blocked.\n",
8017 dev->volume);
8018 this->array.state |=
8019 (1<<MD_SB_BLOCK_CONTAINER_RESHAPE) |
8020 (1<<MD_SB_BLOCK_VOLUME);
8021 }
8022
8023 /* if the superblock has errors, block activation and reshape for all arrays */
8024 if (sb_errors)
8025 this->array.state |=
8026 (1<<MD_SB_BLOCK_CONTAINER_RESHAPE) |
8027 (1<<MD_SB_BLOCK_VOLUME);
8028
8029 for (slot = 0 ; slot < map->num_members; slot++) {
8030 unsigned long long recovery_start;
8031 struct mdinfo *info_d;
8032 struct dl *d;
8033 int idx;
8034 int skip;
8035 __u32 ord;
8036 int missing = 0;
8037
8038 skip = 0;
8039 idx = get_imsm_disk_idx(dev, slot, MAP_0);
8040 ord = get_imsm_ord_tbl_ent(dev, slot, MAP_X);
8041 for (d = super->disks; d ; d = d->next)
8042 if (d->index == idx)
8043 break;
8044
8045 recovery_start = MaxSector;
8046 if (d == NULL)
8047 skip = 1;
8048 if (d && is_failed(&d->disk))
8049 skip = 1;
8050 if (!skip && (ord & IMSM_ORD_REBUILD))
8051 recovery_start = 0;
8052 if (!(ord & IMSM_ORD_REBUILD))
8053 this->array.working_disks++;
8054 /*
8055 * if we skip some disks the array will be assembled degraded;
8056 * reset resync start to avoid a dirty-degraded
8057 * situation when performing the initial sync
8058 */
8059 if (skip)
8060 missing++;
8061
8062 if (!(dev->vol.dirty & RAIDVOL_DIRTY)) {
8063 if ((!able_to_resync(level, missing) ||
8064 recovery_start == 0))
8065 this->resync_start = MaxSector;
8066 } else {
8067 /*
8068 * FIXME handle dirty degraded
8069 */
8070 }
8071
8072 if (skip)
8073 continue;
8074
8075 info_d = xcalloc(1, sizeof(*info_d));
8076 info_d->next = this->devs;
8077 this->devs = info_d;
8078
8079 info_d->disk.number = d->index;
8080 info_d->disk.major = d->major;
8081 info_d->disk.minor = d->minor;
8082 info_d->disk.raid_disk = slot;
8083 info_d->recovery_start = recovery_start;
8084 if (map2) {
8085 if (slot < map2->num_members)
8086 info_d->disk.state = (1 << MD_DISK_ACTIVE);
8087 else
8088 this->array.spare_disks++;
8089 } else {
8090 if (slot < map->num_members)
8091 info_d->disk.state = (1 << MD_DISK_ACTIVE);
8092 else
8093 this->array.spare_disks++;
8094 }
8095
8096 info_d->events = __le32_to_cpu(mpb->generation_num);
8097 info_d->data_offset = pba_of_lba0(map);
8098 info_d->component_size = calc_component_size(map, dev);
8099
8100 if (map->raid_level == 5) {
8101 info_d->ppl_sector = this->ppl_sector;
8102 info_d->ppl_size = this->ppl_size;
8103 if (this->consistency_policy == CONSISTENCY_POLICY_PPL &&
8104 recovery_start == 0)
8105 this->resync_start = 0;
8106 }
8107
8108 info_d->bb.supported = 1;
8109 get_volume_badblocks(super->bbm_log, ord_to_idx(ord),
8110 info_d->data_offset,
8111 info_d->component_size,
8112 &info_d->bb);
8113 }
8114 /* now that the disk list is up-to-date fixup recovery_start */
8115 update_recovery_start(super, dev, this);
8116 this->array.spare_disks += spare_disks;
8117
8118 /* check for reshape */
8119 if (this->reshape_active == 1)
8120 recover_backup_imsm(st, this);
8121 rest = this;
8122 }
8123
8124 super->current_vol = current_vol;
8125 return rest;
8126 }
8127
8128 static __u8 imsm_check_degraded(struct intel_super *super, struct imsm_dev *dev,
8129 int failed, int look_in_map)
8130 {
8131 struct imsm_map *map;
8132
8133 map = get_imsm_map(dev, look_in_map);
8134
8135 if (!failed)
8136 return map->map_state == IMSM_T_STATE_UNINITIALIZED ?
8137 IMSM_T_STATE_UNINITIALIZED : IMSM_T_STATE_NORMAL;
8138
8139 switch (get_imsm_raid_level(map)) {
8140 case 0:
8141 return IMSM_T_STATE_FAILED;
8142 break;
8143 case 1:
8144 if (failed < map->num_members)
8145 return IMSM_T_STATE_DEGRADED;
8146 else
8147 return IMSM_T_STATE_FAILED;
8148 break;
8149 case 10:
8150 {
8151 /**
8152 * check to see if any mirror has failed completely; otherwise
8153 * we are only degraded. Even-numbered slots are mirrored on
8154 * slot+1
8155 */
8156 int i;
8157 /* gcc -Os complains that this is unused */
8158 int insync = insync;
8159
8160 for (i = 0; i < map->num_members; i++) {
8161 __u32 ord = get_imsm_ord_tbl_ent(dev, i, MAP_X);
8162 int idx = ord_to_idx(ord);
8163 struct imsm_disk *disk;
8164
8165 /* reset the potential in-sync count on even-numbered
8166 * slots. num_copies is always 2 for imsm raid10
8167 */
8168 if ((i & 1) == 0)
8169 insync = 2;
8170
8171 disk = get_imsm_disk(super, idx);
8172 if (!disk || is_failed(disk) || ord & IMSM_ORD_REBUILD)
8173 insync--;
8174
8175 /* no in-sync disks left in this mirror the
8176 * array has failed
8177 */
8178 if (insync == 0)
8179 return IMSM_T_STATE_FAILED;
8180 }
8181
8182 return IMSM_T_STATE_DEGRADED;
8183 }
8184 case 5:
8185 if (failed < 2)
8186 return IMSM_T_STATE_DEGRADED;
8187 else
8188 return IMSM_T_STATE_FAILED;
8189 break;
8190 default:
8191 break;
8192 }
8193
8194 return map->map_state;
8195 }
8196
8197 static int imsm_count_failed(struct intel_super *super, struct imsm_dev *dev,
8198 int look_in_map)
8199 {
8200 int i;
8201 int failed = 0;
8202 struct imsm_disk *disk;
8203 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8204 struct imsm_map *prev = get_imsm_map(dev, MAP_1);
8205 struct imsm_map *map_for_loop;
8206 __u32 ord;
8207 int idx;
8208 int idx_1;
8209
8210 /* at the beginning of migration we set IMSM_ORD_REBUILD on
8211 * disks that are being rebuilt. New failures are recorded to
8212 * map[0]. So we look through all the disks we started with and
8213 * see if any failures are still present, or if any new ones
8214 * have arrived
8215 */
8216 map_for_loop = map;
8217 if (prev && (map->num_members < prev->num_members))
8218 map_for_loop = prev;
8219
8220 for (i = 0; i < map_for_loop->num_members; i++) {
8221 idx_1 = -255;
8222 /* when MAP_X is passed both maps failures are counted
8223 */
8224 if (prev &&
8225 (look_in_map == MAP_1 || look_in_map == MAP_X) &&
8226 i < prev->num_members) {
8227 ord = __le32_to_cpu(prev->disk_ord_tbl[i]);
8228 idx_1 = ord_to_idx(ord);
8229
8230 disk = get_imsm_disk(super, idx_1);
8231 if (!disk || is_failed(disk) || ord & IMSM_ORD_REBUILD)
8232 failed++;
8233 }
8234 if ((look_in_map == MAP_0 || look_in_map == MAP_X) &&
8235 i < map->num_members) {
8236 ord = __le32_to_cpu(map->disk_ord_tbl[i]);
8237 idx = ord_to_idx(ord);
8238
8239 if (idx != idx_1) {
8240 disk = get_imsm_disk(super, idx);
8241 if (!disk || is_failed(disk) ||
8242 ord & IMSM_ORD_REBUILD)
8243 failed++;
8244 }
8245 }
8246 }
8247
8248 return failed;
8249 }
8250
8251 static int imsm_open_new(struct supertype *c, struct active_array *a,
8252 char *inst)
8253 {
8254 struct intel_super *super = c->sb;
8255 struct imsm_super *mpb = super->anchor;
8256 struct imsm_update_prealloc_bb_mem u;
8257
8258 if (atoi(inst) >= mpb->num_raid_devs) {
8259 pr_err("subarry index %d, out of range\n", atoi(inst));
8260 return -ENODEV;
8261 }
8262
8263 dprintf("imsm: open_new %s\n", inst);
8264 a->info.container_member = atoi(inst);
8265
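/* have mdmon preallocate memory for bad block log updates
 * before the array goes active */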
8266 u.type = update_prealloc_badblocks_mem;
8267 imsm_update_metadata_locally(c, &u, sizeof(u));
8268
8269 return 0;
8270 }
8271
8272 static int is_resyncing(struct imsm_dev *dev)
8273 {
8274 struct imsm_map *migr_map;
8275
8276 if (!dev->vol.migr_state)
8277 return 0;
8278
8279 if (migr_type(dev) == MIGR_INIT ||
8280 migr_type(dev) == MIGR_REPAIR)
8281 return 1;
8282
8283 if (migr_type(dev) == MIGR_GEN_MIGR)
8284 return 0;
8285
8286 migr_map = get_imsm_map(dev, MAP_1);
8287
8288 if (migr_map->map_state == IMSM_T_STATE_NORMAL &&
8289 dev->vol.migr_type != MIGR_GEN_MIGR)
8290 return 1;
8291 else
8292 return 0;
8293 }
8294
8295 /* return true if we recorded new information */
8296 static int mark_failure(struct intel_super *super,
8297 struct imsm_dev *dev, struct imsm_disk *disk, int idx)
8298 {
8299 __u32 ord;
8300 int slot;
8301 struct imsm_map *map;
8302 char buf[MAX_RAID_SERIAL_LEN+3];
8303 unsigned int len, shift = 0;
8304
8305 /* new failures are always set in map[0] */
8306 map = get_imsm_map(dev, MAP_0);
8307
8308 slot = get_imsm_disk_slot(map, idx);
8309 if (slot < 0)
8310 return 0;
8311
8312 ord = __le32_to_cpu(map->disk_ord_tbl[slot]);
8313 if (is_failed(disk) && (ord & IMSM_ORD_REBUILD))
8314 return 0;
8315
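/* tag the serial with a ":0" failure suffix; if the result would
 * overflow MAX_RAID_SERIAL_LEN, drop leading characters so the
 * suffix survives */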
8316 memcpy(buf, disk->serial, MAX_RAID_SERIAL_LEN);
8317 buf[MAX_RAID_SERIAL_LEN] = '\000';
8318 strcat(buf, ":0");
8319 if ((len = strlen(buf)) >= MAX_RAID_SERIAL_LEN)
8320 shift = len - MAX_RAID_SERIAL_LEN + 1;
8321 memcpy(disk->serial, &buf[shift], len + 1 - shift);
8322
8323 disk->status |= FAILED_DISK;
8324 set_imsm_ord_tbl_ent(map, slot, idx | IMSM_ORD_REBUILD);
8325 /* mark failures in second map if second map exists and this disk
8326 * is in this slot.
8327 * This is valid for migration, initialization and rebuild
8328 */
8329 if (dev->vol.migr_state) {
8330 struct imsm_map *map2 = get_imsm_map(dev, MAP_1);
8331 int slot2 = get_imsm_disk_slot(map2, idx);
8332
8333 if (slot2 >= 0 && slot2 < map2->num_members)
8334 set_imsm_ord_tbl_ent(map2, slot2,
8335 idx | IMSM_ORD_REBUILD);
8336 }
8337 if (map->failed_disk_num == 0xff ||
8338 (!is_rebuilding(dev) && map->failed_disk_num > slot))
8339 map->failed_disk_num = slot;
8340
8341 clear_disk_badblocks(super->bbm_log, ord_to_idx(ord));
8342
8343 return 1;
8344 }
8345
8346 static void mark_missing(struct intel_super *super,
8347 struct imsm_dev *dev, struct imsm_disk *disk, int idx)
8348 {
8349 mark_failure(super, dev, disk, idx);
8350
8351 if (disk->scsi_id == __cpu_to_le32(~(__u32)0))
8352 return;
8353
8354 disk->scsi_id = __cpu_to_le32(~(__u32)0);
8355 memmove(&disk->serial[0], &disk->serial[1], MAX_RAID_SERIAL_LEN - 1);
8356 }
8357
8358 static void handle_missing(struct intel_super *super, struct imsm_dev *dev)
8359 {
8360 struct dl *dl;
8361
8362 if (!super->missing)
8363 return;
8364
8365 /* When the orom adds a replacement for a missing disk it does
8366 * not remove the missing disk's entry, but just updates the map
8367 * with the newly added disk. So it is not enough to test whether
8368 * any disk is missing; we have to check the map for failed disks
8369 * in order to stop the migration */
8370
8371 dprintf("imsm: mark missing\n");
8372 /* end process for initialization and rebuild only
8373 */
8374 if (is_gen_migration(dev) == 0) {
8375 int failed = imsm_count_failed(super, dev, MAP_0);
8376
8377 if (failed) {
8378 __u8 map_state;
8379 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8380 struct imsm_map *map1;
8381 int i, ord, ord_map1;
8382 int rebuilt = 1;
8383
8384 for (i = 0; i < map->num_members; i++) {
8385 ord = get_imsm_ord_tbl_ent(dev, i, MAP_0);
8386 if (!(ord & IMSM_ORD_REBUILD))
8387 continue;
8388
8389 map1 = get_imsm_map(dev, MAP_1);
8390 if (!map1)
8391 continue;
8392
8393 ord_map1 = __le32_to_cpu(map1->disk_ord_tbl[i]);
8394 if (ord_map1 & IMSM_ORD_REBUILD)
8395 rebuilt = 0;
8396 }
8397
8398 if (rebuilt) {
8399 map_state = imsm_check_degraded(super, dev,
8400 failed, MAP_0);
8401 end_migration(dev, super, map_state);
8402 }
8403 }
8404 }
8405 for (dl = super->missing; dl; dl = dl->next)
8406 mark_missing(super, dev, &dl->disk, dl->index);
8407 super->updates_pending++;
8408 }
8409
8410 static unsigned long long imsm_set_array_size(struct imsm_dev *dev,
8411 long long new_size)
8412 {
8413 unsigned long long array_blocks;
8414 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8415 int used_disks = imsm_num_data_members(map);
8416
8417 if (used_disks == 0) {
8418 /* when problems occur
8419 * return the current array_blocks value
8420 */
8421 array_blocks = imsm_dev_size(dev);
8422
8423 return array_blocks;
8424 }
8425
8426 /* set array size in metadata
8427 */
8428 if (new_size <= 0)
8429 /* OLCE size change is caused by added disks
8430 */
8431 array_blocks = per_dev_array_size(map) * used_disks;
8432 else
8433 /* Online Volume Size Change
8434 * Using available free space
8435 */
8436 array_blocks = new_size;
8437
8438 array_blocks = round_size_to_mb(array_blocks, used_disks);
8439 set_imsm_dev_size(dev, array_blocks);
8440
8441 return array_blocks;
8442 }
8443
8444 static void imsm_set_disk(struct active_array *a, int n, int state);
8445
8446 static void imsm_progress_container_reshape(struct intel_super *super)
8447 {
8448 /* if no device has a migr_state, but some device has a
8449 * different number of members than the previous device, start
8450 * changing the number of devices in this device to match
8451 * previous.
8452 */
8453 struct imsm_super *mpb = super->anchor;
8454 int prev_disks = -1;
8455 int i, j;
8456 int copy_map_size;
8457
8458 for (i = 0; i < mpb->num_raid_devs; i++) {
8459 struct imsm_dev *dev = get_imsm_dev(super, i);
8460 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8461 struct imsm_map *map2;
8462 int prev_num_members;
8463
8464 if (dev->vol.migr_state)
8465 return;
8466
8467 if (prev_disks == -1)
8468 prev_disks = map->num_members;
8469 if (prev_disks == map->num_members)
8470 continue;
8471
8472 /* OK, this array needs to enter reshape mode.
8473 * i.e it needs a migr_state
8474 */
8475
8476 copy_map_size = sizeof_imsm_map(map);
8477 prev_num_members = map->num_members;
8478 map->num_members = prev_disks;
8479 dev->vol.migr_state = 1;
8480 dev->vol.curr_migr_unit = 0;
8481 set_migr_type(dev, MIGR_GEN_MIGR);
8482 for (j = prev_num_members; /* don't clobber outer index i */
8483 j < map->num_members; j++)
8484 set_imsm_ord_tbl_ent(map, j, j);
8485 map2 = get_imsm_map(dev, MAP_1);
8486 /* Copy the current map */
8487 memcpy(map2, map, copy_map_size);
8488 map2->num_members = prev_num_members;
8489
8490 imsm_set_array_size(dev, -1);
8491 super->clean_migration_record_by_mdmon = 1;
8492 super->updates_pending++;
8493 }
8494 }
8495
8496 /* Handle dirty -> clean transitions, resync and reshape. Degraded and rebuild
8497 * states are handled in imsm_set_disk() with one exception, when a
8498 * resync is stopped due to a new failure this routine will set the
8499 * 'degraded' state for the array.
8500 */
8501 static int imsm_set_array_state(struct active_array *a, int consistent)
8502 {
8503 int inst = a->info.container_member;
8504 struct intel_super *super = a->container->sb;
8505 struct imsm_dev *dev = get_imsm_dev(super, inst);
8506 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8507 int failed = imsm_count_failed(super, dev, MAP_0);
8508 __u8 map_state = imsm_check_degraded(super, dev, failed, MAP_0);
8509 __u32 blocks_per_unit;
8510
8511 if (dev->vol.migr_state &&
8512 dev->vol.migr_type == MIGR_GEN_MIGR) {
8513 /* array state change is blocked due to reshape action
8514 * We might need to
8515 * - abort the reshape (if last_checkpoint is 0 and action!= reshape)
8516 * - finish the reshape (if last_checkpoint is big and action != reshape)
8517 * - update curr_migr_unit
8518 */
8519 if (a->curr_action == reshape) {
8520 /* still reshaping, maybe update curr_migr_unit */
8521 goto mark_checkpoint;
8522 } else {
8523 if (a->last_checkpoint == 0 && a->prev_action == reshape) {
8524 /* for some reason we aborted the reshape.
8525 *
8526 * disable automatic metadata rollback
8527 * user action is required to recover process
8528 */
8529 if (0) {
8530 struct imsm_map *map2 =
8531 get_imsm_map(dev, MAP_1);
8532 dev->vol.migr_state = 0;
8533 set_migr_type(dev, 0);
8534 dev->vol.curr_migr_unit = 0;
8535 memcpy(map, map2,
8536 sizeof_imsm_map(map2));
8537 super->updates_pending++;
8538 }
8539 }
8540 if (a->last_checkpoint >= a->info.component_size) {
8541 unsigned long long array_blocks;
8542 int used_disks;
8543 struct mdinfo *mdi;
8544
8545 used_disks = imsm_num_data_members(map);
8546 if (used_disks > 0) {
8547 array_blocks =
8548 per_dev_array_size(map) *
8549 used_disks;
8550 array_blocks =
8551 round_size_to_mb(array_blocks,
8552 used_disks);
8553 a->info.custom_array_size = array_blocks;
8554 /* encourage manager to update array
8555 * size
8556 */
8557
8558 a->check_reshape = 1;
8559 }
8560 /* finalize online capacity expansion/reshape */
8561 for (mdi = a->info.devs; mdi; mdi = mdi->next)
8562 imsm_set_disk(a,
8563 mdi->disk.raid_disk,
8564 mdi->curr_state);
8565
8566 imsm_progress_container_reshape(super);
8567 }
8568 }
8569 }
8570
8571 /* before we activate this array handle any missing disks */
8572 if (consistent == 2)
8573 handle_missing(super, dev);
8574
8575 if (consistent == 2 &&
8576 (!is_resync_complete(&a->info) ||
8577 map_state != IMSM_T_STATE_NORMAL ||
8578 dev->vol.migr_state))
8579 consistent = 0;
8580
8581 if (is_resync_complete(&a->info)) {
8582 /* complete initialization / resync,
8583 * recovery and interrupted recovery is completed in
8584 * ->set_disk
8585 */
8586 if (is_resyncing(dev)) {
8587 dprintf("imsm: mark resync done\n");
8588 end_migration(dev, super, map_state);
8589 super->updates_pending++;
8590 a->last_checkpoint = 0;
8591 }
8592 } else if ((!is_resyncing(dev) && !failed) &&
8593 (imsm_reshape_blocks_arrays_changes(super) == 0)) {
8594 /* mark the start of the init process if nothing is failed */
8595 dprintf("imsm: mark resync start\n");
8596 if (map->map_state == IMSM_T_STATE_UNINITIALIZED)
8597 migrate(dev, super, IMSM_T_STATE_NORMAL, MIGR_INIT);
8598 else
8599 migrate(dev, super, IMSM_T_STATE_NORMAL, MIGR_REPAIR);
8600 super->updates_pending++;
8601 }
8602
8603 mark_checkpoint:
8604 /* skip checkpointing for general migration,
8605 * it is controlled in mdadm
8606 */
8607 if (is_gen_migration(dev))
8608 goto skip_mark_checkpoint;
8609
8610 /* check if we can update curr_migr_unit from resync_start, recovery_start */
8611 blocks_per_unit = blocks_per_migr_unit(super, dev);
8612 if (blocks_per_unit) {
8613 __u32 units32;
8614 __u64 units;
8615
8616 units = a->last_checkpoint / blocks_per_unit;
8617 units32 = units;
8618
8619 /* check that we did not overflow 32-bits, and that
8620 * curr_migr_unit needs updating
8621 */
8622 if (units32 == units &&
8623 units32 != 0 &&
8624 __le32_to_cpu(dev->vol.curr_migr_unit) != units32) {
8625 dprintf("imsm: mark checkpoint (%u)\n", units32);
8626 dev->vol.curr_migr_unit = __cpu_to_le32(units32);
8627 super->updates_pending++;
8628 }
8629 }
8630
8631 skip_mark_checkpoint:
8632 /* mark dirty / clean */
8633 if (((dev->vol.dirty & RAIDVOL_DIRTY) && consistent) ||
8634 (!(dev->vol.dirty & RAIDVOL_DIRTY) && !consistent)) {
8635 dprintf("imsm: mark '%s'\n", consistent ? "clean" : "dirty");
8636 if (consistent) {
8637 dev->vol.dirty = RAIDVOL_CLEAN;
8638 } else {
8639 dev->vol.dirty = RAIDVOL_DIRTY;
8640 if (dev->rwh_policy == RWH_DISTRIBUTED ||
8641 dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
8642 dev->vol.dirty |= RAIDVOL_DSRECORD_VALID;
8643 }
8644 super->updates_pending++;
8645 }
8646
8647 return consistent;
8648 }
8649
8650 static int imsm_disk_slot_to_ord(struct active_array *a, int slot)
8651 {
8652 int inst = a->info.container_member;
8653 struct intel_super *super = a->container->sb;
8654 struct imsm_dev *dev = get_imsm_dev(super, inst);
8655 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8656
8657 if (slot >= map->num_members) {
8658 pr_err("imsm: imsm_disk_slot_to_ord %d out of range 0..%d\n",
8659 slot, map->num_members - 1);
8660 return -1;
8661 }
8662
8663 if (slot < 0)
8664 return -1;
8665
8666 return get_imsm_ord_tbl_ent(dev, slot, MAP_0);
8667 }
8668
8669 static void imsm_set_disk(struct active_array *a, int n, int state)
8670 {
8671 int inst = a->info.container_member;
8672 struct intel_super *super = a->container->sb;
8673 struct imsm_dev *dev = get_imsm_dev(super, inst);
8674 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8675 struct imsm_disk *disk;
8676 struct mdinfo *mdi;
8677 int recovery_not_finished = 0;
8678 int failed;
8679 int ord;
8680 __u8 map_state;
8681 int rebuild_done = 0;
8682 int i;
8683
8684 ord = get_imsm_ord_tbl_ent(dev, n, MAP_X);
8685 if (ord < 0)
8686 return;
8687
8688 dprintf("imsm: set_disk %d:%x\n", n, state);
8689 disk = get_imsm_disk(super, ord_to_idx(ord));
8690
8691 /* check for new failures */
8692 if (disk && (state & DS_FAULTY)) {
8693 if (mark_failure(super, dev, disk, ord_to_idx(ord)))
8694 super->updates_pending++;
8695 }
8696
8697 /* check if in_sync */
8698 if (state & DS_INSYNC && ord & IMSM_ORD_REBUILD && is_rebuilding(dev)) {
8699 struct imsm_map *migr_map = get_imsm_map(dev, MAP_1);
8700
8701 set_imsm_ord_tbl_ent(migr_map, n, ord_to_idx(ord));
8702 rebuild_done = 1;
8703 super->updates_pending++;
8704 }
8705
8706 failed = imsm_count_failed(super, dev, MAP_0);
8707 map_state = imsm_check_degraded(super, dev, failed, MAP_0);
8708
8709 /* check if recovery complete, newly degraded, or failed */
8710 dprintf("imsm: Detected transition to state ");
8711 switch (map_state) {
8712 case IMSM_T_STATE_NORMAL: /* transition to normal state */
8713 dprintf("normal: ");
8714 if (is_rebuilding(dev)) {
8715 dprintf_cont("while rebuilding");
8716 /* check if recovery is really finished */
8717 for (mdi = a->info.devs; mdi ; mdi = mdi->next)
8718 if (mdi->recovery_start != MaxSector) {
8719 recovery_not_finished = 1;
8720 break;
8721 }
8722 if (recovery_not_finished) {
8723 dprintf_cont("\n");
8724 dprintf("Rebuild has not finished yet, state not changed");
8725 if (a->last_checkpoint < mdi->recovery_start) {
8726 a->last_checkpoint = mdi->recovery_start;
8727 super->updates_pending++;
8728 }
8729 break;
8730 }
8731 end_migration(dev, super, map_state);
8732 map->failed_disk_num = ~0;
8733 super->updates_pending++;
8734 a->last_checkpoint = 0;
8735 break;
8736 }
8737 if (is_gen_migration(dev)) {
8738 dprintf_cont("while general migration");
8739 if (a->last_checkpoint >= a->info.component_size)
8740 end_migration(dev, super, map_state);
8741 else
8742 map->map_state = map_state;
8743 map->failed_disk_num = ~0;
8744 super->updates_pending++;
8745 break;
8746 }
8747 break;
8748 case IMSM_T_STATE_DEGRADED: /* transition to degraded state */
8749 dprintf_cont("degraded: ");
8750 if (map->map_state != map_state && !dev->vol.migr_state) {
8751 dprintf_cont("mark degraded");
8752 map->map_state = map_state;
8753 super->updates_pending++;
8754 a->last_checkpoint = 0;
8755 break;
8756 }
8757 if (is_rebuilding(dev)) {
8758 dprintf_cont("while rebuilding ");
8759 if (state & DS_FAULTY) {
8760 dprintf_cont("removing failed drive ");
8761 if (n == map->failed_disk_num) {
8762 dprintf_cont("end migration");
8763 end_migration(dev, super, map_state);
8764 a->last_checkpoint = 0;
8765 } else {
8766 dprintf_cont("fail detected during rebuild, changing map state");
8767 map->map_state = map_state;
8768 }
8769 super->updates_pending++;
8770 }
8771
8772 if (!rebuild_done)
8773 break;
8774
8775 /* check if recovery is really finished */
8776 for (mdi = a->info.devs; mdi ; mdi = mdi->next)
8777 if (mdi->recovery_start != MaxSector) {
8778 recovery_not_finished = 1;
8779 break;
8780 }
8781 if (recovery_not_finished) {
8782 dprintf_cont("\n");
8783 dprintf_cont("Rebuild has not finished yet");
8784 if (a->last_checkpoint < mdi->recovery_start) {
8785 a->last_checkpoint =
8786 mdi->recovery_start;
8787 super->updates_pending++;
8788 }
8789 break;
8790 }
8791
8792 dprintf_cont(" Rebuild done, still degraded");
8793 end_migration(dev, super, map_state);
8794 a->last_checkpoint = 0;
8795 super->updates_pending++;
8796
8797 for (i = 0; i < map->num_members; i++) {
8798 int idx = get_imsm_ord_tbl_ent(dev, i, MAP_0);
8799
8800 if (idx & IMSM_ORD_REBUILD)
8801 map->failed_disk_num = i;
8802 }
8803 super->updates_pending++;
8804 break;
8805 }
8806 if (is_gen_migration(dev)) {
8807 dprintf_cont("while general migration");
8808 if (a->last_checkpoint >= a->info.component_size)
8809 end_migration(dev, super, map_state);
8810 else {
8811 map->map_state = map_state;
8812 manage_second_map(super, dev);
8813 }
8814 super->updates_pending++;
8815 break;
8816 }
8817 if (is_initializing(dev)) {
8818 dprintf_cont("while initialization.");
8819 map->map_state = map_state;
8820 super->updates_pending++;
8821 break;
8822 }
8823 break;
8824 case IMSM_T_STATE_FAILED: /* transition to failed state */
8825 dprintf_cont("failed: ");
8826 if (is_gen_migration(dev)) {
8827 dprintf_cont("while general migration");
8828 map->map_state = map_state;
8829 super->updates_pending++;
8830 break;
8831 }
8832 if (map->map_state != map_state) {
8833 dprintf_cont("mark failed");
8834 end_migration(dev, super, map_state);
8835 super->updates_pending++;
8836 a->last_checkpoint = 0;
8837 break;
8838 }
8839 break;
8840 default:
8841 dprintf_cont("state %i\n", map_state);
8842 }
8843 dprintf_cont("\n");
8844 }
8845
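/* layout at the end of the disk, in sector_size units:
 *   [ extended mpb (mpb_sectors - 1) ][ anchor ][ last sector ]
 * the anchor always lands in the second-to-last sector; any
 * overflow is written to the sectors immediately preceding it */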
8846 static int store_imsm_mpb(int fd, struct imsm_super *mpb)
8847 {
8848 void *buf = mpb;
8849 __u32 mpb_size = __le32_to_cpu(mpb->mpb_size);
8850 unsigned long long dsize;
8851 unsigned long long sectors;
8852 unsigned int sector_size;
8853
8854 get_dev_sector_size(fd, NULL, &sector_size);
8855 get_dev_size(fd, NULL, &dsize);
8856
8857 if (mpb_size > sector_size) {
8858 /* -1 to account for anchor */
8859 sectors = mpb_sectors(mpb, sector_size) - 1;
8860
8861 /* write the extended mpb to the sectors preceding the anchor */
8862 if (lseek64(fd, dsize - (sector_size * (2 + sectors)),
8863 SEEK_SET) < 0)
8864 return 1;
8865
8866 if ((unsigned long long)write(fd, buf + sector_size,
8867 sector_size * sectors) != sector_size * sectors)
8868 return 1;
8869 }
8870
8871 /* first block is stored on second to last sector of the disk */
8872 if (lseek64(fd, dsize - (sector_size * 2), SEEK_SET) < 0)
8873 return 1;
8874
8875 if ((unsigned int)write(fd, buf, sector_size) != sector_size)
8876 return 1;
8877
8878 return 0;
8879 }
8880
8881 static void imsm_sync_metadata(struct supertype *container)
8882 {
8883 struct intel_super *super = container->sb;
8884
8885 dprintf("sync metadata: %d\n", super->updates_pending);
8886 if (!super->updates_pending)
8887 return;
8888
8889 write_super_imsm(container, 0);
8890
8891 super->updates_pending = 0;
8892 }
8893
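/* try to re-add the disk that previously occupied slot 'idx';
 * returns NULL if the disk is gone or has failed
 */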
8894 static struct dl *imsm_readd(struct intel_super *super, int idx, struct active_array *a)
8895 {
8896 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
8897 int i = get_imsm_disk_idx(dev, idx, MAP_X);
8898 struct dl *dl;
8899
8900 for (dl = super->disks; dl; dl = dl->next)
8901 if (dl->index == i)
8902 break;
8903
8904 if (dl && is_failed(&dl->disk))
8905 dl = NULL;
8906
8907 if (dl)
8908 dprintf("found %x:%x\n", dl->major, dl->minor);
8909
8910 return dl;
8911 }
8912
8913 static struct dl *imsm_add_spare(struct intel_super *super, int slot,
8914 struct active_array *a, int activate_new,
8915 struct mdinfo *additional_test_list)
8916 {
8917 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
8918 int idx = get_imsm_disk_idx(dev, slot, MAP_X);
8919 struct imsm_super *mpb = super->anchor;
8920 struct imsm_map *map;
8921 unsigned long long pos;
8922 struct mdinfo *d;
8923 struct extent *ex;
8924 int i, j;
8925 int found;
8926 __u32 array_start = 0;
8927 __u32 array_end = 0;
8928 struct dl *dl;
8929 struct mdinfo *test_list;
8930
8931 for (dl = super->disks; dl; dl = dl->next) {
8932 /* If in this array, skip */
8933 for (d = a->info.devs ; d ; d = d->next)
8934 if (d->state_fd >= 0 &&
8935 d->disk.major == dl->major &&
8936 d->disk.minor == dl->minor) {
8937 dprintf("%x:%x already in array\n",
8938 dl->major, dl->minor);
8939 break;
8940 }
8941 if (d)
8942 continue;
8943 test_list = additional_test_list;
8944 while (test_list) {
8945 if (test_list->disk.major == dl->major &&
8946 test_list->disk.minor == dl->minor) {
8947 dprintf("%x:%x already in additional test list\n",
8948 dl->major, dl->minor);
8949 break;
8950 }
8951 test_list = test_list->next;
8952 }
8953 if (test_list)
8954 continue;
8955
8956 /* skip in use or failed drives */
8957 if (is_failed(&dl->disk) || idx == dl->index ||
8958 dl->index == -2) {
8959 dprintf("%x:%x status (failed: %d index: %d)\n",
8960 dl->major, dl->minor, is_failed(&dl->disk), idx);
8961 continue;
8962 }
8963
8964 /* skip pure spares when we are looking for partially
8965 * assimilated drives
8966 */
8967 if (dl->index == -1 && !activate_new)
8968 continue;
8969
8970 if (!drive_validate_sector_size(super, dl))
8971 continue;
8972
8973 /* Does this unused device have the requisite free space?
8974 * It needs to be able to cover all member volumes
8975 */
8976 ex = get_extents(super, dl, 1);
8977 if (!ex) {
8978 dprintf("cannot get extents\n");
8979 continue;
8980 }
8981 for (i = 0; i < mpb->num_raid_devs; i++) {
8982 dev = get_imsm_dev(super, i);
8983 map = get_imsm_map(dev, MAP_0);
8984
8985 /* check if this disk is already a member of
8986 * this array
8987 */
8988 if (get_imsm_disk_slot(map, dl->index) >= 0)
8989 continue;
8990
8991 found = 0;
8992 j = 0;
8993 pos = 0;
8994 array_start = pba_of_lba0(map);
8995 array_end = array_start +
8996 per_dev_array_size(map) - 1;
8997
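/* walk the free-extent list (terminated by a zero-size entry)
 * looking for a hole that covers the whole member range */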
8998 do {
8999 /* check that we can start at pba_of_lba0 with
9000 * num_data_stripes*blocks_per_stripe of space
9001 */
9002 if (array_start >= pos && array_end < ex[j].start) {
9003 found = 1;
9004 break;
9005 }
9006 pos = ex[j].start + ex[j].size;
9007 j++;
9008 } while (ex[j-1].size);
9009
9010 if (!found)
9011 break;
9012 }
9013
9014 free(ex);
9015 if (i < mpb->num_raid_devs) {
9016 dprintf("%x:%x does not have %u to %u available\n",
9017 dl->major, dl->minor, array_start, array_end);
9018 /* No room */
9019 continue;
9020 }
9021 return dl;
9022 }
9023
9024 return dl;
9025 }
9026
9027 static int imsm_rebuild_allowed(struct supertype *cont, int dev_idx, int failed)
9028 {
9029 struct imsm_dev *dev2;
9030 struct imsm_map *map;
9031 struct dl *idisk;
9032 int slot;
9033 int idx;
9034 __u8 state;
9035
9036 dev2 = get_imsm_dev(cont->sb, dev_idx);
9037 if (dev2) {
9038 state = imsm_check_degraded(cont->sb, dev2, failed, MAP_0);
9039 if (state == IMSM_T_STATE_FAILED) {
9040 map = get_imsm_map(dev2, MAP_0);
9041 if (!map)
9042 return 1;
9043 for (slot = 0; slot < map->num_members; slot++) {
9044 /*
9045 * Check if failed disks are deleted from intel
9046 * disk list or are marked to be deleted
9047 */
9048 idx = get_imsm_disk_idx(dev2, slot, MAP_X);
9049 idisk = get_imsm_dl_disk(cont->sb, idx);
9050 /*
9051 * Do not rebuild the array if failed disks
9052 * from failed sub-array are not removed from
9053 * container.
9054 */
9055 if (idisk &&
9056 is_failed(&idisk->disk) &&
9057 (idisk->action != DISK_REMOVE))
9058 return 0;
9059 }
9060 }
9061 }
9062 return 1;
9063 }
9064
9065 static struct mdinfo *imsm_activate_spare(struct active_array *a,
9066 struct metadata_update **updates)
9067 {
9068 /**
9069 * Find a device with unused free space and use it to replace a
9070 * failed/vacant region in an array. We replace failed regions one
9071 * array at a time. The result is that a new spare disk will be added
9072 * to the first failed array and after the monitor has finished
9073 * propagating failures the remainder will be consumed.
9074 *
9075 * FIXME add a capability for mdmon to request spares from another
9076 * container.
9077 */
9078
9079 struct intel_super *super = a->container->sb;
9080 int inst = a->info.container_member;
9081 struct imsm_dev *dev = get_imsm_dev(super, inst);
9082 struct imsm_map *map = get_imsm_map(dev, MAP_0);
9083 int failed = a->info.array.raid_disks;
9084 struct mdinfo *rv = NULL;
9085 struct mdinfo *d;
9086 struct mdinfo *di;
9087 struct metadata_update *mu;
9088 struct dl *dl;
9089 struct imsm_update_activate_spare *u;
9090 int num_spares = 0;
9091 int i;
9092 int allowed;
9093
9094 for (d = a->info.devs ; d ; d = d->next) {
9095 if ((d->curr_state & DS_FAULTY) &&
9096 d->state_fd >= 0)
9097 /* wait for Removal to happen */
9098 return NULL;
9099 if (d->state_fd >= 0)
9100 failed--;
9101 }
9102
9103 dprintf("imsm: activate spare: inst=%d failed=%d (%d) level=%d\n",
9104 inst, failed, a->info.array.raid_disks, a->info.array.level);
9105
9106 if (imsm_reshape_blocks_arrays_changes(super))
9107 return NULL;
9108
9109 /* Cannot activate another spare if rebuild is in progress already
9110 */
9111 if (is_rebuilding(dev)) {
9112 dprintf("imsm: No spare activation allowed. Rebuild in progress already.\n");
9113 return NULL;
9114 }
9115
9116 if (a->info.array.level == 4)
9117 /* No repair for a taken-over array,
9118 * imsm doesn't support raid4
9119 */
9120 return NULL;
9121
9122 if (imsm_check_degraded(super, dev, failed, MAP_0) !=
9123 IMSM_T_STATE_DEGRADED)
9124 return NULL;
9125
9126 if (get_imsm_map(dev, MAP_0)->map_state == IMSM_T_STATE_UNINITIALIZED) {
9127 dprintf("imsm: No spare activation allowed. Volume is not initialized.\n");
9128 return NULL;
9129 }
9130
9131 /*
9132 * If there are any failed disks check state of the other volume.
9133 * Block rebuild if another one has failed, until the failed
9134 * disks are removed from the container.
9135 */
9136 if (failed) {
9137 dprintf("found failed disks in %.*s, check if there anotherfailed sub-array.\n",
9138 MAX_RAID_SERIAL_LEN, dev->volume);
9139 /* check if states of the other volumes allow for rebuild */
9140 for (i = 0; i < super->anchor->num_raid_devs; i++) {
9141 if (i != inst) {
9142 allowed = imsm_rebuild_allowed(a->container,
9143 i, failed);
9144 if (!allowed)
9145 return NULL;
9146 }
9147 }
9148 }
9149
9150 /* For each slot, if it is not working, find a spare */
9151 for (i = 0; i < a->info.array.raid_disks; i++) {
9152 for (d = a->info.devs ; d ; d = d->next)
9153 if (d->disk.raid_disk == i)
9154 break;
9155 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
9156 if (d && (d->state_fd >= 0))
9157 continue;
9158
9159 /*
9160 * OK, this device needs recovery. Try to re-add the
9161 * previous occupant of this slot, if this fails see if
9162 * we can continue the assimilation of a spare that was
9163 * partially assimilated, finally try to activate a new
9164 * spare.
9165 */
9166 dl = imsm_readd(super, i, a);
9167 if (!dl)
9168 dl = imsm_add_spare(super, i, a, 0, rv);
9169 if (!dl)
9170 dl = imsm_add_spare(super, i, a, 1, rv);
9171 if (!dl)
9172 continue;
9173
9174 /* found a usable disk with enough space */
9175 di = xcalloc(1, sizeof(*di));
9176
9177 /* dl->index will be -1 in the case we are activating a
9178 * pristine spare. imsm_process_update() will create a
9179 * new index in this case. Once a disk is found to be
9180 * failed in all member arrays it is kicked from the
9181 * metadata
9182 */
9183 di->disk.number = dl->index;
9184
9185 /* (ab)use di->devs to store a pointer to the device
9186 * we chose
9187 */
9188 di->devs = (struct mdinfo *) dl;
9189
9190 di->disk.raid_disk = i;
9191 di->disk.major = dl->major;
9192 di->disk.minor = dl->minor;
9193 di->disk.state = 0;
9194 di->recovery_start = 0;
9195 di->data_offset = pba_of_lba0(map);
9196 di->component_size = a->info.component_size;
9197 di->container_member = inst;
9198 di->bb.supported = 1;
9199 if (a->info.consistency_policy == CONSISTENCY_POLICY_PPL) {
9200 di->ppl_sector = get_ppl_sector(super, inst);
9201 di->ppl_size = MULTIPLE_PPL_AREA_SIZE_IMSM >> 9;
9202 }
9203 super->random = random32();
9204 di->next = rv;
9205 rv = di;
9206 num_spares++;
9207 dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
9208 i, di->data_offset);
9209 }
9210
9211 if (!rv)
9212 /* No spares found */
9213 return rv;
9214 /* Now 'rv' has a list of devices to return.
9215 * Create a metadata_update record to update the
9216 * disk_ord_tbl for the array
9217 */
9218 mu = xmalloc(sizeof(*mu));
9219 mu->buf = xcalloc(num_spares,
9220 sizeof(struct imsm_update_activate_spare));
9221 mu->space = NULL;
9222 mu->space_list = NULL;
9223 mu->len = sizeof(struct imsm_update_activate_spare) * num_spares;
9224 mu->next = *updates;
9225 u = (struct imsm_update_activate_spare *) mu->buf;
9226
9227 for (di = rv ; di ; di = di->next) {
9228 u->type = update_activate_spare;
9229 u->dl = (struct dl *) di->devs;
9230 di->devs = NULL;
9231 u->slot = di->disk.raid_disk;
9232 u->array = inst;
9233 u->next = u + 1;
9234 u++;
9235 }
9236 (u-1)->next = NULL;
9237 *updates = mu;
9238
9239 return rv;
9240 }
9241
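/* a create request overlaps an existing array if any serial in
 * the request already appears in that array's map */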
9242 static int disks_overlap(struct intel_super *super, int idx, struct imsm_update_create_array *u)
9243 {
9244 struct imsm_dev *dev = get_imsm_dev(super, idx);
9245 struct imsm_map *map = get_imsm_map(dev, MAP_0);
9246 struct imsm_map *new_map = get_imsm_map(&u->dev, MAP_0);
9247 struct disk_info *inf = get_disk_info(u);
9248 struct imsm_disk *disk;
9249 int i;
9250 int j;
9251
9252 for (i = 0; i < map->num_members; i++) {
9253 disk = get_imsm_disk(super, get_imsm_disk_idx(dev, i, MAP_X));
9254 for (j = 0; j < new_map->num_members; j++)
9255 if (serialcmp(disk->serial, inf[j].serial) == 0)
9256 return 1;
9257 }
9258
9259 return 0;
9260 }
9261
9262 static struct dl *get_disk_super(struct intel_super *super, int major, int minor)
9263 {
9264 struct dl *dl;
9265
9266 for (dl = super->disks; dl; dl = dl->next)
9267 if (dl->major == major && dl->minor == minor)
9268 return dl;
9269 return NULL;
9270 }
9271
9272 static int remove_disk_super(struct intel_super *super, int major, int minor)
9273 {
9274 struct dl *prev;
9275 struct dl *dl;
9276
9277 prev = NULL;
9278 for (dl = super->disks; dl; dl = dl->next) {
9279 if (dl->major == major && dl->minor == minor) {
9280 /* remove */
9281 if (prev)
9282 prev->next = dl->next;
9283 else
9284 super->disks = dl->next;
9285 dl->next = NULL;
9286 __free_imsm_disk(dl);
9287 dprintf("removed %x:%x\n", major, minor);
9288 break;
9289 }
9290 prev = dl;
9291 }
9292 return 0;
9293 }
9294
9295 static void imsm_delete(struct intel_super *super, struct dl **dlp, unsigned index);
9296
9297 static int add_remove_disk_update(struct intel_super *super)
9298 {
9299 int check_degraded = 0;
9300 struct dl *disk;
9301
9302 /* add/remove some spares to/from the metadata/container */
9303 while (super->disk_mgmt_list) {
9304 struct dl *disk_cfg;
9305
9306 disk_cfg = super->disk_mgmt_list;
9307 super->disk_mgmt_list = disk_cfg->next;
9308 disk_cfg->next = NULL;
9309
9310 if (disk_cfg->action == DISK_ADD) {
9311 disk_cfg->next = super->disks;
9312 super->disks = disk_cfg;
9313 check_degraded = 1;
9314 dprintf("added %x:%x\n",
9315 disk_cfg->major, disk_cfg->minor);
9316 } else if (disk_cfg->action == DISK_REMOVE) {
9317 dprintf("Disk remove action processed: %x.%x\n",
9318 disk_cfg->major, disk_cfg->minor);
9319 disk = get_disk_super(super,
9320 disk_cfg->major,
9321 disk_cfg->minor);
9322 if (disk) {
9323 /* store action status */
9324 disk->action = DISK_REMOVE;
9325 /* remove spare disks only */
9326 if (disk->index == -1) {
9327 remove_disk_super(super,
9328 disk_cfg->major,
9329 disk_cfg->minor);
9330 } else {
9331 disk_cfg->fd = disk->fd;
9332 disk->fd = -1;
9333 }
9334 }
9335 /* release the allocated disk structure */
9336 __free_imsm_disk(disk_cfg);
9337 }
9338 }
9339 return check_degraded;
9340 }
9341
9342 static int apply_reshape_migration_update(struct imsm_update_reshape_migration *u,
9343 struct intel_super *super,
9344 void ***space_list)
9345 {
9346 struct intel_dev *id;
9347 void **tofree = NULL;
9348 int ret_val = 0;
9349
9350 dprintf("(enter)\n");
9351 if (u->subdev < 0 || u->subdev > 1) {
9352 dprintf("imsm: Error: Wrong subdev: %i\n", u->subdev);
9353 return ret_val;
9354 }
9355 if (space_list == NULL || *space_list == NULL) {
9356 dprintf("imsm: Error: Memory is not allocated\n");
9357 return ret_val;
9358 }
9359
9360 for (id = super->devlist ; id; id = id->next) {
9361 if (id->index == (unsigned)u->subdev) {
9362 struct imsm_dev *dev = get_imsm_dev(super, u->subdev);
9363 struct imsm_map *map;
9364 struct imsm_dev *new_dev =
9365 (struct imsm_dev *)*space_list;
9366 struct imsm_map *migr_map = get_imsm_map(dev, MAP_1);
9367 int to_state;
9368 struct dl *new_disk;
9369
9370 if (new_dev == NULL)
9371 return ret_val;
9372 *space_list = **space_list;
9373 memcpy(new_dev, dev, sizeof_imsm_dev(dev, 0));
9374 map = get_imsm_map(new_dev, MAP_0);
9375 if (migr_map) {
9376 dprintf("imsm: Error: migration in progress");
9377 return ret_val;
9378 }
9379
9380 to_state = map->map_state;
9381 if ((u->new_level == 5) && (map->raid_level == 0)) {
9382 map->num_members++;
9383 /* this should not happen */
9384 if (u->new_disks[0] < 0) {
9385 map->failed_disk_num =
9386 map->num_members - 1;
9387 to_state = IMSM_T_STATE_DEGRADED;
9388 } else
9389 to_state = IMSM_T_STATE_NORMAL;
9390 }
9391 migrate(new_dev, super, to_state, MIGR_GEN_MIGR);
9392 if (u->new_level > -1)
9393 map->raid_level = u->new_level;
9394 migr_map = get_imsm_map(new_dev, MAP_1);
9395 if ((u->new_level == 5) &&
9396 (migr_map->raid_level == 0)) {
9397 int ord = map->num_members - 1;
9398 migr_map->num_members--;
9399 if (u->new_disks[0] < 0)
9400 ord |= IMSM_ORD_REBUILD;
9401 set_imsm_ord_tbl_ent(map,
9402 map->num_members - 1,
9403 ord);
9404 }
9405 id->dev = new_dev;
9406 tofree = (void **)dev;
9407
9408 /* update chunk size
9409 */
9410 if (u->new_chunksize > 0) {
9411 unsigned long long num_data_stripes;
9412 struct imsm_map *dest_map =
9413 get_imsm_map(dev, MAP_0);
9414 int used_disks =
9415 imsm_num_data_members(dest_map);
9416
9417 if (used_disks == 0)
9418 return ret_val;
9419
9420 map->blocks_per_strip =
9421 __cpu_to_le16(u->new_chunksize * 2);
9422 num_data_stripes =
9423 imsm_dev_size(dev) / used_disks;
9424 num_data_stripes /= map->blocks_per_strip;
9425 num_data_stripes /= map->num_domains;
9426 set_num_data_stripes(map, num_data_stripes);
9427 }
9428
9429 /* ensure blocks_per_member has valid value
9430 */
9431 set_blocks_per_member(map,
9432 per_dev_array_size(map) +
9433 NUM_BLOCKS_DIRTY_STRIPE_REGION);
9434
9435 /* add disk
9436 */
9437 if (u->new_level != 5 || migr_map->raid_level != 0 ||
9438 migr_map->raid_level == map->raid_level)
9439 goto skip_disk_add;
9440
9441 if (u->new_disks[0] >= 0) {
9442 /* use the passed-in spare
9443 */
9444 new_disk = get_disk_super(super,
9445 major(u->new_disks[0]),
9446 minor(u->new_disks[0]));
9447 if (new_disk == NULL)
9448 goto error_disk_add;
9449 dprintf("imsm: new disk for reshape is: %i:%i (%p, index = %i)\n",
9450 major(u->new_disks[0]),
9451 minor(u->new_disks[0]),
9452 new_disk, new_disk->index);
9453
9454 new_disk->index = map->num_members - 1;
9455 /* slot to fill in autolayout
9456 */
9457 new_disk->raiddisk = new_disk->index;
9458 new_disk->disk.status |= CONFIGURED_DISK;
9459 new_disk->disk.status &= ~SPARE_DISK;
9460 } else
9461 goto error_disk_add;
9462
9463 skip_disk_add:
9464 *tofree = *space_list;
9465 /* calculate new size
9466 */
9467 imsm_set_array_size(new_dev, -1);
9468
9469 ret_val = 1;
9470 }
9471 }
9472
9473 if (tofree)
9474 *space_list = tofree;
9475 return ret_val;
9476
9477 error_disk_add:
9478 dprintf("Error: imsm: Cannot find disk.\n");
9479 return ret_val;
9480 }
9481
9482 static int apply_size_change_update(struct imsm_update_size_change *u,
9483 struct intel_super *super)
9484 {
9485 struct intel_dev *id;
9486 int ret_val = 0;
9487
9488 dprintf("(enter)\n");
9489 if (u->subdev < 0 || u->subdev > 1) {
9490 dprintf("imsm: Error: Wrong subdev: %i\n", u->subdev);
9491 return ret_val;
9492 }
9493
9494 for (id = super->devlist ; id; id = id->next) {
9495 if (id->index == (unsigned)u->subdev) {
9496 struct imsm_dev *dev = get_imsm_dev(super, u->subdev);
9497 struct imsm_map *map = get_imsm_map(dev, MAP_0);
9498 int used_disks = imsm_num_data_members(map);
9499 unsigned long long blocks_per_member;
9500 unsigned long long num_data_stripes;
9501 unsigned long long new_size_per_disk;
9502
9503 if (used_disks == 0)
9504 return 0;
9505
9506 /* calculate new size
9507 */
9508 new_size_per_disk = u->new_size / used_disks;
9509 blocks_per_member = new_size_per_disk +
9510 NUM_BLOCKS_DIRTY_STRIPE_REGION;
9511 num_data_stripes = new_size_per_disk /
9512 map->blocks_per_strip;
9513 num_data_stripes /= map->num_domains;
9514 dprintf("(size: %llu, blocks per member: %llu, num_data_stipes: %llu)\n",
9515 u->new_size, new_size_per_disk,
9516 num_data_stripes);
9517 set_blocks_per_member(map, blocks_per_member);
9518 set_num_data_stripes(map, num_data_stripes);
9519 imsm_set_array_size(dev, u->new_size);
9520
9521 ret_val = 1;
9522 break;
9523 }
9524 }
9525
9526 return ret_val;
9527 }
9528
9529 static int prepare_spare_to_activate(struct supertype *st,
9530 struct imsm_update_activate_spare *u)
9531 {
9532 struct intel_super *super = st->sb;
9533 int prev_current_vol = super->current_vol;
9534 struct active_array *a;
9535 int ret = 1;
9536
9537 for (a = st->arrays; a; a = a->next)
9538 /*
9539 * Additional initialization (adding bitmap header, filling
9540 * the bitmap area with '1's to force initial rebuild for a whole
9541 * data-area) is required when adding the spare to the volume
9542 * with write-intent bitmap.
9543 */
9544 if (a->info.container_member == u->array &&
9545 a->info.consistency_policy == CONSISTENCY_POLICY_BITMAP) {
9546 struct dl *dl;
9547
9548 for (dl = super->disks; dl; dl = dl->next)
9549 if (dl == u->dl)
9550 break;
9551 if (!dl)
9552 break;
9553
9554 super->current_vol = u->array;
9555 if (st->ss->write_bitmap(st, dl->fd, NoUpdate))
9556 ret = 0;
9557 super->current_vol = prev_current_vol;
9558 }
9559 return ret;
9560 }
9561
9562 static int apply_update_activate_spare(struct imsm_update_activate_spare *u,
9563 struct intel_super *super,
9564 struct active_array *active_array)
9565 {
9566 struct imsm_super *mpb = super->anchor;
9567 struct imsm_dev *dev = get_imsm_dev(super, u->array);
9568 struct imsm_map *map = get_imsm_map(dev, MAP_0);
9569 struct imsm_map *migr_map;
9570 struct active_array *a;
9571 struct imsm_disk *disk;
9572 __u8 to_state;
9573 struct dl *dl;
9574 unsigned int found;
9575 int failed;
9576 int victim;
9577 int i;
9578 int second_map_created = 0;
9579
9580 for (; u; u = u->next) {
9581 victim = get_imsm_disk_idx(dev, u->slot, MAP_X);
9582
9583 if (victim < 0)
9584 return 0;
9585
9586 for (dl = super->disks; dl; dl = dl->next)
9587 if (dl == u->dl)
9588 break;
9589
9590 if (!dl) {
9591 pr_err("error: imsm_activate_spare passed an unknown disk (index: %d)\n",
9592 u->dl->index);
9593 return 0;
9594 }
9595
9596 /* count failures (excluding rebuilds and the victim)
9597 * to determine map[0] state
9598 */
9599 failed = 0;
9600 for (i = 0; i < map->num_members; i++) {
9601 if (i == u->slot)
9602 continue;
9603 disk = get_imsm_disk(super,
9604 get_imsm_disk_idx(dev, i, MAP_X));
9605 if (!disk || is_failed(disk))
9606 failed++;
9607 }
9608
9609 /* adding a pristine spare, assign a new index */
9610 if (dl->index < 0) {
9611 dl->index = super->anchor->num_disks;
9612 super->anchor->num_disks++;
9613 }
9614 disk = &dl->disk;
9615 disk->status |= CONFIGURED_DISK;
9616 disk->status &= ~SPARE_DISK;
9617
9618 /* mark rebuild */
9619 to_state = imsm_check_degraded(super, dev, failed, MAP_0);
9620 if (!second_map_created) {
9621 second_map_created = 1;
9622 map->map_state = IMSM_T_STATE_DEGRADED;
9623 migrate(dev, super, to_state, MIGR_REBUILD);
9624 } else
9625 map->map_state = to_state;
9626 migr_map = get_imsm_map(dev, MAP_1);
9627 set_imsm_ord_tbl_ent(map, u->slot, dl->index);
9628 set_imsm_ord_tbl_ent(migr_map, u->slot,
9629 dl->index | IMSM_ORD_REBUILD);
9630
9631 /* update the family_num to mark a new container
9632 * generation, being careful to record the existing
9633 * family_num in orig_family_num to clean up after
9634 * earlier mdadm versions that neglected to set it.
9635 */
9636 if (mpb->orig_family_num == 0)
9637 mpb->orig_family_num = mpb->family_num;
9638 mpb->family_num += super->random;
9639
9640 /* count arrays using the victim in the metadata */
9641 found = 0;
9642 for (a = active_array; a ; a = a->next) {
9643 dev = get_imsm_dev(super, a->info.container_member);
9644 map = get_imsm_map(dev, MAP_0);
9645
9646 if (get_imsm_disk_slot(map, victim) >= 0)
9647 found++;
9648 }
9649
9650 /* delete the victim if it is no longer being
9651 * utilized anywhere
9652 */
9653 if (!found) {
9654 struct dl **dlp;
9655
9656 /* We know that 'manager' isn't touching anything,
9657 * so it is safe to delete
9658 */
9659 for (dlp = &super->disks; *dlp; dlp = &(*dlp)->next)
9660 if ((*dlp)->index == victim)
9661 break;
9662
9663 /* victim may be on the missing list */
9664 if (!*dlp)
9665 for (dlp = &super->missing; *dlp;
9666 dlp = &(*dlp)->next)
9667 if ((*dlp)->index == victim)
9668 break;
9669 imsm_delete(super, dlp, victim);
9670 }
9671 }
9672
9673 return 1;
9674 }
9675
9676 static int apply_reshape_container_disks_update(struct imsm_update_reshape *u,
9677 struct intel_super *super,
9678 void ***space_list)
9679 {
9680 struct dl *new_disk;
9681 struct intel_dev *id;
9682 int i;
9683 int delta_disks = u->new_raid_disks - u->old_raid_disks;
9684 int disk_count = u->old_raid_disks;
9685 void **tofree = NULL;
9686 int devices_to_reshape = 1;
9687 struct imsm_super *mpb = super->anchor;
9688 int ret_val = 0;
9689 unsigned int dev_id;
9690
9691 dprintf("(enter)\n");
9692
9693 /* enable spares to use in array */
9694 for (i = 0; i < delta_disks; i++) {
9695 new_disk = get_disk_super(super,
9696 major(u->new_disks[i]),
9697 minor(u->new_disks[i]));
9698 dprintf("imsm: new disk for reshape is: %i:%i (%p, index = %i)\n",
9699 major(u->new_disks[i]), minor(u->new_disks[i]),
9700 new_disk, new_disk->index);
9701 if (new_disk == NULL ||
9702 (new_disk->index >= 0 &&
9703 new_disk->index < u->old_raid_disks))
9704 goto update_reshape_exit;
9705 new_disk->index = disk_count++;
9706 /* slot to fill in autolayout
9707 */
9708 new_disk->raiddisk = new_disk->index;
9709 new_disk->disk.status |=
9710 CONFIGURED_DISK;
9711 new_disk->disk.status &= ~SPARE_DISK;
9712 }
9713
9714 dprintf("imsm: volume set mpb->num_raid_devs = %i\n",
9715 mpb->num_raid_devs);
9716 /* manage changes in volume
9717 */
9718 for (dev_id = 0; dev_id < mpb->num_raid_devs; dev_id++) {
9719 void **sp = *space_list;
9720 struct imsm_dev *newdev;
9721 struct imsm_map *newmap, *oldmap;
9722
9723 for (id = super->devlist ; id; id = id->next) {
9724 if (id->index == dev_id)
9725 break;
9726 }
9727 if (id == NULL)
9728 break;
9729 if (!sp)
9730 continue;
9731 *space_list = *sp;
9732 newdev = (void*)sp;
9733 /* Copy the dev, but not (all of) the map */
9734 memcpy(newdev, id->dev, sizeof(*newdev));
9735 oldmap = get_imsm_map(id->dev, MAP_0);
9736 newmap = get_imsm_map(newdev, MAP_0);
9737 /* Copy the current map */
9738 memcpy(newmap, oldmap, sizeof_imsm_map(oldmap));
9739 /* update one device only
9740 */
9741 if (devices_to_reshape) {
9742 dprintf("imsm: modifying subdev: %i\n",
9743 id->index);
9744 devices_to_reshape--;
9745 newdev->vol.migr_state = 1;
9746 newdev->vol.curr_migr_unit = 0;
9747 set_migr_type(newdev, MIGR_GEN_MIGR);
9748 newmap->num_members = u->new_raid_disks;
9749 for (i = 0; i < delta_disks; i++) {
9750 set_imsm_ord_tbl_ent(newmap,
9751 u->old_raid_disks + i,
9752 u->old_raid_disks + i);
9753 }
9754 /* New map is correct, now need to save old map
9755 */
9756 newmap = get_imsm_map(newdev, MAP_1);
9757 memcpy(newmap, oldmap, sizeof_imsm_map(oldmap));
9758
9759 imsm_set_array_size(newdev, -1);
9760 }
9761
9762 sp = (void **)id->dev;
9763 id->dev = newdev;
9764 *sp = tofree;
9765 tofree = sp;
9766
9767 /* Clear migration record */
9768 memset(super->migr_rec, 0, sizeof(struct migr_record));
9769 }
9770 if (tofree)
9771 *space_list = tofree;
9772 ret_val = 1;
9773
9774 update_reshape_exit:
9775
9776 return ret_val;
9777 }
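
/* A minimal sketch of the update->space_list convention consumed above
 * (illustrative, assuming each pre-allocated buffer begins with a
 * 'next' pointer):
 *
 *   void **sp = *space_list;     take the head buffer
 *   *space_list = *sp;           advance the list head
 *   newdev = (void *)sp;         reuse the buffer as the new object
 *   *(void **)old = tofree;      thread the replaced object
 *   tofree = (void **)old;       onto the to-free list
 *
 * Handing 'tofree' back through *space_list lets mdmon release the
 * replaced buffers once the update has been committed.
 */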
9778
9779 static int apply_takeover_update(struct imsm_update_takeover *u,
9780 struct intel_super *super,
9781 void ***space_list)
9782 {
9783 struct imsm_dev *dev = NULL;
9784 struct intel_dev *dv;
9785 struct imsm_dev *dev_new;
9786 struct imsm_map *map;
9787 struct dl *dm, *du;
9788 int i;
9789
9790 for (dv = super->devlist; dv; dv = dv->next)
9791 if (dv->index == (unsigned int)u->subarray) {
9792 dev = dv->dev;
9793 break;
9794 }
9795
9796 if (dev == NULL)
9797 return 0;
9798
9799 map = get_imsm_map(dev, MAP_0);
9800
9801 if (u->direction == R10_TO_R0) {
9802 unsigned long long num_data_stripes;
9803
9804 /* Number of failed disks must be half of initial disk number */
9805 if (imsm_count_failed(super, dev, MAP_0) !=
9806 (map->num_members / 2))
9807 return 0;
9808
9809 /* iterate through devices to mark removed disks as spare */
9810 for (dm = super->disks; dm; dm = dm->next) {
9811 if (dm->disk.status & FAILED_DISK) {
9812 int idx = dm->index;
9813 /* update indexes on the disk list */
9814 				/* FIXME: this loop-within-a-loop looks wrong; I'm not convinced
9815 				   the index values will end up being correct.... NB */
9816 for (du = super->disks; du; du = du->next)
9817 if (du->index > idx)
9818 du->index--;
9819 /* mark as spare disk */
9820 mark_spare(dm);
9821 }
9822 }
9823 /* update map */
9824 map->num_members = map->num_members / 2;
9825 map->map_state = IMSM_T_STATE_NORMAL;
9826 map->num_domains = 1;
9827 map->raid_level = 0;
9828 map->failed_disk_num = -1;
9829 num_data_stripes = imsm_dev_size(dev) / 2;
9830 num_data_stripes /= map->blocks_per_strip;
9831 set_num_data_stripes(map, num_data_stripes);
9832 }
9833
9834 if (u->direction == R0_TO_R10) {
9835 void **space;
9836 unsigned long long num_data_stripes;
9837
9838 /* update slots in current disk list */
9839 for (dm = super->disks; dm; dm = dm->next) {
9840 if (dm->index >= 0)
9841 dm->index *= 2;
9842 }
9843 /* create new *missing* disks */
9844 for (i = 0; i < map->num_members; i++) {
9845 space = *space_list;
9846 if (!space)
9847 continue;
9848 *space_list = *space;
9849 du = (void *)space;
9850 memcpy(du, super->disks, sizeof(*du));
9851 du->fd = -1;
9852 du->minor = 0;
9853 du->major = 0;
9854 du->index = (i * 2) + 1;
9855 sprintf((char *)du->disk.serial,
9856 " MISSING_%d", du->index);
9857 sprintf((char *)du->serial,
9858 "MISSING_%d", du->index);
9859 du->next = super->missing;
9860 super->missing = du;
9861 }
9862 /* create new dev and map */
9863 space = *space_list;
9864 if (!space)
9865 return 0;
9866 *space_list = *space;
9867 dev_new = (void *)space;
9868 memcpy(dev_new, dev, sizeof(*dev));
9869 /* update new map */
9870 map = get_imsm_map(dev_new, MAP_0);
9871 map->num_members = map->num_members * 2;
9872 map->map_state = IMSM_T_STATE_DEGRADED;
9873 map->num_domains = 2;
9874 map->raid_level = 1;
9875 num_data_stripes = imsm_dev_size(dev) / 2;
9876 num_data_stripes /= map->blocks_per_strip;
9877 num_data_stripes /= map->num_domains;
9878 set_num_data_stripes(map, num_data_stripes);
9879
9880 /* replace dev<->dev_new */
9881 dv->dev = dev_new;
9882 }
9883 /* update disk order table */
9884 for (du = super->disks; du; du = du->next)
9885 if (du->index >= 0)
9886 set_imsm_ord_tbl_ent(map, du->index, du->index);
9887 for (du = super->missing; du; du = du->next)
9888 if (du->index >= 0) {
9889 set_imsm_ord_tbl_ent(map, du->index, du->index);
9890 mark_missing(super, dv->dev, &du->disk, du->index);
9891 }
9892
9893 return 1;
9894 }
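
/* Worked example for the takeover stripe arithmetic above, with assumed
 * values: imsm_dev_size(dev) = 2097152 sectors and blocks_per_strip =
 * 128 (a 64K chunk).  For R0_TO_R10:
 *
 *   num_data_stripes = (2097152 / 2) / 128 / num_domains(= 2) = 4096
 *
 * The per-disk stripe count halves relative to the RAID0 layout because
 * every stripe now exists on two mirrored domains; the R10_TO_R0 path
 * omits the num_domains division for the same reason.
 */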
9895
9896 static void imsm_process_update(struct supertype *st,
9897 struct metadata_update *update)
9898 {
9899 /**
9900 * crack open the metadata_update envelope to find the update record
9901 * update can be one of:
9902 * update_reshape_container_disks - all the arrays in the container
9903 * are being reshaped to have more devices. We need to mark
9904 * the arrays for general migration and convert selected spares
9905 * into active devices.
9906 * update_activate_spare - a spare device has replaced a failed
9907 * device in an array, update the disk_ord_tbl. If this disk is
9908 * present in all member arrays then also clear the SPARE_DISK
9909 * flag
9910 * update_create_array
9911 * update_kill_array
9912 * update_rename_array
9913 * update_add_remove_disk
9914 */
9915 struct intel_super *super = st->sb;
9916 struct imsm_super *mpb;
9917 enum imsm_update_type type = *(enum imsm_update_type *) update->buf;
9918
9919 /* update requires a larger buf but the allocation failed */
9920 if (super->next_len && !super->next_buf) {
9921 super->next_len = 0;
9922 return;
9923 }
9924
9925 if (super->next_buf) {
9926 memcpy(super->next_buf, super->buf, super->len);
9927 free(super->buf);
9928 super->len = super->next_len;
9929 super->buf = super->next_buf;
9930
9931 super->next_len = 0;
9932 super->next_buf = NULL;
9933 }
9934
9935 mpb = super->anchor;
9936
9937 switch (type) {
9938 case update_general_migration_checkpoint: {
9939 struct intel_dev *id;
9940 struct imsm_update_general_migration_checkpoint *u =
9941 (void *)update->buf;
9942
9943 dprintf("called for update_general_migration_checkpoint\n");
9944
9945 /* find device under general migration */
9946 for (id = super->devlist ; id; id = id->next) {
9947 if (is_gen_migration(id->dev)) {
9948 id->dev->vol.curr_migr_unit =
9949 __cpu_to_le32(u->curr_migr_unit);
9950 super->updates_pending++;
9951 }
9952 }
9953 break;
9954 }
9955 case update_takeover: {
9956 struct imsm_update_takeover *u = (void *)update->buf;
9957 if (apply_takeover_update(u, super, &update->space_list)) {
9958 imsm_update_version_info(super);
9959 super->updates_pending++;
9960 }
9961 break;
9962 }
9963
9964 case update_reshape_container_disks: {
9965 struct imsm_update_reshape *u = (void *)update->buf;
9966 if (apply_reshape_container_disks_update(
9967 u, super, &update->space_list))
9968 super->updates_pending++;
9969 break;
9970 }
9971 case update_reshape_migration: {
9972 struct imsm_update_reshape_migration *u = (void *)update->buf;
9973 if (apply_reshape_migration_update(
9974 u, super, &update->space_list))
9975 super->updates_pending++;
9976 break;
9977 }
9978 case update_size_change: {
9979 struct imsm_update_size_change *u = (void *)update->buf;
9980 if (apply_size_change_update(u, super))
9981 super->updates_pending++;
9982 break;
9983 }
9984 case update_activate_spare: {
9985 struct imsm_update_activate_spare *u = (void *) update->buf;
9986
9987 if (prepare_spare_to_activate(st, u) &&
9988 apply_update_activate_spare(u, super, st->arrays))
9989 super->updates_pending++;
9990 break;
9991 }
9992 case update_create_array: {
9993 /* someone wants to create a new array, we need to be aware of
9994 * a few races/collisions:
9995 * 1/ 'Create' called by two separate instances of mdadm
9996 * 2/ 'Create' versus 'activate_spare': mdadm has chosen
9997 * devices that have since been assimilated via
9998 * activate_spare.
9999 		 * In the event this update cannot be carried out, mdadm will
10000 		 * (FIX ME) notice that its update did not take hold.
10001 */
10002 struct imsm_update_create_array *u = (void *) update->buf;
10003 struct intel_dev *dv;
10004 struct imsm_dev *dev;
10005 struct imsm_map *map, *new_map;
10006 unsigned long long start, end;
10007 unsigned long long new_start, new_end;
10008 int i;
10009 struct disk_info *inf;
10010 struct dl *dl;
10011
10012 /* handle racing creates: first come first serve */
10013 if (u->dev_idx < mpb->num_raid_devs) {
10014 dprintf("subarray %d already defined\n", u->dev_idx);
10015 goto create_error;
10016 }
10017
10018 /* check update is next in sequence */
10019 if (u->dev_idx != mpb->num_raid_devs) {
10020 dprintf("can not create array %d expected index %d\n",
10021 u->dev_idx, mpb->num_raid_devs);
10022 goto create_error;
10023 }
10024
10025 new_map = get_imsm_map(&u->dev, MAP_0);
10026 new_start = pba_of_lba0(new_map);
10027 new_end = new_start + per_dev_array_size(new_map);
10028 inf = get_disk_info(u);
10029
10030 /* handle activate_spare versus create race:
10031 * check to make sure that overlapping arrays do not include
10032 		 * overlapping disks
10033 */
10034 for (i = 0; i < mpb->num_raid_devs; i++) {
10035 dev = get_imsm_dev(super, i);
10036 map = get_imsm_map(dev, MAP_0);
10037 start = pba_of_lba0(map);
10038 end = start + per_dev_array_size(map);
10039 if ((new_start >= start && new_start <= end) ||
10040 (start >= new_start && start <= new_end))
10041 /* overlap */;
10042 else
10043 continue;
10044
10045 if (disks_overlap(super, i, u)) {
10046 dprintf("arrays overlap\n");
10047 goto create_error;
10048 }
10049 }
10050
10051 /* check that prepare update was successful */
10052 if (!update->space) {
10053 dprintf("prepare update failed\n");
10054 goto create_error;
10055 }
10056
10057 /* check that all disks are still active before committing
10058 * changes. FIXME: could we instead handle this by creating a
10059 * degraded array? That's probably not what the user expects,
10060 * so better to drop this update on the floor.
10061 */
10062 for (i = 0; i < new_map->num_members; i++) {
10063 dl = serial_to_dl(inf[i].serial, super);
10064 if (!dl) {
10065 dprintf("disk disappeared\n");
10066 goto create_error;
10067 }
10068 }
10069
10070 super->updates_pending++;
10071
10072 /* convert spares to members and fixup ord_tbl */
10073 for (i = 0; i < new_map->num_members; i++) {
10074 dl = serial_to_dl(inf[i].serial, super);
10075 if (dl->index == -1) {
10076 dl->index = mpb->num_disks;
10077 mpb->num_disks++;
10078 dl->disk.status |= CONFIGURED_DISK;
10079 dl->disk.status &= ~SPARE_DISK;
10080 }
10081 set_imsm_ord_tbl_ent(new_map, i, dl->index);
10082 }
10083
10084 dv = update->space;
10085 dev = dv->dev;
10086 update->space = NULL;
10087 imsm_copy_dev(dev, &u->dev);
10088 dv->index = u->dev_idx;
10089 dv->next = super->devlist;
10090 super->devlist = dv;
10091 mpb->num_raid_devs++;
10092
10093 imsm_update_version_info(super);
10094 break;
10095 create_error:
10096 /* mdmon knows how to release update->space, but not
10097 * ((struct intel_dev *) update->space)->dev
10098 */
10099 if (update->space) {
10100 dv = update->space;
10101 free(dv->dev);
10102 }
10103 break;
10104 }
10105 case update_kill_array: {
10106 struct imsm_update_kill_array *u = (void *) update->buf;
10107 int victim = u->dev_idx;
10108 struct active_array *a;
10109 struct intel_dev **dp;
10110 struct imsm_dev *dev;
10111
10112 /* sanity check that we are not affecting the uuid of
10113 * active arrays, or deleting an active array
10114 *
10115 * FIXME when immutable ids are available, but note that
10116 * we'll also need to fixup the invalidated/active
10117 * subarray indexes in mdstat
10118 */
10119 for (a = st->arrays; a; a = a->next)
10120 if (a->info.container_member >= victim)
10121 break;
10122 /* by definition if mdmon is running at least one array
10123 * is active in the container, so checking
10124 * mpb->num_raid_devs is just extra paranoia
10125 */
10126 dev = get_imsm_dev(super, victim);
10127 if (a || !dev || mpb->num_raid_devs == 1) {
10128 dprintf("failed to delete subarray-%d\n", victim);
10129 break;
10130 }
10131
10132 for (dp = &super->devlist; *dp;)
10133 if ((*dp)->index == (unsigned)super->current_vol) {
10134 *dp = (*dp)->next;
10135 } else {
10136 if ((*dp)->index > (unsigned)victim)
10137 (*dp)->index--;
10138 dp = &(*dp)->next;
10139 }
10140 mpb->num_raid_devs--;
10141 super->updates_pending++;
10142 break;
10143 }
10144 case update_rename_array: {
10145 struct imsm_update_rename_array *u = (void *) update->buf;
10146 char name[MAX_RAID_SERIAL_LEN+1];
10147 int target = u->dev_idx;
10148 struct active_array *a;
10149 struct imsm_dev *dev;
10150
10151 /* sanity check that we are not affecting the uuid of
10152 * an active array
10153 */
10154 memset(name, 0, sizeof(name));
10155 snprintf(name, MAX_RAID_SERIAL_LEN, "%s", (char *) u->name);
10156 name[MAX_RAID_SERIAL_LEN] = '\0';
10157 for (a = st->arrays; a; a = a->next)
10158 if (a->info.container_member == target)
10159 break;
10160 dev = get_imsm_dev(super, u->dev_idx);
10161 if (a || !dev || !check_name(super, name, 1)) {
10162 dprintf("failed to rename subarray-%d\n", target);
10163 break;
10164 }
10165
10166 memcpy(dev->volume, name, MAX_RAID_SERIAL_LEN);
10167 super->updates_pending++;
10168 break;
10169 }
10170 case update_add_remove_disk: {
10171 /* we may be able to repair some arrays if disks are
10172 		 * being added; check the status of add_remove_disk
10173 		 * to see if disks have been added.
10174 */
10175 if (add_remove_disk_update(super)) {
10176 struct active_array *a;
10177
10178 super->updates_pending++;
10179 for (a = st->arrays; a; a = a->next)
10180 a->check_degraded = 1;
10181 }
10182 break;
10183 }
10184 case update_prealloc_badblocks_mem:
10185 break;
10186 case update_rwh_policy: {
10187 struct imsm_update_rwh_policy *u = (void *)update->buf;
10188 int target = u->dev_idx;
10189 struct imsm_dev *dev = get_imsm_dev(super, target);
10190 if (!dev) {
10191 dprintf("could not find subarray-%d\n", target);
10192 break;
10193 }
10194
10195 if (dev->rwh_policy != u->new_policy) {
10196 dev->rwh_policy = u->new_policy;
10197 super->updates_pending++;
10198 }
10199 break;
10200 }
10201 default:
10202 pr_err("error: unsupported process update type:(type: %d)\n", type);
10203 }
10204 }
10205
10206 static struct mdinfo *get_spares_for_grow(struct supertype *st);
10207
10208 static int imsm_prepare_update(struct supertype *st,
10209 struct metadata_update *update)
10210 {
10211 /**
10212 * Allocate space to hold new disk entries, raid-device entries or a new
10213 * mpb if necessary. The manager synchronously waits for updates to
10214 * complete in the monitor, so new mpb buffers allocated here can be
10215 * integrated by the monitor thread without worrying about live pointers
10216 * in the manager thread.
10217 */
10218 enum imsm_update_type type;
10219 struct intel_super *super = st->sb;
10220 unsigned int sector_size = super->sector_size;
10221 struct imsm_super *mpb = super->anchor;
10222 size_t buf_len;
10223 size_t len = 0;
10224
10225 if (update->len < (int)sizeof(type))
10226 return 0;
10227
10228 type = *(enum imsm_update_type *) update->buf;
10229
10230 switch (type) {
10231 case update_general_migration_checkpoint:
10232 if (update->len < (int)sizeof(struct imsm_update_general_migration_checkpoint))
10233 return 0;
10234 dprintf("called for update_general_migration_checkpoint\n");
10235 break;
10236 case update_takeover: {
10237 struct imsm_update_takeover *u = (void *)update->buf;
10238 if (update->len < (int)sizeof(*u))
10239 return 0;
10240 if (u->direction == R0_TO_R10) {
10241 void **tail = (void **)&update->space_list;
10242 struct imsm_dev *dev = get_imsm_dev(super, u->subarray);
10243 struct imsm_map *map = get_imsm_map(dev, MAP_0);
10244 int num_members = map->num_members;
10245 void *space;
10246 int size, i;
10247 /* allocate memory for added disks */
10248 for (i = 0; i < num_members; i++) {
10249 size = sizeof(struct dl);
10250 space = xmalloc(size);
10251 *tail = space;
10252 tail = space;
10253 *tail = NULL;
10254 }
10255 /* allocate memory for new device */
10256 size = sizeof_imsm_dev(super->devlist->dev, 0) +
10257 (num_members * sizeof(__u32));
10258 space = xmalloc(size);
10259 *tail = space;
10260 tail = space;
10261 *tail = NULL;
10262 len = disks_to_mpb_size(num_members * 2);
10263 }
10264
10265 break;
10266 }
10267 case update_reshape_container_disks: {
10268 /* Every raid device in the container is about to
10269 * gain some more devices, and we will enter a
10270 * reconfiguration.
10271 * So each 'imsm_map' will be bigger, and the imsm_vol
10272 * will now hold 2 of them.
10273 * Thus we need new 'struct imsm_dev' allocations sized
10274 * as sizeof_imsm_dev but with more devices in both maps.
10275 */
10276 struct imsm_update_reshape *u = (void *)update->buf;
10277 struct intel_dev *dl;
10278 void **space_tail = (void**)&update->space_list;
10279
10280 if (update->len < (int)sizeof(*u))
10281 return 0;
10282
10283 dprintf("for update_reshape\n");
10284
10285 for (dl = super->devlist; dl; dl = dl->next) {
10286 int size = sizeof_imsm_dev(dl->dev, 1);
10287 void *s;
10288 if (u->new_raid_disks > u->old_raid_disks)
10289 size += sizeof(__u32)*2*
10290 (u->new_raid_disks - u->old_raid_disks);
10291 s = xmalloc(size);
10292 *space_tail = s;
10293 space_tail = s;
10294 *space_tail = NULL;
10295 }
10296
10297 len = disks_to_mpb_size(u->new_raid_disks);
10298 dprintf("New anchor length is %llu\n", (unsigned long long)len);
10299 break;
10300 }
10301 case update_reshape_migration: {
10302 /* for migration level 0->5 we need to add disks
10303 * so the same as for container operation we will copy
10304 * device to the bigger location.
10305 * in memory prepared device and new disk area are prepared
10306 * for usage in process update
10307 */
10308 struct imsm_update_reshape_migration *u = (void *)update->buf;
10309 struct intel_dev *id;
10310 void **space_tail = (void **)&update->space_list;
10311 int size;
10312 void *s;
10313 int current_level = -1;
10314
10315 if (update->len < (int)sizeof(*u))
10316 return 0;
10317
10318 dprintf("for update_reshape\n");
10319
10320 /* add space for bigger array in update
10321 */
10322 for (id = super->devlist; id; id = id->next) {
10323 if (id->index == (unsigned)u->subdev) {
10324 size = sizeof_imsm_dev(id->dev, 1);
10325 if (u->new_raid_disks > u->old_raid_disks)
10326 size += sizeof(__u32)*2*
10327 (u->new_raid_disks - u->old_raid_disks);
10328 s = xmalloc(size);
10329 *space_tail = s;
10330 space_tail = s;
10331 *space_tail = NULL;
10332 break;
10333 }
10334 }
10335 if (update->space_list == NULL)
10336 break;
10337
10338 /* add space for disk in update
10339 */
10340 size = sizeof(struct dl);
10341 s = xmalloc(size);
10342 *space_tail = s;
10343 space_tail = s;
10344 *space_tail = NULL;
10345
10346 		/* look up the current raid level of the subarray; a spare
10347 		 * is added below only for a 0->5 migration */
10348 for (id = super->devlist ; id; id = id->next)
10349 if (id->index == (unsigned)u->subdev) {
10350 struct imsm_dev *dev;
10351 struct imsm_map *map;
10352
10353 dev = get_imsm_dev(super, u->subdev);
10354 map = get_imsm_map(dev, MAP_0);
10355 current_level = map->raid_level;
10356 break;
10357 }
10358 if (u->new_level == 5 && u->new_level != current_level) {
10359 struct mdinfo *spares;
10360
10361 spares = get_spares_for_grow(st);
10362 if (spares) {
10363 struct dl *dl;
10364 struct mdinfo *dev;
10365
10366 dev = spares->devs;
10367 if (dev) {
10368 u->new_disks[0] =
10369 makedev(dev->disk.major,
10370 dev->disk.minor);
10371 dl = get_disk_super(super,
10372 dev->disk.major,
10373 dev->disk.minor);
10374 dl->index = u->old_raid_disks;
10375 dev = dev->next;
10376 }
10377 sysfs_free(spares);
10378 }
10379 }
10380 len = disks_to_mpb_size(u->new_raid_disks);
10381 dprintf("New anchor length is %llu\n", (unsigned long long)len);
10382 break;
10383 }
10384 case update_size_change: {
10385 if (update->len < (int)sizeof(struct imsm_update_size_change))
10386 return 0;
10387 break;
10388 }
10389 case update_activate_spare: {
10390 if (update->len < (int)sizeof(struct imsm_update_activate_spare))
10391 return 0;
10392 break;
10393 }
10394 case update_create_array: {
10395 struct imsm_update_create_array *u = (void *) update->buf;
10396 struct intel_dev *dv;
10397 struct imsm_dev *dev = &u->dev;
10398 struct imsm_map *map = get_imsm_map(dev, MAP_0);
10399 struct dl *dl;
10400 struct disk_info *inf;
10401 int i;
10402 int activate = 0;
10403
10404 if (update->len < (int)sizeof(*u))
10405 return 0;
10406
10407 inf = get_disk_info(u);
10408 len = sizeof_imsm_dev(dev, 1);
10409 /* allocate a new super->devlist entry */
10410 dv = xmalloc(sizeof(*dv));
10411 dv->dev = xmalloc(len);
10412 update->space = dv;
10413
10414 /* count how many spares will be converted to members */
10415 for (i = 0; i < map->num_members; i++) {
10416 dl = serial_to_dl(inf[i].serial, super);
10417 if (!dl) {
10418 				/* hmm, maybe it failed? nothing we can do about
10419 * it here
10420 */
10421 continue;
10422 }
10423 if (count_memberships(dl, super) == 0)
10424 activate++;
10425 }
10426 len += activate * sizeof(struct imsm_disk);
10427 break;
10428 }
10429 case update_kill_array: {
10430 if (update->len < (int)sizeof(struct imsm_update_kill_array))
10431 return 0;
10432 break;
10433 }
10434 case update_rename_array: {
10435 if (update->len < (int)sizeof(struct imsm_update_rename_array))
10436 return 0;
10437 break;
10438 }
10439 case update_add_remove_disk:
10440 /* no update->len needed */
10441 break;
10442 case update_prealloc_badblocks_mem:
10443 super->extra_space += sizeof(struct bbm_log) -
10444 get_imsm_bbm_log_size(super->bbm_log);
10445 break;
10446 case update_rwh_policy: {
10447 if (update->len < (int)sizeof(struct imsm_update_rwh_policy))
10448 return 0;
10449 break;
10450 }
10451 default:
10452 return 0;
10453 }
10454
10455 /* check if we need a larger metadata buffer */
10456 if (super->next_buf)
10457 buf_len = super->next_len;
10458 else
10459 buf_len = super->len;
10460
10461 if (__le32_to_cpu(mpb->mpb_size) + super->extra_space + len > buf_len) {
10462 /* ok we need a larger buf than what is currently allocated
10463 * if this allocation fails process_update will notice that
10464 * ->next_len is set and ->next_buf is NULL
10465 */
10466 buf_len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) +
10467 super->extra_space + len, sector_size);
10468 if (super->next_buf)
10469 free(super->next_buf);
10470
10471 super->next_len = buf_len;
10472 if (posix_memalign(&super->next_buf, sector_size, buf_len) == 0)
10473 memset(super->next_buf, 0, buf_len);
10474 else
10475 super->next_buf = NULL;
10476 }
10477 return 1;
10478 }
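
/* Illustrative sizing example for the buffer check above (assumed
 * numbers): with mpb_size = 3800, extra_space = 0, len = 512 and
 * sector_size = 512, the required anchor is ROUND_UP(4312, 512) = 4608
 * bytes, so a 4096-byte super->buf triggers allocation of a 4608-byte
 * next_buf.  imsm_process_update() then swaps next_buf in before
 * applying the update, or drops the update if the allocation failed
 * (next_len set, next_buf NULL).
 */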
10479
10480 /* must be called while manager is quiesced */
10481 static void imsm_delete(struct intel_super *super, struct dl **dlp, unsigned index)
10482 {
10483 struct imsm_super *mpb = super->anchor;
10484 struct dl *iter;
10485 struct imsm_dev *dev;
10486 struct imsm_map *map;
10487 unsigned int i, j, num_members;
10488 __u32 ord, ord_map0;
10489 struct bbm_log *log = super->bbm_log;
10490
10491 dprintf("deleting device[%d] from imsm_super\n", index);
10492
10493 /* shift all indexes down one */
10494 for (iter = super->disks; iter; iter = iter->next)
10495 if (iter->index > (int)index)
10496 iter->index--;
10497 for (iter = super->missing; iter; iter = iter->next)
10498 if (iter->index > (int)index)
10499 iter->index--;
10500
10501 for (i = 0; i < mpb->num_raid_devs; i++) {
10502 dev = get_imsm_dev(super, i);
10503 map = get_imsm_map(dev, MAP_0);
10504 num_members = map->num_members;
10505 for (j = 0; j < num_members; j++) {
10506 /* update ord entries being careful not to propagate
10507 * ord-flags to the first map
10508 */
10509 ord = get_imsm_ord_tbl_ent(dev, j, MAP_X);
10510 ord_map0 = get_imsm_ord_tbl_ent(dev, j, MAP_0);
10511
10512 if (ord_to_idx(ord) <= index)
10513 continue;
10514
10515 map = get_imsm_map(dev, MAP_0);
10516 set_imsm_ord_tbl_ent(map, j, ord_map0 - 1);
10517 map = get_imsm_map(dev, MAP_1);
10518 if (map)
10519 set_imsm_ord_tbl_ent(map, j, ord - 1);
10520 }
10521 }
10522
10523 for (i = 0; i < log->entry_count; i++) {
10524 struct bbm_log_entry *entry = &log->marked_block_entries[i];
10525
10526 if (entry->disk_ordinal <= index)
10527 continue;
10528 entry->disk_ordinal--;
10529 }
10530
10531 mpb->num_disks--;
10532 super->updates_pending++;
10533 if (*dlp) {
10534 struct dl *dl = *dlp;
10535
10536 *dlp = (*dlp)->next;
10537 __free_imsm_disk(dl);
10538 }
10539 }
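
/* Example of the index shift performed above (illustrative): deleting
 * index 2 from disks [0 1 2 3 4] leaves [0 1 2 3], with the former
 * indexes 3 and 4 renumbered to 2 and 3.  The same -1 adjustment is
 * applied to every ord table entry above the victim (keeping ord flags
 * intact in map[1]) and to the BBM log disk ordinals, so all references
 * stay dense and consistent.
 */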
10540
10541 static int imsm_get_allowed_degradation(int level, int raid_disks,
10542 struct intel_super *super,
10543 struct imsm_dev *dev)
10544 {
10545 switch (level) {
10546 case 1:
10547 case 10:{
10548 int ret_val = 0;
10549 struct imsm_map *map;
10550 int i;
10551
10552 ret_val = raid_disks/2;
10553 		/* check the map to make sure no disk pair has failed
10554 		 * completely in either map
10555 */
10556 map = get_imsm_map(dev, MAP_0);
10557 for (i = 0; i < ret_val; i++) {
10558 int degradation = 0;
10559 if (get_imsm_disk(super, i) == NULL)
10560 degradation++;
10561 if (get_imsm_disk(super, i + 1) == NULL)
10562 degradation++;
10563 if (degradation == 2)
10564 return 0;
10565 }
10566 map = get_imsm_map(dev, MAP_1);
10567 /* if there is no second map
10568 		 * the result can be returned
10569 */
10570 if (map == NULL)
10571 return ret_val;
10572 /* check degradation in second map
10573 */
10574 for (i = 0; i < ret_val; i++) {
10575 int degradation = 0;
10576 if (get_imsm_disk(super, i) == NULL)
10577 degradation++;
10578 if (get_imsm_disk(super, i + 1) == NULL)
10579 degradation++;
10580 if (degradation == 2)
10581 return 0;
10582 }
10583 return ret_val;
10584 }
10585 case 5:
10586 return 1;
10587 case 6:
10588 return 2;
10589 default:
10590 return 0;
10591 }
10592 }
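
/* Reference summary of the intent of the checks above:
 *
 *   level 1/10 : raid_disks / 2, unless some mirror pair has lost both
 *                members in either map (then 0)
 *   level 5    : 1
 *   level 6    : 2
 *   other      : 0
 */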
10593
10594 /*******************************************************************************
10595 * Function: validate_container_imsm
10596  * Description: This routine validates a container after assembly,
10597  *		e.g. that all devices in the container are under the same controller.
10598 *
10599 * Parameters:
10600 * info : linked list with info about devices used in array
10601 * Returns:
10602 * 1 : HBA mismatch
10603 * 0 : Success
10604 ******************************************************************************/
10605 int validate_container_imsm(struct mdinfo *info)
10606 {
10607 if (check_env("IMSM_NO_PLATFORM"))
10608 return 0;
10609
10610 struct sys_dev *idev;
10611 struct sys_dev *hba = NULL;
10612 struct sys_dev *intel_devices = find_intel_devices();
10613 char *dev_path = devt_to_devpath(makedev(info->disk.major,
10614 info->disk.minor));
10615
10616 for (idev = intel_devices; idev; idev = idev->next) {
10617 if (dev_path && strstr(dev_path, idev->path)) {
10618 hba = idev;
10619 break;
10620 }
10621 }
10622 if (dev_path)
10623 free(dev_path);
10624
10625 if (!hba) {
10626 pr_err("WARNING - Cannot detect HBA for device %s!\n",
10627 devid2kname(makedev(info->disk.major, info->disk.minor)));
10628 return 1;
10629 }
10630
10631 const struct imsm_orom *orom = get_orom_by_device_id(hba->dev_id);
10632 struct mdinfo *dev;
10633
10634 for (dev = info->next; dev; dev = dev->next) {
10635 dev_path = devt_to_devpath(makedev(dev->disk.major, dev->disk.minor));
10636
10637 struct sys_dev *hba2 = NULL;
10638 for (idev = intel_devices; idev; idev = idev->next) {
10639 if (dev_path && strstr(dev_path, idev->path)) {
10640 hba2 = idev;
10641 break;
10642 }
10643 }
10644 if (dev_path)
10645 free(dev_path);
10646
10647 const struct imsm_orom *orom2 = hba2 == NULL ? NULL :
10648 get_orom_by_device_id(hba2->dev_id);
10649
10650 if (hba2 && hba->type != hba2->type) {
10651 pr_err("WARNING - HBAs of devices do not match %s != %s\n",
10652 get_sys_dev_type(hba->type), get_sys_dev_type(hba2->type));
10653 return 1;
10654 }
10655
10656 if (orom != orom2) {
10657 pr_err("WARNING - IMSM container assembled with disks under different HBAs!\n"
10658 " This operation is not supported and can lead to data loss.\n");
10659 return 1;
10660 }
10661
10662 if (!orom) {
10663 pr_err("WARNING - IMSM container assembled with disks under HBAs without IMSM platform support!\n"
10664 " This operation is not supported and can lead to data loss.\n");
10665 return 1;
10666 }
10667 }
10668
10669 return 0;
10670 }
10671
10672 /*******************************************************************************
10673 * Function: imsm_record_badblock
10674  * Description: This routine stores a new bad block record in the BBM log
10675 *
10676 * Parameters:
10677 * a : array containing a bad block
10678 * slot : disk number containing a bad block
10679 * sector : bad block sector
10680 * length : bad block sectors range
10681 * Returns:
10682 * 1 : Success
10683 * 0 : Error
10684 ******************************************************************************/
10685 static int imsm_record_badblock(struct active_array *a, int slot,
10686 unsigned long long sector, int length)
10687 {
10688 struct intel_super *super = a->container->sb;
10689 int ord;
10690 int ret;
10691
10692 ord = imsm_disk_slot_to_ord(a, slot);
10693 if (ord < 0)
10694 return 0;
10695
10696 ret = record_new_badblock(super->bbm_log, ord_to_idx(ord), sector,
10697 length);
10698 if (ret)
10699 super->updates_pending++;
10700
10701 return ret;
10702 }
10703 /*******************************************************************************
10704 * Function: imsm_clear_badblock
10705  * Description: This routine clears a bad block record from the BBM log
10706 *
10707 * Parameters:
10708 * a : array containing a bad block
10709 * slot : disk number containing a bad block
10710 * sector : bad block sector
10711 * length : bad block sectors range
10712 * Returns:
10713 * 1 : Success
10714 * 0 : Error
10715 ******************************************************************************/
10716 static int imsm_clear_badblock(struct active_array *a, int slot,
10717 unsigned long long sector, int length)
10718 {
10719 struct intel_super *super = a->container->sb;
10720 int ord;
10721 int ret;
10722
10723 ord = imsm_disk_slot_to_ord(a, slot);
10724 if (ord < 0)
10725 return 0;
10726
10727 ret = clear_badblock(super->bbm_log, ord_to_idx(ord), sector, length);
10728 if (ret)
10729 super->updates_pending++;
10730
10731 return ret;
10732 }
10733 /*******************************************************************************
10734 * Function: imsm_get_badblocks
10735  * Description: This routine gets the list of bad blocks for an array
10736 *
10737 * Parameters:
10738 * a : array
10739 * slot : disk number
10740 * Returns:
10741 * bb : structure containing bad blocks
10742 * NULL : error
10743 ******************************************************************************/
10744 static struct md_bb *imsm_get_badblocks(struct active_array *a, int slot)
10745 {
10746 int inst = a->info.container_member;
10747 struct intel_super *super = a->container->sb;
10748 struct imsm_dev *dev = get_imsm_dev(super, inst);
10749 struct imsm_map *map = get_imsm_map(dev, MAP_0);
10750 int ord;
10751
10752 ord = imsm_disk_slot_to_ord(a, slot);
10753 if (ord < 0)
10754 return NULL;
10755
10756 get_volume_badblocks(super->bbm_log, ord_to_idx(ord), pba_of_lba0(map),
10757 per_dev_array_size(map), &super->bb);
10758
10759 return &super->bb;
10760 }
10761 /*******************************************************************************
10762 * Function: examine_badblocks_imsm
10763 * Description: Prints list of bad blocks on a disk to the standard output
10764 *
10765 * Parameters:
10766 * st : metadata handler
10767 * fd : open file descriptor for device
10768 * devname : device name
10769 * Returns:
10770 * 0 : Success
10771 * 1 : Error
10772 ******************************************************************************/
10773 static int examine_badblocks_imsm(struct supertype *st, int fd, char *devname)
10774 {
10775 struct intel_super *super = st->sb;
10776 struct bbm_log *log = super->bbm_log;
10777 struct dl *d = NULL;
10778 int any = 0;
10779
10780 for (d = super->disks; d ; d = d->next) {
10781 if (strcmp(d->devname, devname) == 0)
10782 break;
10783 }
10784
10785 if ((d == NULL) || (d->index < 0)) { /* serial mismatch probably */
10786 pr_err("%s doesn't appear to be part of a raid array\n",
10787 devname);
10788 return 1;
10789 }
10790
10791 if (log != NULL) {
10792 unsigned int i;
10793 struct bbm_log_entry *entry = &log->marked_block_entries[0];
10794
10795 for (i = 0; i < log->entry_count; i++) {
10796 if (entry[i].disk_ordinal == d->index) {
10797 unsigned long long sector = __le48_to_cpu(
10798 &entry[i].defective_block_start);
10799 int cnt = entry[i].marked_count + 1;
10800
10801 if (!any) {
10802 printf("Bad-blocks on %s:\n", devname);
10803 any = 1;
10804 }
10805
10806 printf("%20llu for %d sectors\n", sector, cnt);
10807 }
10808 }
10809 }
10810
10811 if (!any)
10812 printf("No bad-blocks list configured on %s\n", devname);
10813
10814 return 0;
10815 }
10816 /*******************************************************************************
10817 * Function: init_migr_record_imsm
10818  * Description: Function initializes the imsm migration record
10819 * Parameters:
10820 * super : imsm internal array info
10821 * dev : device under migration
10822 * info : general array info to find the smallest device
10823 * Returns:
10824 * none
10825 ******************************************************************************/
10826 void init_migr_record_imsm(struct supertype *st, struct imsm_dev *dev,
10827 struct mdinfo *info)
10828 {
10829 struct intel_super *super = st->sb;
10830 struct migr_record *migr_rec = super->migr_rec;
10831 int new_data_disks;
10832 unsigned long long dsize, dev_sectors;
10833 long long unsigned min_dev_sectors = -1LLU;
10834 struct imsm_map *map_dest = get_imsm_map(dev, MAP_0);
10835 struct imsm_map *map_src = get_imsm_map(dev, MAP_1);
10836 unsigned long long num_migr_units;
10837 unsigned long long array_blocks;
10838 struct dl *dl_disk = NULL;
10839
10840 memset(migr_rec, 0, sizeof(struct migr_record));
10841 migr_rec->family_num = __cpu_to_le32(super->anchor->family_num);
10842
10843 /* only ascending reshape supported now */
10844 migr_rec->ascending_migr = __cpu_to_le32(1);
10845
10846 migr_rec->dest_depth_per_unit = GEN_MIGR_AREA_SIZE /
10847 max(map_dest->blocks_per_strip, map_src->blocks_per_strip);
10848 migr_rec->dest_depth_per_unit *=
10849 max(map_dest->blocks_per_strip, map_src->blocks_per_strip);
10850 new_data_disks = imsm_num_data_members(map_dest);
10851 migr_rec->blocks_per_unit =
10852 __cpu_to_le32(migr_rec->dest_depth_per_unit * new_data_disks);
10853 migr_rec->dest_depth_per_unit =
10854 __cpu_to_le32(migr_rec->dest_depth_per_unit);
10855 array_blocks = info->component_size * new_data_disks;
10856 num_migr_units =
10857 array_blocks / __le32_to_cpu(migr_rec->blocks_per_unit);
10858
10859 if (array_blocks % __le32_to_cpu(migr_rec->blocks_per_unit))
10860 num_migr_units++;
10861 set_num_migr_units(migr_rec, num_migr_units);
10862
10863 migr_rec->post_migr_vol_cap = dev->size_low;
10864 migr_rec->post_migr_vol_cap_hi = dev->size_high;
10865
10866 /* Find the smallest dev */
10867 for (dl_disk = super->disks; dl_disk ; dl_disk = dl_disk->next) {
10868 /* ignore spares in container */
10869 if (dl_disk->index < 0)
10870 continue;
10871 get_dev_size(dl_disk->fd, NULL, &dsize);
10872 dev_sectors = dsize / 512;
10873 if (dev_sectors < min_dev_sectors)
10874 min_dev_sectors = dev_sectors;
10875 }
10876 set_migr_chkp_area_pba(migr_rec, min_dev_sectors -
10877 RAID_DISK_RESERVED_BLOCKS_IMSM_HI);
10878
10879 write_imsm_migr_rec(st);
10880
10881 return;
10882 }
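
/* Worked example for the migration-record sizing above, using assumed
 * values: blocks_per_strip = 256 in both maps, GEN_MIGR_AREA_SIZE =
 * 2048 and 3 data disks in the destination map:
 *
 *   dest_depth_per_unit = (2048 / 256) * 256 = 2048 sectors per disk
 *   blocks_per_unit     = 2048 * 3          = 6144 sectors per unit
 *
 * An array of 1000000 blocks then needs 1000000 / 6144 = 162 units,
 * rounded up to 163 because of the remainder.
 */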
10883
10884 /*******************************************************************************
10885 * Function: save_backup_imsm
10886 * Description: Function saves critical data stripes to Migration Copy Area
10887 * and updates the current migration unit status.
10888 * Use restore_stripes() to form a destination stripe,
10889 * and to write it to the Copy Area.
10890 * Parameters:
10891 * st : supertype information
10892 * dev : imsm device that backup is saved for
10893 * info : general array info
10894 * buf : input buffer
10895 * length : length of data to backup (blocks_per_unit)
10896 * Returns:
10897 * 0 : success
10898  * -1 : fail
10899 ******************************************************************************/
10900 int save_backup_imsm(struct supertype *st,
10901 struct imsm_dev *dev,
10902 struct mdinfo *info,
10903 void *buf,
10904 int length)
10905 {
10906 int rv = -1;
10907 struct intel_super *super = st->sb;
10908 unsigned long long *target_offsets;
10909 int *targets;
10910 int i;
10911 struct imsm_map *map_dest = get_imsm_map(dev, MAP_0);
10912 int new_disks = map_dest->num_members;
10913 int dest_layout = 0;
10914 int dest_chunk;
10915 unsigned long long start;
10916 int data_disks = imsm_num_data_members(map_dest);
10917
10918 targets = xmalloc(new_disks * sizeof(int));
10919
10920 for (i = 0; i < new_disks; i++) {
10921 struct dl *dl_disk = get_imsm_dl_disk(super, i);
10922
10923 targets[i] = dl_disk->fd;
10924 }
10925
10926 target_offsets = xcalloc(new_disks, sizeof(unsigned long long));
10927
10928 start = info->reshape_progress * 512;
10929 for (i = 0; i < new_disks; i++) {
10930 target_offsets[i] = migr_chkp_area_pba(super->migr_rec) * 512;
10931 		/* move back the copy area address; it will be moved forward
10932 		 * in restore_stripes() using the start input variable
10933 */
10934 target_offsets[i] -= start/data_disks;
10935 }
10936
10937 dest_layout = imsm_level_to_layout(map_dest->raid_level);
10938 dest_chunk = __le16_to_cpu(map_dest->blocks_per_strip) * 512;
10939
10940 if (restore_stripes(targets, /* list of dest devices */
10941 target_offsets, /* migration record offsets */
10942 new_disks,
10943 dest_chunk,
10944 map_dest->raid_level,
10945 dest_layout,
10946 -1, /* source backup file descriptor */
10947 0, /* input buf offset
10948 			    * always 0, buf is already offset */
10949 start,
10950 length,
10951 buf) != 0) {
10952 pr_err("Error restoring stripes\n");
10953 goto abort;
10954 }
10955
10956 rv = 0;
10957
10958 abort:
10959 if (targets) {
10960 free(targets);
10961 }
10962 free(target_offsets);
10963
10964 return rv;
10965 }
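
/* The offset arithmetic above, sketched with assumed numbers: with
 * reshape_progress = 12288 sectors, 3 data disks and the checkpoint
 * area at pba 1000000, every target starts at
 *
 *   target_offset = 1000000 * 512 - (12288 * 512) / 3
 *
 * so that when restore_stripes() adds 'start' back per stripe, the
 * writes land at the beginning of the Migration Copy Area regardless of
 * how far the reshape has progressed.
 */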
10966
10967 /*******************************************************************************
10968 * Function: save_checkpoint_imsm
10969 * Description: Function called for current unit status update
10970 * in the migration record. It writes it to disk.
10971 * Parameters:
10972 * super : imsm internal array info
10973 * info : general array info
10974 * Returns:
10975 * 0: success
10976 * 1: failure
10977 * 2: failure, means no valid migration record
10978  *    (no general migration in progress)
10979 ******************************************************************************/
10980 int save_checkpoint_imsm(struct supertype *st, struct mdinfo *info, int state)
10981 {
10982 struct intel_super *super = st->sb;
10983 unsigned long long blocks_per_unit;
10984 unsigned long long curr_migr_unit;
10985
10986 if (load_imsm_migr_rec(super) != 0) {
10987 dprintf("imsm: ERROR: Cannot read migration record for checkpoint save.\n");
10988 return 1;
10989 }
10990
10991 blocks_per_unit = __le32_to_cpu(super->migr_rec->blocks_per_unit);
10992 if (blocks_per_unit == 0) {
10993 dprintf("imsm: no migration in progress.\n");
10994 return 2;
10995 }
10996 curr_migr_unit = info->reshape_progress / blocks_per_unit;
10997 	/* check if the array is aligned to the copy area;
10998 	 * if it is not aligned, add one to the current migration unit value.
10999 	 * this can happen only when an array reshape finishes
11000 */
11001 if (info->reshape_progress % blocks_per_unit)
11002 curr_migr_unit++;
11003
11004 set_current_migr_unit(super->migr_rec, curr_migr_unit);
11005 super->migr_rec->rec_status = __cpu_to_le32(state);
11006 set_migr_dest_1st_member_lba(super->migr_rec,
11007 super->migr_rec->dest_depth_per_unit * curr_migr_unit);
11008
11009 if (write_imsm_migr_rec(st) < 0) {
11010 dprintf("imsm: Cannot write migration record outside backup area\n");
11011 return 1;
11012 }
11013
11014 return 0;
11015 }
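
/* Example of the checkpoint rounding above (illustrative values): with
 * blocks_per_unit = 6144 and reshape_progress = 20000 sectors,
 * curr_migr_unit = 20000 / 6144 = 3, bumped to 4 because of the
 * 1568-sector remainder; as the comment notes, this only occurs when a
 * reshape finishes part-way through a unit.
 */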
11016
11017 /*******************************************************************************
11018 * Function: recover_backup_imsm
11019 * Description: Function recovers critical data from the Migration Copy Area
11020 * while assembling an array.
11021 * Parameters:
11022 * super : imsm internal array info
11023 * info : general array info
11024 * Returns:
11025 * 0 : success (or there is no data to recover)
11026 * 1 : fail
11027 ******************************************************************************/
11028 int recover_backup_imsm(struct supertype *st, struct mdinfo *info)
11029 {
11030 struct intel_super *super = st->sb;
11031 struct migr_record *migr_rec = super->migr_rec;
11032 struct imsm_map *map_dest;
11033 struct intel_dev *id = NULL;
11034 unsigned long long read_offset;
11035 unsigned long long write_offset;
11036 unsigned unit_len;
11037 int new_disks, err;
11038 char *buf = NULL;
11039 int retval = 1;
11040 unsigned int sector_size = super->sector_size;
11041 unsigned long curr_migr_unit = current_migr_unit(migr_rec);
11042 unsigned long num_migr_units = get_num_migr_units(migr_rec);
11043 char buffer[20];
11044 int skipped_disks = 0;
11045 struct dl *dl_disk;
11046
11047 err = sysfs_get_str(info, NULL, "array_state", (char *)buffer, 20);
11048 if (err < 1)
11049 return 1;
11050
11051 	/* recover data only during assembly */
11052 if (strncmp(buffer, "inactive", 8) != 0)
11053 return 0;
11054 /* no data to recover */
11055 if (__le32_to_cpu(migr_rec->rec_status) == UNIT_SRC_NORMAL)
11056 return 0;
11057 if (curr_migr_unit >= num_migr_units)
11058 return 1;
11059
11060 /* find device during reshape */
11061 for (id = super->devlist; id; id = id->next)
11062 if (is_gen_migration(id->dev))
11063 break;
11064 if (id == NULL)
11065 return 1;
11066
11067 map_dest = get_imsm_map(id->dev, MAP_0);
11068 new_disks = map_dest->num_members;
11069
11070 read_offset = migr_chkp_area_pba(migr_rec) * 512;
11071
11072 write_offset = (migr_dest_1st_member_lba(migr_rec) +
11073 pba_of_lba0(map_dest)) * 512;
11074
11075 unit_len = __le32_to_cpu(migr_rec->dest_depth_per_unit) * 512;
11076 if (posix_memalign((void **)&buf, sector_size, unit_len) != 0)
11077 goto abort;
11078
11079 for (dl_disk = super->disks; dl_disk; dl_disk = dl_disk->next) {
11080 if (dl_disk->index < 0)
11081 continue;
11082
11083 if (dl_disk->fd < 0) {
11084 skipped_disks++;
11085 continue;
11086 }
11087 if (lseek64(dl_disk->fd, read_offset, SEEK_SET) < 0) {
11088 pr_err("Cannot seek to block: %s\n",
11089 strerror(errno));
11090 skipped_disks++;
11091 continue;
11092 }
11093 if (read(dl_disk->fd, buf, unit_len) != unit_len) {
11094 pr_err("Cannot read copy area block: %s\n",
11095 strerror(errno));
11096 skipped_disks++;
11097 continue;
11098 }
11099 if (lseek64(dl_disk->fd, write_offset, SEEK_SET) < 0) {
11100 pr_err("Cannot seek to block: %s\n",
11101 strerror(errno));
11102 skipped_disks++;
11103 continue;
11104 }
11105 if (write(dl_disk->fd, buf, unit_len) != unit_len) {
11106 pr_err("Cannot restore block: %s\n",
11107 strerror(errno));
11108 skipped_disks++;
11109 continue;
11110 }
11111 }
11112
11113 if (skipped_disks > imsm_get_allowed_degradation(info->new_level,
11114 new_disks,
11115 super,
11116 id->dev)) {
11117 pr_err("Cannot restore data from backup. Too many failed disks\n");
11118 goto abort;
11119 }
11120
11121 if (save_checkpoint_imsm(st, info, UNIT_SRC_NORMAL)) {
11122 /* ignore error == 2, this can mean end of reshape here
11123 */
11124 dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_NORMAL) during restart\n");
11125 } else
11126 retval = 0;
11127
11128 abort:
11129 free(buf);
11130 return retval;
11131 }
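
/* The recovery copy above moves one unit from the checkpoint area back
 * to its home location.  Sketched with assumed values: a checkpoint
 * area at pba 1000000, migr_dest_1st_member_lba = 4096 and a map
 * starting at pba 2048 gives
 *
 *   read_offset  = 1000000 * 512
 *   write_offset = (4096 + 2048) * 512
 *
 * and unit_len bytes are copied per surviving disk.
 */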
11132
11133 static char disk_by_path[] = "/dev/disk/by-path/";
11134
11135 static const char *imsm_get_disk_controller_domain(const char *path)
11136 {
11137 char disk_path[PATH_MAX];
11138 char *drv=NULL;
11139 struct stat st;
11140
11141 strcpy(disk_path, disk_by_path);
11142 strncat(disk_path, path, PATH_MAX - strlen(disk_path) - 1);
11143 if (stat(disk_path, &st) == 0) {
11144 struct sys_dev* hba;
11145 char *path;
11146
11147 path = devt_to_devpath(st.st_rdev);
11148 if (path == NULL)
11149 return "unknown";
11150 hba = find_disk_attached_hba(-1, path);
11151 if (hba && hba->type == SYS_DEV_SAS)
11152 drv = "isci";
11153 else if (hba && hba->type == SYS_DEV_SATA)
11154 drv = "ahci";
11155 else if (hba && hba->type == SYS_DEV_VMD)
11156 drv = "vmd";
11157 else if (hba && hba->type == SYS_DEV_NVME)
11158 drv = "nvme";
11159 else
11160 drv = "unknown";
11161 dprintf("path: %s hba: %s attached: %s\n",
11162 path, (hba) ? hba->path : "NULL", drv);
11163 free(path);
11164 }
11165 return drv;
11166 }
11167
11168 static char *imsm_find_array_devnm_by_subdev(int subdev, char *container)
11169 {
11170 static char devnm[32];
11171 char subdev_name[20];
11172 struct mdstat_ent *mdstat;
11173
11174 sprintf(subdev_name, "%d", subdev);
11175 mdstat = mdstat_by_subdev(subdev_name, container);
11176 if (!mdstat)
11177 return NULL;
11178
11179 strcpy(devnm, mdstat->devnm);
11180 free_mdstat(mdstat);
11181 return devnm;
11182 }
11183
11184 static int imsm_reshape_is_allowed_on_container(struct supertype *st,
11185 struct geo_params *geo,
11186 int *old_raid_disks,
11187 int direction)
11188 {
11189 /* currently we only support increasing the number of devices
11190 	 * for a container. This increases the number of devices for each
11191 * member array. They must all be RAID0 or RAID5.
11192 */
11193 int ret_val = 0;
11194 struct mdinfo *info, *member;
11195 int devices_that_can_grow = 0;
11196
11197 dprintf("imsm: imsm_reshape_is_allowed_on_container(ENTER): st->devnm = (%s)\n", st->devnm);
11198
11199 if (geo->size > 0 ||
11200 geo->level != UnSet ||
11201 geo->layout != UnSet ||
11202 geo->chunksize != 0 ||
11203 geo->raid_disks == UnSet) {
11204 dprintf("imsm: Container operation is allowed for raid disks number change only.\n");
11205 return ret_val;
11206 }
11207
11208 if (direction == ROLLBACK_METADATA_CHANGES) {
11209 dprintf("imsm: Metadata changes rollback is not supported for container operation.\n");
11210 return ret_val;
11211 }
11212
11213 info = container_content_imsm(st, NULL);
11214 for (member = info; member; member = member->next) {
11215 char *result;
11216
11217 dprintf("imsm: checking device_num: %i\n",
11218 member->container_member);
11219
11220 if (geo->raid_disks <= member->array.raid_disks) {
11221 /* we work on container for Online Capacity Expansion
11222 			 * only, so raid_disks has to grow
11223 */
11224 dprintf("imsm: for container operation raid disks increase is required\n");
11225 break;
11226 }
11227
11228 if (info->array.level != 0 && info->array.level != 5) {
11229 /* we cannot use this container with other raid level
11230 */
11231 dprintf("imsm: for container operation wrong raid level (%i) detected\n",
11232 info->array.level);
11233 break;
11234 } else {
11235 /* check for platform support
11236 * for this raid level configuration
11237 */
11238 struct intel_super *super = st->sb;
11239 if (!is_raid_level_supported(super->orom,
11240 member->array.level,
11241 geo->raid_disks)) {
11242 dprintf("platform does not support raid%d with %d disk%s\n",
11243 info->array.level,
11244 geo->raid_disks,
11245 geo->raid_disks > 1 ? "s" : "");
11246 break;
11247 }
11248 /* check if component size is aligned to chunk size
11249 */
11250 if (info->component_size %
11251 (info->array.chunk_size/512)) {
11252 dprintf("Component size is not aligned to chunk size\n");
11253 break;
11254 }
11255 }
11256
11257 if (*old_raid_disks &&
11258 info->array.raid_disks != *old_raid_disks)
11259 break;
11260 *old_raid_disks = info->array.raid_disks;
11261
11262 /* All raid5 and raid0 volumes in container
11263 * have to be ready for Online Capacity Expansion
11264 * so they need to be assembled. We have already
11265 * checked that no recovery etc is happening.
11266 */
11267 result = imsm_find_array_devnm_by_subdev(member->container_member,
11268 st->container_devnm);
11269 if (result == NULL) {
11270 dprintf("imsm: cannot find array\n");
11271 break;
11272 }
11273 devices_that_can_grow++;
11274 }
11275 sysfs_free(info);
11276 if (!member && devices_that_can_grow)
11277 ret_val = 1;
11278
11279 if (ret_val)
11280 dprintf("Container operation allowed\n");
11281 else
11282 dprintf("Error: %i\n", ret_val);
11283
11284 return ret_val;
11285 }
11286
11287 /* Function: get_spares_for_grow
11288 * Description: Allocates memory and creates list of spare devices
11289  * available in the container. Checks if the spare drive size is acceptable.
11290 * Parameters: Pointer to the supertype structure
11291 * Returns: Pointer to the list of spare devices (mdinfo structure) on success,
11292 * NULL if fail
11293 */
11294 static struct mdinfo *get_spares_for_grow(struct supertype *st)
11295 {
11296 struct spare_criteria sc;
11297
11298 get_spare_criteria_imsm(st, &sc);
11299 return container_choose_spares(st, &sc, NULL, NULL, NULL, 0);
11300 }
11301
11302 /******************************************************************************
11303 * function: imsm_create_metadata_update_for_reshape
11304 * Function creates update for whole IMSM container.
11305 *
11306 ******************************************************************************/
11307 static int imsm_create_metadata_update_for_reshape(
11308 struct supertype *st,
11309 struct geo_params *geo,
11310 int old_raid_disks,
11311 struct imsm_update_reshape **updatep)
11312 {
11313 struct intel_super *super = st->sb;
11314 struct imsm_super *mpb = super->anchor;
11315 int update_memory_size;
11316 struct imsm_update_reshape *u;
11317 struct mdinfo *spares;
11318 int i;
11319 int delta_disks;
11320 struct mdinfo *dev;
11321
11322 dprintf("(enter) raid_disks = %i\n", geo->raid_disks);
11323
11324 delta_disks = geo->raid_disks - old_raid_disks;
11325
11326 /* size of all update data without anchor */
11327 update_memory_size = sizeof(struct imsm_update_reshape);
11328
11329 /* now add space for spare disks that we need to add. */
11330 update_memory_size += sizeof(u->new_disks[0]) * (delta_disks - 1);
11331
11332 u = xcalloc(1, update_memory_size);
11333 u->type = update_reshape_container_disks;
11334 u->old_raid_disks = old_raid_disks;
11335 u->new_raid_disks = geo->raid_disks;
11336
11337 /* now get spare disks list
11338 */
11339 spares = get_spares_for_grow(st);
11340
11341 if (spares == NULL || delta_disks > spares->array.spare_disks) {
11342 pr_err("imsm: ERROR: Cannot get spare devices for %s.\n", geo->dev_name);
11343 i = -1;
11344 goto abort;
11345 }
11346
11347 	/* we have the spares;
11348 	 * update the disk list in the anchor's imsm_disk table
11349 */
11350 dprintf("imsm: %i spares are available.\n\n",
11351 spares->array.spare_disks);
11352
11353 dev = spares->devs;
11354 for (i = 0; i < delta_disks; i++) {
11355 struct dl *dl;
11356
11357 if (dev == NULL)
11358 break;
11359 u->new_disks[i] = makedev(dev->disk.major,
11360 dev->disk.minor);
11361 dl = get_disk_super(super, dev->disk.major, dev->disk.minor);
11362 dl->index = mpb->num_disks;
11363 mpb->num_disks++;
11364 dev = dev->next;
11365 }
11366
11367 abort:
11368 /* free spares
11369 */
11370 sysfs_free(spares);
11371
11372 dprintf("imsm: reshape update preparation :");
11373 if (i == delta_disks) {
11374 dprintf_cont(" OK\n");
11375 *updatep = u;
11376 return update_memory_size;
11377 }
11378 free(u);
11379 dprintf_cont(" Error\n");
11380
11381 return 0;
11382 }
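
/* Sizing note for the update allocated above (illustrative, assuming
 * the trailing new_disks[] member is declared with a single element):
 * adding delta_disks spares needs only (delta_disks - 1) extra slots,
 *
 *   update_memory_size = sizeof(struct imsm_update_reshape)
 *                      + sizeof(u->new_disks[0]) * (delta_disks - 1);
 *
 * e.g. growing from 4 to 6 disks (delta_disks = 2) adds room for one
 * extra new_disks entry beyond the struct itself.
 */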
11383
11384 /******************************************************************************
11385 * function: imsm_create_metadata_update_for_size_change()
11386 * Creates update for IMSM array for array size change.
11387 *
11388 ******************************************************************************/
11389 static int imsm_create_metadata_update_for_size_change(
11390 struct supertype *st,
11391 struct geo_params *geo,
11392 struct imsm_update_size_change **updatep)
11393 {
11394 struct intel_super *super = st->sb;
11395 int update_memory_size;
11396 struct imsm_update_size_change *u;
11397
11398 dprintf("(enter) New size = %llu\n", geo->size);
11399
11400 /* size of all update data without anchor */
11401 update_memory_size = sizeof(struct imsm_update_size_change);
11402
11403 u = xcalloc(1, update_memory_size);
11404 u->type = update_size_change;
11405 u->subdev = super->current_vol;
11406 u->new_size = geo->size;
11407
11408 dprintf("imsm: reshape update preparation : OK\n");
11409 *updatep = u;
11410
11411 return update_memory_size;
11412 }
11413
11414 /******************************************************************************
11415 * function: imsm_create_metadata_update_for_migration()
11416 * Creates update for IMSM array.
11417 *
11418 ******************************************************************************/
11419 static int imsm_create_metadata_update_for_migration(
11420 struct supertype *st,
11421 struct geo_params *geo,
11422 struct imsm_update_reshape_migration **updatep)
11423 {
11424 struct intel_super *super = st->sb;
11425 int update_memory_size;
11426 struct imsm_update_reshape_migration *u;
11427 struct imsm_dev *dev;
11428 int previous_level = -1;
11429
11430 dprintf("(enter) New Level = %i\n", geo->level);
11431
11432 /* size of all update data without anchor */
11433 update_memory_size = sizeof(struct imsm_update_reshape_migration);
11434
11435 u = xcalloc(1, update_memory_size);
11436 u->type = update_reshape_migration;
11437 u->subdev = super->current_vol;
11438 u->new_level = geo->level;
11439 u->new_layout = geo->layout;
11440 u->new_raid_disks = u->old_raid_disks = geo->raid_disks;
11441 u->new_disks[0] = -1;
11442 u->new_chunksize = -1;
11443
11444 dev = get_imsm_dev(super, u->subdev);
11445 if (dev) {
11446 struct imsm_map *map;
11447
11448 map = get_imsm_map(dev, MAP_0);
11449 if (map) {
11450 int current_chunk_size =
11451 __le16_to_cpu(map->blocks_per_strip) / 2;
11452
11453 if (geo->chunksize != current_chunk_size) {
11454 u->new_chunksize = geo->chunksize / 1024;
11455 dprintf("imsm: chunk size change from %i to %i\n",
11456 current_chunk_size, u->new_chunksize);
11457 }
11458 previous_level = map->raid_level;
11459 }
11460 }
11461 if (geo->level == 5 && previous_level == 0) {
11462 struct mdinfo *spares = NULL;
11463
11464 u->new_raid_disks++;
11465 spares = get_spares_for_grow(st);
11466 if (spares == NULL || spares->array.spare_disks < 1) {
11467 free(u);
11468 sysfs_free(spares);
11469 update_memory_size = 0;
11470 pr_err("cannot get spare device for requested migration\n");
11471 return 0;
11472 }
11473 sysfs_free(spares);
11474 }
11475 dprintf("imsm: reshape update preparation : OK\n");
11476 *updatep = u;
11477
11478 return update_memory_size;
11479 }
11480
11481 static void imsm_update_metadata_locally(struct supertype *st,
11482 void *buf, int len)
11483 {
11484 struct metadata_update mu;
11485
11486 mu.buf = buf;
11487 mu.len = len;
11488 mu.space = NULL;
11489 mu.space_list = NULL;
11490 mu.next = NULL;
11491 if (imsm_prepare_update(st, &mu))
11492 imsm_process_update(st, &mu);
11493
11494 while (mu.space_list) {
11495 void **space = mu.space_list;
11496 mu.space_list = *space;
11497 free(space);
11498 }
11499 }
11500
11501 /***************************************************************************
11502 * Function: imsm_analyze_change
11503  * Description: Analyzes the requested change for a single volume
11504  *		and validates whether the transition is supported
11505 * Parameters: Geometry parameters, supertype structure,
11506 * metadata change direction (apply/rollback)
11507 * Returns: Operation type code on success, -1 if fail
11508 ****************************************************************************/
11509 enum imsm_reshape_type imsm_analyze_change(struct supertype *st,
11510 struct geo_params *geo,
11511 int direction)
11512 {
11513 struct mdinfo info;
11514 int change = -1;
11515 int check_devs = 0;
11516 int chunk;
11517 /* number of added/removed disks in operation result */
11518 int devNumChange = 0;
11519 /* imsm compatible layout value for array geometry verification */
11520 int imsm_layout = -1;
11521 int data_disks;
11522 struct imsm_dev *dev;
11523 struct imsm_map *map;
11524 struct intel_super *super;
11525 unsigned long long current_size;
11526 unsigned long long free_size;
11527 unsigned long long max_size;
11528 int rv;
11529
11530 getinfo_super_imsm_volume(st, &info, NULL);
11531 if (geo->level != info.array.level && geo->level >= 0 &&
11532 geo->level != UnSet) {
11533 switch (info.array.level) {
11534 case 0:
11535 if (geo->level == 5) {
11536 change = CH_MIGRATION;
11537 if (geo->layout != ALGORITHM_LEFT_ASYMMETRIC) {
11538 pr_err("Error. Requested layout is not supported (only the left-asymmetric layout is supported)!\n");
11539 change = -1;
11540 goto analyse_change_exit;
11541 }
11542 imsm_layout = geo->layout;
11543 check_devs = 1;
11544 devNumChange = 1; /* parity disk added */
11545 } else if (geo->level == 10) {
11546 change = CH_TAKEOVER;
11547 check_devs = 1;
11548 devNumChange = 2; /* two mirrors added */
11549 imsm_layout = 0x102; /* imsm supported layout */
11550 }
11551 break;
11552 case 1:
11553 case 10:
11554 if (geo->level == 0) {
11555 change = CH_TAKEOVER;
11556 check_devs = 1;
11557 devNumChange = -(geo->raid_disks/2);
11558 imsm_layout = 0; /* imsm raid0 layout */
11559 }
11560 break;
11561 }
11562 if (change == -1) {
11563 pr_err("Error. Level migration from %d to %d is not supported!\n",
11564 info.array.level, geo->level);
11565 goto analyse_change_exit;
11566 }
11567 } else
11568 geo->level = info.array.level;
11569
11570 if (geo->layout != info.array.layout &&
11571 (geo->layout != UnSet && geo->layout != -1)) {
11572 change = CH_MIGRATION;
11573 if (info.array.layout == 0 && info.array.level == 5 &&
11574 geo->layout == 5) {
11575 /* reshape 5 -> 4 */
11576 } else if (info.array.layout == 5 && info.array.level == 5 &&
11577 geo->layout == 0) {
11578 /* reshape 4 -> 5 */
11579 geo->layout = 0;
11580 geo->level = 5;
11581 } else {
11582 pr_err("Error. Layout migration from %d to %d is not supported!\n",
11583 info.array.layout, geo->layout);
11584 change = -1;
11585 goto analyse_change_exit;
11586 }
11587 } else {
11588 geo->layout = info.array.layout;
11589 if (imsm_layout == -1)
11590 imsm_layout = info.array.layout;
11591 }
11592
11593 if (geo->chunksize > 0 && geo->chunksize != UnSet &&
11594 geo->chunksize != info.array.chunk_size) {
11595 if (info.array.level == 10) {
11596 pr_err("Error. Chunk size change for RAID 10 is not supported.\n");
11597 change = -1;
11598 goto analyse_change_exit;
11599 } else if (info.component_size % (geo->chunksize/512)) {
11600 pr_err("New chunk size (%dK) does not evenly divide device size (%lluK). Aborting...\n",
11601 geo->chunksize/1024, info.component_size/2);
11602 change = -1;
11603 goto analyse_change_exit;
11604 }
11605 change = CH_MIGRATION;
11606 } else {
11607 geo->chunksize = info.array.chunk_size;
11608 }
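/* Worked example (illustrative numbers): for a member of
 * info.component_size == 131072 sectors (64 MiB) and a requested chunk of
 * 128 KiB, geo->chunksize / 512 == 256 sectors; since 131072 % 256 == 0,
 * the new chunk evenly divides the member size and the change is accepted
 * as CH_MIGRATION.
 */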
11609
11610 chunk = geo->chunksize / 1024;
11611
11612 super = st->sb;
11613 dev = get_imsm_dev(super, super->current_vol);
11614 map = get_imsm_map(dev, MAP_0);
11615 data_disks = imsm_num_data_members(map);
11616 /* compute current size per disk member
11617 */
11618 current_size = info.custom_array_size / data_disks;
11619
11620 if (geo->size > 0 && geo->size != MAX_SIZE) {
11621 /* align component size
11622 */
11623 geo->size = imsm_component_size_alignment_check(
11624 get_imsm_raid_level(dev->vol.map),
11625 chunk * 1024, super->sector_size,
11626 geo->size * 2);
11627 if (geo->size == 0) {
11628 pr_err("Error. Only size expansion is supported (current size is %llu, requested size /rounded/ is 0).\n",
11629 current_size);
11630 goto analyse_change_exit;
11631 }
11632 }
11633
11634 if (current_size != geo->size && geo->size > 0) {
11635 if (change != -1) {
11636 pr_err("Error. Size change must be the only change requested at a time.\n");
11637 change = -1;
11638 goto analyse_change_exit;
11639 }
11640 if ((super->current_vol + 1) != super->anchor->num_raid_devs) {
11641 pr_err("Error. Only the last volume in the container can be expanded (%i/%s).\n",
11642 super->current_vol, st->devnm);
11643 goto analyse_change_exit;
11644 }
11645 /* check the maximum available size
11646 */
11647 rv = imsm_get_free_size(st, dev->vol.map->num_members,
11648 0, chunk, &free_size);
11649 if (rv == 0)
11650 /* Cannot find maximum available space
11651 */
11652 max_size = 0;
11653 else {
11654 max_size = free_size + current_size;
11655 /* align component size
11656 */
11657 max_size = imsm_component_size_alignment_check(
11658 get_imsm_raid_level(dev->vol.map),
11659 chunk * 1024, super->sector_size,
11660 max_size);
11661 }
11662 if (geo->size == MAX_SIZE) {
11663 /* requested size change to the maximum available size
11664 */
11665 if (max_size == 0) {
11666 pr_err("Error. Cannot find maximum available space.\n");
11667 change = -1;
11668 goto analyse_change_exit;
11669 } else
11670 geo->size = max_size;
11671 }
11672
11673 if (direction == ROLLBACK_METADATA_CHANGES) {
11674 /* accept size for rollback only
11675 */
11676 } else {
11677 /* round size due to metadata compatibility
11678 */
11679 geo->size = (geo->size >> SECT_PER_MB_SHIFT)
11680 << SECT_PER_MB_SHIFT;
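/* e.g. (illustrative, assuming SECT_PER_MB_SHIFT == 11, i.e. 2048
 * 512-byte sectors per MiB): a request of 1050000 sectors rounds
 * down to (1050000 >> 11) << 11 == 1048576 sectors (512 MiB).
 */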
11681 dprintf("Prepare update for size change to %llu\n",
11682 geo->size );
11683 if (current_size >= geo->size) {
11684 pr_err("Error. Only size expansion is supported (current size is %llu, requested size /rounded/ is %llu).\n",
11685 current_size, geo->size);
11686 goto analyse_change_exit;
11687 }
11688 if (max_size && geo->size > max_size) {
11689 pr_err("Error. Requested size is larger than maximum available size (maximum available size is %llu, requested size /rounded/ is %llu).\n",
11690 max_size, geo->size);
11691 goto analyse_change_exit;
11692 }
11693 }
11694 geo->size *= data_disks;
11695 geo->raid_disks = dev->vol.map->num_members;
11696 change = CH_ARRAY_SIZE;
11697 }
11698 if (!validate_geometry_imsm(st,
11699 geo->level,
11700 imsm_layout,
11701 geo->raid_disks + devNumChange,
11702 &chunk,
11703 geo->size, INVALID_SECTORS,
11704 0, 0, info.consistency_policy, 1))
11705 change = -1;
11706
11707 if (check_devs) {
11708 struct intel_super *super = st->sb;
11709 struct imsm_super *mpb = super->anchor;
11710
11711 if (mpb->num_raid_devs > 1) {
11712 pr_err("Error. Cannot perform operation on %s - this operation requires a single array in the container\n",
11713 geo->dev_name);
11714 change = -1;
11715 }
11716 }
11717
11718 analyse_change_exit:
11719 if (direction == ROLLBACK_METADATA_CHANGES &&
11720 (change == CH_MIGRATION || change == CH_TAKEOVER)) {
11721 dprintf("imsm: Rollback of metadata changes is not supported for migration and takeover operations.\n");
11722 change = -1;
11723 }
11724 return change;
11725 }
11726
11727 int imsm_takeover(struct supertype *st, struct geo_params *geo)
11728 {
11729 struct intel_super *super = st->sb;
11730 struct imsm_update_takeover *u;
11731
11732 u = xmalloc(sizeof(struct imsm_update_takeover));
11733
11734 u->type = update_takeover;
11735 u->subarray = super->current_vol;
11736
11737 /* 10->0 transition */
11738 if (geo->level == 0)
11739 u->direction = R10_TO_R0;
11740
11741 /* 0->10 transition */
11742 if (geo->level == 10)
11743 u->direction = R0_TO_R10;
11744
11745 /* update metadata locally */
11746 imsm_update_metadata_locally(st, u,
11747 sizeof(struct imsm_update_takeover));
11748 /* and possibly remotely */
11749 if (st->update_tail)
11750 append_metadata_update(st, u,
11751 sizeof(struct imsm_update_takeover));
11752 else
11753 free(u);
11754
11755 return 0;
11756 }
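/* Takeover is normally driven from user space (illustrative example):
 *
 *	mdadm --grow /dev/md/volume --level=0     # RAID10 -> RAID0
 *	mdadm --grow /dev/md/volume --level=10    # RAID0  -> RAID10
 *
 * which reaches this function with geo->level set accordingly.
 */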
11757
11758 /* Flush a size update if the size calculated from num_data_stripes is higher
11759 * than imsm_dev_size, to eliminate differences during reshape.
11760 * Mdmon will recalculate the values correctly.
11761 * If the subarray index is not set, check the whole container.
11762 * Returns:
11763 * 0 - no error occurred
11764 * 1 - error detected
11765 */
11766 static int imsm_fix_size_mismatch(struct supertype *st, int subarray_index)
11767 {
11768 struct intel_super *super = st->sb;
11769 int tmp = super->current_vol;
11770 int ret_val = 1;
11771 int i;
11772
11773 for (i = 0; i < super->anchor->num_raid_devs; i++) {
11774 if (subarray_index >= 0 && i != subarray_index)
11775 continue;
11776 super->current_vol = i;
11777 struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
11778 struct imsm_map *map = get_imsm_map(dev, MAP_0);
11779 unsigned int disc_count = imsm_num_data_members(map);
11780 struct geo_params geo;
11781 struct imsm_update_size_change *update;
11782 unsigned long long calc_size = per_dev_array_size(map) * disc_count;
11783 unsigned long long d_size = imsm_dev_size(dev);
11784 int u_size;
11785
11786 if (calc_size == d_size || dev->vol.migr_type == MIGR_GEN_MIGR)
11787 continue;
11788
11789 /* There is a difference, verify that imsm_dev_size is
11790 * rounded correctly and push update.
11791 */
11792 if (d_size != round_size_to_mb(d_size, disc_count)) {
11793 dprintf("imsm: Size of volume %d is not rounded correctly\n",
11794 i);
11795 goto exit;
11796 }
11797 memset(&geo, 0, sizeof(struct geo_params));
11798 geo.size = d_size;
11799 u_size = imsm_create_metadata_update_for_size_change(st, &geo,
11800 &update);
11801 if (u_size < 1) {
11802 dprintf("imsm: Cannot prepare size change update\n");
11803 goto exit;
11804 }
11805 imsm_update_metadata_locally(st, update, u_size);
11806 if (st->update_tail) {
11807 append_metadata_update(st, update, u_size);
11808 flush_metadata_updates(st);
11809 st->update_tail = &st->updates;
11810 } else {
11811 imsm_sync_metadata(st);
11812 }
11813 }
11814 ret_val = 0;
11815 exit:
11816 super->current_vol = tmp;
11817 return ret_val;
11818 }
11819
11820 static int imsm_reshape_super(struct supertype *st, unsigned long long size,
11821 int level,
11822 int layout, int chunksize, int raid_disks,
11823 int delta_disks, char *backup, char *dev,
11824 int direction, int verbose)
11825 {
11826 int ret_val = 1;
11827 struct geo_params geo;
11828
11829 dprintf("(enter)\n");
11830
11831 memset(&geo, 0, sizeof(struct geo_params));
11832
11833 geo.dev_name = dev;
11834 strcpy(geo.devnm, st->devnm);
11835 geo.size = size;
11836 geo.level = level;
11837 geo.layout = layout;
11838 geo.chunksize = chunksize;
11839 geo.raid_disks = raid_disks;
11840 if (delta_disks != UnSet)
11841 geo.raid_disks += delta_disks;
11842
11843 dprintf("for level : %i\n", geo.level);
11844 dprintf("for raid_disks : %i\n", geo.raid_disks);
11845
11846 if (strcmp(st->container_devnm, st->devnm) == 0) {
11847 /* At container level we can only increase the number of devices. */
11848 dprintf("imsm: info: Container operation\n");
11849 int old_raid_disks = 0;
11850
11851 if (imsm_reshape_is_allowed_on_container(
11852 st, &geo, &old_raid_disks, direction)) {
11853 struct imsm_update_reshape *u = NULL;
11854 int len;
11855
11856 if (imsm_fix_size_mismatch(st, -1)) {
11857 dprintf("imsm: Cannot fix size mismatch\n");
11858 goto exit_imsm_reshape_super;
11859 }
11860
11861 len = imsm_create_metadata_update_for_reshape(
11862 st, &geo, old_raid_disks, &u);
11863
11864 if (len <= 0) {
11865 dprintf("imsm: Cannot prepare update\n");
11866 goto exit_imsm_reshape_super;
11867 }
11868
11869 ret_val = 0;
11870 /* update metadata locally */
11871 imsm_update_metadata_locally(st, u, len);
11872 /* and possibly remotely */
11873 if (st->update_tail)
11874 append_metadata_update(st, u, len);
11875 else
11876 free(u);
11877
11878 } else {
11879 pr_err("(imsm) Operation is not allowed on this container\n");
11880 }
11881 } else {
11882 /* At volume level we support the following operations:
11883 * - takeover: raid10 -> raid0; raid0 -> raid10
11884 * - chunk size migration
11885 * - migration: raid5 -> raid0; raid0 -> raid5
11886 */
11887 struct intel_super *super = st->sb;
11888 struct intel_dev *dev = super->devlist;
11889 int change;
11890 dprintf("imsm: info: Volume operation\n");
11891 /* find requested device */
11892 while (dev) {
11893 char *devnm =
11894 imsm_find_array_devnm_by_subdev(
11895 dev->index, st->container_devnm);
11896 if (devnm && strcmp(devnm, geo.devnm) == 0)
11897 break;
11898 dev = dev->next;
11899 }
11900 if (dev == NULL) {
11901 pr_err("Cannot find %s (%s) subarray\n",
11902 geo.dev_name, geo.devnm);
11903 goto exit_imsm_reshape_super;
11904 }
11905 super->current_vol = dev->index;
11906 change = imsm_analyze_change(st, &geo, direction);
11907 switch (change) {
11908 case CH_TAKEOVER:
11909 ret_val = imsm_takeover(st, &geo);
11910 break;
11911 case CH_MIGRATION: {
11912 struct imsm_update_reshape_migration *u = NULL;
11913 int len =
11914 imsm_create_metadata_update_for_migration(
11915 st, &geo, &u);
11916 if (len < 1) {
11917 dprintf("imsm: Cannot prepare update\n");
11918 break;
11919 }
11920 ret_val = 0;
11921 /* update metadata locally */
11922 imsm_update_metadata_locally(st, u, len);
11923 /* and possibly remotely */
11924 if (st->update_tail)
11925 append_metadata_update(st, u, len);
11926 else
11927 free(u);
11928 }
11929 break;
11930 case CH_ARRAY_SIZE: {
11931 struct imsm_update_size_change *u = NULL;
11932 int len =
11933 imsm_create_metadata_update_for_size_change(
11934 st, &geo, &u);
11935 if (len < 1) {
11936 dprintf("imsm: Cannot prepare update\n");
11937 break;
11938 }
11939 ret_val = 0;
11940 /* update metadata locally */
11941 imsm_update_metadata_locally(st, u, len);
11942 /* and possibly remotely */
11943 if (st->update_tail)
11944 append_metadata_update(st, u, len);
11945 else
11946 free(u);
11947 }
11948 break;
11949 default:
11950 ret_val = 1;
11951 }
11952 }
11953
11954 exit_imsm_reshape_super:
11955 dprintf("imsm: reshape_super Exit code = %i\n", ret_val);
11956 return ret_val;
11957 }
11958
11959 #define COMPLETED_OK 0
11960 #define COMPLETED_NONE 1
11961 #define COMPLETED_DELAYED 2
11962
11963 static int read_completed(int fd, unsigned long long *val)
11964 {
11965 int ret;
11966 char buf[50];
11967
11968 ret = sysfs_fd_get_str(fd, buf, 50);
11969 if (ret < 0)
11970 return ret;
11971
11972 ret = COMPLETED_OK;
11973 if (strncmp(buf, "none", 4) == 0) {
11974 ret = COMPLETED_NONE;
11975 } else if (strncmp(buf, "delayed", 7) == 0) {
11976 ret = COMPLETED_DELAYED;
11977 } else {
11978 char *ep;
11979 *val = strtoull(buf, &ep, 0);
11980 if (ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))
11981 ret = -1;
11982 }
11983 return ret;
11984 }
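/* Example usage (illustrative): polling md's sync_completed through an
 * already-open sysfs fd:
 *
 *	unsigned long long done;
 *	int rc = read_completed(fd, &done);
 *
 *	if (rc == COMPLETED_OK)
 *		dprintf("%llu sectors completed\n", done);
 *	else if (rc == COMPLETED_NONE)
 *		dprintf("no resync/reshape running\n");
 */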
11985
11986 /*******************************************************************************
11987 * Function: wait_for_reshape_imsm
11988 * Description: Writes a new sync_max value and waits until the
11989 * reshape process reaches the new position
11990 * Parameters:
11991 * sra : general array info
11992 * ndata : number of disks in new array's layout
11993 * Returns:
11994 * 0 : success,
11995 * 1 : there is no reshape in progress,
11996 * -1 : fail
11997 ******************************************************************************/
11998 int wait_for_reshape_imsm(struct mdinfo *sra, int ndata)
11999 {
12000 int fd = sysfs_get_fd(sra, NULL, "sync_completed");
12001 int retry = 3;
12002 unsigned long long completed;
12003 /* to_complete : new sync_max position */
12004 unsigned long long to_complete = sra->reshape_progress;
12005 unsigned long long position_to_set = to_complete / ndata;
12006
12007 if (fd < 0) {
12008 dprintf("cannot open sync_completed\n");
12009 return 1;
12010 }
12011
12012 do {
12013 if (sysfs_fd_get_ll(fd, &completed) < 0) {
12014 if (!retry) {
12015 dprintf("cannot read sync_completed (no reshape in progress)\n");
12016 close(fd);
12017 return 1;
12018 }
12019 usleep(30000);
12020 } else
12021 break;
12022 } while (retry--);
12023
12024 if (completed > position_to_set) {
12025 dprintf("wrong next position to set %llu (%llu)\n",
12026 to_complete, position_to_set);
12027 close(fd);
12028 return -1;
12029 }
12030 dprintf("Position set: %llu\n", position_to_set);
12031 if (sysfs_set_num(sra, NULL, "sync_max",
12032 position_to_set) != 0) {
12033 dprintf("cannot set reshape position to %llu\n",
12034 position_to_set);
12035 close(fd);
12036 return -1;
12037 }
12038
12039 do {
12040 int rc;
12041 char action[20];
12042 int timeout = 3000;
12043
12044 sysfs_wait(fd, &timeout);
12045 if (sysfs_get_str(sra, NULL, "sync_action",
12046 action, 20) > 0 &&
12047 strncmp(action, "reshape", 7) != 0) {
12048 if (strncmp(action, "idle", 4) == 0)
12049 break;
12050 close(fd);
12051 return -1;
12052 }
12053
12054 rc = read_completed(fd, &completed);
12055 if (rc < 0) {
12056 dprintf("cannot read sync_completed (in loop)\n");
12057 close(fd);
12058 return 1;
12059 } else if (rc == COMPLETED_NONE)
12060 break;
12061 } while (completed < position_to_set);
12062
12063 close(fd);
12064 return 0;
12065 }
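/* Worked example (illustrative numbers): for a reshape target of
 * sra->reshape_progress == 8192 array sectors on ndata == 4 data disks,
 * position_to_set = 8192 / 4 == 2048, i.e. the per-device offset written
 * to sync_max and compared against sync_completed above.
 */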
12066
12067 /*******************************************************************************
12068 * Function: check_degradation_change
12069 * Description: Check that the array hasn't failed.
12070 * Parameters:
12071 * info : for sysfs access
12072 * sources : source disks descriptors
12073 * degraded: previous degradation level
12074 * Returns:
12075 * degradation level
12076 ******************************************************************************/
12077 int check_degradation_change(struct mdinfo *info,
12078 int *sources,
12079 int degraded)
12080 {
12081 unsigned long long new_degraded;
12082 int rv;
12083
12084 rv = sysfs_get_ll(info, NULL, "degraded", &new_degraded);
12085 if (rv == -1 || (new_degraded != (unsigned long long)degraded)) {
12086 /* check each device to ensure it is still working */
12087 struct mdinfo *sd;
12088 new_degraded = 0;
12089 for (sd = info->devs ; sd ; sd = sd->next) {
12090 if (sd->disk.state & (1<<MD_DISK_FAULTY))
12091 continue;
12092 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
12093 char sbuf[100];
12094
12095 if (sysfs_get_str(info,
12096 sd, "state", sbuf, sizeof(sbuf)) < 0 ||
12097 strstr(sbuf, "faulty") ||
12098 strstr(sbuf, "in_sync") == NULL) {
12099 /* this device is dead */
12100 sd->disk.state = (1<<MD_DISK_FAULTY);
12101 if (sd->disk.raid_disk >= 0 &&
12102 sources[sd->disk.raid_disk] >= 0) {
12103 close(sources[
12104 sd->disk.raid_disk]);
12105 sources[sd->disk.raid_disk] =
12106 -1;
12107 }
12108 new_degraded++;
12109 }
12110 }
12111 }
12112 }
12113
12114 return new_degraded;
12115 }
12116
12117 /*******************************************************************************
12118 * Function: imsm_manage_reshape
12119 * Description: Finds the array under reshape and manages the reshape
12120 * process. It creates stripe backups (if required) and sets
12121 * checkpoints.
12122 * Parameters:
12123 * afd : backup handle (native) - not used
12124 * sra : general array info
12125 * reshape : reshape parameters - not used
12126 * st : supertype structure
12127 * blocks : size of critical section [blocks]
12128 * fds : table of source device descriptors
12129 * offsets : start of array (offset per device)
12130 * dests : not used
12131 * destfd : table of destination device descriptors
12132 * destoffsets : table of destination offsets (per device)
12133 * Returns:
12134 * 1 : success, reshape is done
12135 * 0 : fail
12136 ******************************************************************************/
12137 static int imsm_manage_reshape(
12138 int afd, struct mdinfo *sra, struct reshape *reshape,
12139 struct supertype *st, unsigned long backup_blocks,
12140 int *fds, unsigned long long *offsets,
12141 int dests, int *destfd, unsigned long long *destoffsets)
12142 {
12143 int ret_val = 0;
12144 struct intel_super *super = st->sb;
12145 struct intel_dev *dv;
12146 unsigned int sector_size = super->sector_size;
12147 struct imsm_dev *dev = NULL;
12148 struct imsm_map *map_src, *map_dest;
12149 int migr_vol_qan = 0;
12150 int ndata, odata; /* number of data members */
12151 int chunk; /* [bytes] */
12152 struct migr_record *migr_rec;
12153 char *buf = NULL;
12154 unsigned int buf_size; /* [bytes] */
12155 unsigned long long max_position; /* array size [blocks] */
12156 unsigned long long next_step; /* [blocks]/[bytes] */
12157 unsigned long long old_data_stripe_length;
12158 unsigned long long start_src; /* [bytes] */
12159 unsigned long long start; /* [bytes] */
12160 unsigned long long start_buf_shift; /* [bytes] */
12161 int degraded = 0;
12162 int source_layout = 0;
12163 int subarray_index = -1;
12164
12165 if (!sra)
12166 return ret_val;
12167
12168 if (!fds || !offsets)
12169 goto abort;
12170
12171 /* Find volume during the reshape */
12172 for (dv = super->devlist; dv; dv = dv->next) {
12173 if (dv->dev->vol.migr_type == MIGR_GEN_MIGR &&
12174 dv->dev->vol.migr_state == 1) {
12175 dev = dv->dev;
12176 migr_vol_qan++;
12177 subarray_index = dv->index;
12178 }
12179 }
12180 /* Only one volume can migrate at the same time */
12181 if (migr_vol_qan != 1) {
12182 pr_err("%s", migr_vol_qan ?
12183 "Number of migrating volumes greater than 1\n" :
12184 "There is no volume during migrationg\n");
12185 goto abort;
12186 }
12187
12188 map_dest = get_imsm_map(dev, MAP_0);
12189 map_src = get_imsm_map(dev, MAP_1);
12190 if (map_src == NULL)
12191 goto abort;
12192
12193 ndata = imsm_num_data_members(map_dest);
12194 odata = imsm_num_data_members(map_src);
12195
12196 chunk = __le16_to_cpu(map_src->blocks_per_strip) * 512;
12197 old_data_stripe_length = odata * chunk;
12198
12199 migr_rec = super->migr_rec;
12200
12201 /* initialize migration record for start condition */
12202 if (sra->reshape_progress == 0)
12203 init_migr_record_imsm(st, dev, sra);
12204 else {
12205 if (__le32_to_cpu(migr_rec->rec_status) != UNIT_SRC_NORMAL) {
12206 dprintf("imsm: cannot restart migration when data are present in copy area.\n");
12207 goto abort;
12208 }
12209 /* Save checkpoint to update migration record for current
12210 * reshape position (in md). It can be farther than current
12211 * reshape position in metadata.
12212 */
12213 if (save_checkpoint_imsm(st, sra, UNIT_SRC_NORMAL) == 1) {
12214 /* ignore error == 2, this can mean end of reshape here
12215 */
12216 dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_NORMAL, initial save)\n");
12217 goto abort;
12218 }
12219 }
12220
12221 /* size for data */
12222 buf_size = __le32_to_cpu(migr_rec->blocks_per_unit) * 512;
12223 /* extend buffer size for parity disk */
12224 buf_size += __le32_to_cpu(migr_rec->dest_depth_per_unit) * 512;
12225 /* add space for stripe alignment */
12226 buf_size += old_data_stripe_length;
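/* e.g. (illustrative numbers): blocks_per_unit == 2048 (1 MiB) and
 * dest_depth_per_unit == 64 (32 KiB), plus one old data stripe of
 * 128 KiB (2 source data disks, 64 KiB chunk) as alignment slack,
 * give a buffer of roughly 1.16 MiB.
 */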
12227 if (posix_memalign((void **)&buf, MAX_SECTOR_SIZE, buf_size)) {
12228 dprintf("imsm: Cannot allocate checkpoint buffer\n");
12229 goto abort;
12230 }
12231
12232 max_position = sra->component_size * ndata;
12233 source_layout = imsm_level_to_layout(map_src->raid_level);
12234
12235 while (current_migr_unit(migr_rec) <
12236 get_num_migr_units(migr_rec)) {
12237 /* current reshape position [blocks] */
12238 unsigned long long current_position =
12239 __le32_to_cpu(migr_rec->blocks_per_unit)
12240 * current_migr_unit(migr_rec);
12241 unsigned long long border;
12242
12243 /* Check that array hasn't become failed.
12244 */
12245 degraded = check_degradation_change(sra, fds, degraded);
12246 if (degraded > 1) {
12247 dprintf("imsm: Abort reshape due to degradation level (%i)\n", degraded);
12248 goto abort;
12249 }
12250
12251 next_step = __le32_to_cpu(migr_rec->blocks_per_unit);
12252
12253 if ((current_position + next_step) > max_position)
12254 next_step = max_position - current_position;
12255
12256 start = current_position * 512;
12257
12258 /* align reading start to old geometry */
12259 start_buf_shift = start % old_data_stripe_length;
12260 start_src = start - start_buf_shift;
12261
12262 border = (start_src / odata) - (start / ndata);
12263 border /= 512;
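/* border (illustrative reading): the per-disk distance, in sectors,
 * between where this unit is read in the old layout (start_src/odata)
 * and where it is written in the new one (start/ndata); when it does
 * not exceed dest_depth_per_unit, the critical stripes below must be
 * backed up before the kernel may reshape them.
 */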
12264 if (border <= __le32_to_cpu(migr_rec->dest_depth_per_unit)) {
12265 /* save critical stripes to buf
12266 * start - start address of current unit
12267 * to backup [bytes]
12268 * start_src - start address of current unit
12269 * to backup, aligned to the source array
12270 * [bytes]
12271 */
12272 unsigned long long next_step_filler;
12273 unsigned long long copy_length = next_step * 512;
12274
12275 /* align the copy area length to a stripe in the old geometry */
12276 next_step_filler = ((copy_length + start_buf_shift)
12277 % old_data_stripe_length);
12278 if (next_step_filler)
12279 next_step_filler = (old_data_stripe_length
12280 - next_step_filler);
12281 dprintf("save_stripes() parameters: start = %llu,\tstart_src = %llu,\tnext_step*512 = %llu,\tstart_in_buf_shift = %llu,\tnext_step_filler = %llu\n",
12282 start, start_src, copy_length,
12283 start_buf_shift, next_step_filler);
12284
12285 if (save_stripes(fds, offsets, map_src->num_members,
12286 chunk, map_src->raid_level,
12287 source_layout, 0, NULL, start_src,
12288 copy_length +
12289 next_step_filler + start_buf_shift,
12290 buf)) {
12291 dprintf("imsm: Cannot save stripes to buffer\n");
12292 goto abort;
12293 }
12294 /* Convert data to destination format and store it
12295 * in backup general migration area
12296 */
12297 if (save_backup_imsm(st, dev, sra,
12298 buf + start_buf_shift, copy_length)) {
12299 dprintf("imsm: Cannot save stripes to target devices\n");
12300 goto abort;
12301 }
12302 if (save_checkpoint_imsm(st, sra,
12303 UNIT_SRC_IN_CP_AREA)) {
12304 dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_IN_CP_AREA)\n");
12305 goto abort;
12306 }
12307 } else {
12308 /* set next step to use whole border area */
12309 border /= next_step;
12310 if (border > 1)
12311 next_step *= border;
12312 }
12313 /* When data backed up, checkpoint stored,
12314 * kick the kernel to reshape unit of data
12315 */
12316 next_step = next_step + sra->reshape_progress;
12317 /* limit next step to array max position */
12318 if (next_step > max_position)
12319 next_step = max_position;
12320 sysfs_set_num(sra, NULL, "suspend_lo", sra->reshape_progress);
12321 sysfs_set_num(sra, NULL, "suspend_hi", next_step);
12322 sra->reshape_progress = next_step;
12323
12324 /* wait until reshape finish */
12325 if (wait_for_reshape_imsm(sra, ndata)) {
12326 dprintf("wait_for_reshape_imsm returned error!\n");
12327 goto abort;
12328 }
12329 if (sigterm)
12330 goto abort;
12331
12332 if (save_checkpoint_imsm(st, sra, UNIT_SRC_NORMAL) == 1) {
12333 /* ignore error == 2, this can mean end of reshape here
12334 */
12335 dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_NORMAL)\n");
12336 goto abort;
12337 }
12338
12339 }
12340
12341 /* clear migr_rec on disks after successful migration */
12342 struct dl *d;
12343
12344 memset(super->migr_rec_buf, 0, MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE);
12345 for (d = super->disks; d; d = d->next) {
12346 if (d->index < 0 || is_failed(&d->disk))
12347 continue;
12348 unsigned long long dsize;
12349
12350 get_dev_size(d->fd, NULL, &dsize);
12351 if (lseek64(d->fd, dsize - MIGR_REC_SECTOR_POSITION*sector_size,
12352 SEEK_SET) >= 0) {
12353 if ((unsigned int)write(d->fd, super->migr_rec_buf,
12354 MIGR_REC_BUF_SECTORS*sector_size) !=
12355 MIGR_REC_BUF_SECTORS*sector_size)
12356 perror("Write migr_rec failed");
12357 }
12358 }
12359
12360 /* return '1' if done */
12361 ret_val = 1;
12362
12363 /* After the reshape, eliminate the size mismatch in the metadata.
12364 * Don't update md/component_size here; the volume does not have
12365 * to take the whole space, which the kernel allows.
12366 * md/component_size will be set properly after the next assembly.
12367 */
12368 imsm_fix_size_mismatch(st, subarray_index);
12369
12370 abort:
12371 free(buf);
12372 /* See Grow.c: abort_reshape() for further explanation */
12373 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
12374 sysfs_set_num(sra, NULL, "suspend_hi", 0);
12375 sysfs_set_num(sra, NULL, "suspend_lo", 0);
12376
12377 return ret_val;
12378 }
12379
12380 /*******************************************************************************
12381 * Function: calculate_bitmap_min_chunksize
12382 * Description: Calculates the minimal valid bitmap chunk size
12383 * Parameters:
12384 * max_bits : indicates how many bits can be used for the bitmap
12385 * data_area_size : the size of the data area covered by the bitmap
12386 *
12387 * Returns:
12388 * The bitmap chunk size
12389 ******************************************************************************/
12390 static unsigned long long
12391 calculate_bitmap_min_chunksize(unsigned long long max_bits,
12392 unsigned long long data_area_size)
12393 {
12394 unsigned long long min_chunk =
12395 4096; /* sub-page chunks don't work yet.. */
12396 unsigned long long bits = data_area_size / min_chunk + 1;
12397
12398 while (bits > max_bits) {
12399 min_chunk *= 2;
12400 bits = (bits + 1) / 2;
12401 }
12402 return min_chunk;
12403 }
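/* Worked example (illustrative numbers): with max_bits == 1048576 and
 * data_area_size == 1 TiB, the initial count is 2^40 / 4096 + 1 ==
 * 268435457 bits; nine doublings of min_chunk are needed before
 * bits <= max_bits, giving 4096 * 2^9 == 2 MiB as the minimum chunk size.
 */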
12404
12405 /*******************************************************************************
12406 * Function: calculate_bitmap_chunksize
12407 * Description: Calculates the bitmap chunk size for the given device
12408 * Parameters:
12409 * st : supertype information
12410 * dev : device for the bitmap
12411 *
12412 * Returns:
12413 * The bitmap chunk size
12414 ******************************************************************************/
12415 static unsigned long long calculate_bitmap_chunksize(struct supertype *st,
12416 struct imsm_dev *dev)
12417 {
12418 struct intel_super *super = st->sb;
12419 unsigned long long min_chunksize;
12420 unsigned long long result = IMSM_DEFAULT_BITMAP_CHUNKSIZE;
12421 size_t dev_size = imsm_dev_size(dev);
12422
12423 min_chunksize = calculate_bitmap_min_chunksize(
12424 IMSM_BITMAP_AREA_SIZE * super->sector_size, dev_size);
12425
12426 if (result < min_chunksize)
12427 result = min_chunksize;
12428
12429 return result;
12430 }
12431
12432 /*******************************************************************************
12433 * Function: init_bitmap_header
12434 * Description: Initialize the bitmap header structure
12435 * Parameters:
12436 * st : supertype information
12437 * bms : bitmap header struct to initialize
12438 * dev : device for the bitmap
12439 *
12440 * Returns:
12441 * 0 : success
12442 * -1 : fail
12443 ******************************************************************************/
12444 static int init_bitmap_header(struct supertype *st, struct bitmap_super_s *bms,
12445 struct imsm_dev *dev)
12446 {
12447 int vol_uuid[4];
12448
12449 if (!bms || !dev)
12450 return -1;
12451
12452 bms->magic = __cpu_to_le32(BITMAP_MAGIC);
12453 bms->version = __cpu_to_le32(BITMAP_MAJOR_HI);
12454 bms->daemon_sleep = __cpu_to_le32(IMSM_DEFAULT_BITMAP_DAEMON_SLEEP);
12455 bms->sync_size = __cpu_to_le64(IMSM_BITMAP_AREA_SIZE);
12456 bms->write_behind = __cpu_to_le32(0);
12457
12458 uuid_from_super_imsm(st, vol_uuid);
12459 memcpy(bms->uuid, vol_uuid, 16);
12460
12461 bms->chunksize = calculate_bitmap_chunksize(st, dev);
12462
12463 return 0;
12464 }
12465
12466 /*******************************************************************************
12467 * Function: validate_internal_bitmap_for_drive
12468 * Description: Verify the bitmap header for a given drive.
12469 * Parameters:
12470 * st : supertype information
12471 * offset : The offset from the beginning of the drive where to look for
12472 * the bitmap header.
12473 * d : the drive info
12474 *
12475 * Returns:
12476 * 0 : success
12477 * -1 : fail
12478 ******************************************************************************/
12479 static int validate_internal_bitmap_for_drive(struct supertype *st,
12480 unsigned long long offset,
12481 struct dl *d)
12482 {
12483 struct intel_super *super = st->sb;
12484 int ret = -1;
12485 int vol_uuid[4];
12486 bitmap_super_t *bms;
12487 int fd;
12488
12489 if (!d)
12490 return -1;
12491
12492 void *read_buf;
12493
12494 if (posix_memalign(&read_buf, MAX_SECTOR_SIZE, IMSM_BITMAP_HEADER_SIZE))
12495 return -1;
12496
12497 fd = d->fd;
12498 if (fd < 0) {
12499 fd = open(d->devname, O_RDONLY, 0);
12500 if (fd < 0) {
12501 dprintf("cannot open the device %s\n", d->devname);
12502 goto abort;
12503 }
12504 }
12505
12506 if (lseek64(fd, offset * super->sector_size, SEEK_SET) < 0)
12507 goto abort;
12508 if (read(fd, read_buf, IMSM_BITMAP_HEADER_SIZE) !=
12509 IMSM_BITMAP_HEADER_SIZE)
12510 goto abort;
12511
12512 uuid_from_super_imsm(st, vol_uuid);
12513
12514 bms = read_buf;
12515 if ((bms->magic != __cpu_to_le32(BITMAP_MAGIC)) ||
12516 (bms->version != __cpu_to_le32(BITMAP_MAJOR_HI)) ||
12517 (!same_uuid((int *)bms->uuid, vol_uuid, st->ss->swapuuid))) {
12518 dprintf("wrong bitmap header detected\n");
12519 goto abort;
12520 }
12521
12522 ret = 0;
12523 abort:
12524 if ((d->fd < 0) && (fd >= 0))
12525 close(fd);
12526 if (read_buf)
12527 free(read_buf);
12528
12529 return ret;
12530 }
12531
12532 /*******************************************************************************
12533 * Function: validate_internal_bitmap_imsm
12534 * Description: Verify if the bitmap header is in place and with proper data.
12535 * Parameters:
12536 * st : supertype information
12537 *
12538 * Returns:
12539 * 0 : success or device w/o RWH_BITMAP
12540 * -1 : fail
12541 ******************************************************************************/
12542 static int validate_internal_bitmap_imsm(struct supertype *st)
12543 {
12544 struct intel_super *super = st->sb;
12545 struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
12546 unsigned long long offset;
12547 struct dl *d;
12548
12549 if (!dev)
12550 return -1;
12551
12552 if (dev->rwh_policy != RWH_BITMAP)
12553 return 0;
12554
12555 offset = get_bitmap_header_sector(super, super->current_vol);
12556 for (d = super->disks; d; d = d->next) {
12557 if (d->index < 0 || is_failed(&d->disk))
12558 continue;
12559
12560 if (validate_internal_bitmap_for_drive(st, offset, d)) {
12561 pr_err("imsm: bitmap validation failed\n");
12562 return -1;
12563 }
12564 }
12565 return 0;
12566 }
12567
12568 /*******************************************************************************
12569 * Function: add_internal_bitmap_imsm
12570 * Description: Marks the volume to use the bitmap and updates the chunk size value.
12571 * Parameters:
12572 * st : supertype information
12573 * chunkp : bitmap chunk size
12574 * delay : not used for imsm
12575 * write_behind : not used for imsm
12576 * size : not used for imsm
12577 * may_change : not used for imsm
12578 * amajor : not used for imsm
12579 *
12580 * Returns:
12581 * 0 : success
12582 * -1 : fail
12583 ******************************************************************************/
12584 static int add_internal_bitmap_imsm(struct supertype *st, int *chunkp,
12585 int delay, int write_behind,
12586 unsigned long long size, int may_change,
12587 int amajor)
12588 {
12589 struct intel_super *super = st->sb;
12590 int vol_idx = super->current_vol;
12591 struct imsm_dev *dev;
12592
12593 if (!super->devlist || vol_idx == -1 || !chunkp)
12594 return -1;
12595
12596 dev = get_imsm_dev(super, vol_idx);
12597
12598 if (!dev) {
12599 dprintf("cannot find the device for volume index %d\n",
12600 vol_idx);
12601 return -1;
12602 }
12603 dev->rwh_policy = RWH_BITMAP;
12604
12605 *chunkp = calculate_bitmap_chunksize(st, dev);
12606
12607 return 0;
12608 }
12609
12610 /*******************************************************************************
12611 * Function: locate_bitmap_imsm
12612 * Description: Seek 'fd' to the start of the write-intent bitmap.
12613 * Parameters:
12614 * st : supertype information
12615 * fd : file descriptor for the device
12616 * node_num : not used for imsm
12617 *
12618 * Returns:
12619 * 0 : success
12620 * -1 : fail
12621 ******************************************************************************/
12622 static int locate_bitmap_imsm(struct supertype *st, int fd, int node_num)
12623 {
12624 struct intel_super *super = st->sb;
12625 unsigned long long offset;
12626 int vol_idx = super->current_vol;
12627
12628 if (!super->devlist || vol_idx == -1)
12629 return -1;
12630
12631 offset = get_bitmap_header_sector(super, super->current_vol);
12632 dprintf("bitmap header offset is %llu\n", offset);
12633
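/* the offset is kept in 512-byte sectors; shifting left by 9 converts
 * it to the byte offset expected by lseek64()
 */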
12634 lseek64(fd, offset << 9, SEEK_SET);
12635
12636 return 0;
12637 }
12638
12639 /*******************************************************************************
12640 * Function: write_init_bitmap_imsm
12641 * Description: Writes a bitmap header and prepares the area for the bitmap.
12642 * Parameters:
12643 * st : supertype information
12644 * fd : file descriptor for the device
12645 * update : not used for imsm
12646 *
12647 * Returns:
12648 * 0 : success
12649 * -1 : fail
12650 ******************************************************************************/
12651 static int write_init_bitmap_imsm(struct supertype *st, int fd,
12652 enum bitmap_update update)
12653 {
12654 struct intel_super *super = st->sb;
12655 int vol_idx = super->current_vol;
12656 int ret = 0;
12657 unsigned long long offset;
12658 bitmap_super_t bms = { 0 };
12659 size_t written = 0;
12660 size_t to_write;
12661 ssize_t rv_num;
12662 void *buf;
12663
12664 if (!super->devlist || !super->sector_size || vol_idx == -1)
12665 return -1;
12666
12667 struct imsm_dev *dev = get_imsm_dev(super, vol_idx);
12668
12669 /* first clear the space for bitmap header */
12670 unsigned long long bitmap_area_start =
12671 get_bitmap_header_sector(super, vol_idx);
12672
12673 dprintf("zeroing area start (%llu) and size (%u)\n", bitmap_area_start,
12674 IMSM_BITMAP_HEADER_SIZE / super->sector_size);
12675 if (zero_disk_range(fd, bitmap_area_start,
12676 IMSM_BITMAP_HEADER_SIZE / super->sector_size)) {
12677 pr_err("imsm: cannot zero the space for the bitmap\n");
12678 return -1;
12679 }
12680
12681 /* The bitmap area should be filled with "1"s to perform initial
12682 * synchronization.
12683 */
12684 if (posix_memalign(&buf, MAX_SECTOR_SIZE, MAX_SECTOR_SIZE))
12685 return -1;
12686 memset(buf, 0xFF, MAX_SECTOR_SIZE);
12687 offset = get_bitmap_sector(super, vol_idx);
12688 lseek64(fd, offset << 9, SEEK_SET);
12689 while (written < IMSM_BITMAP_AREA_SIZE) {
12690 to_write = IMSM_BITMAP_AREA_SIZE - written;
12691 if (to_write > MAX_SECTOR_SIZE)
12692 to_write = MAX_SECTOR_SIZE;
12693 rv_num = write(fd, buf, to_write);
12694 if (rv_num != (ssize_t)to_write) {
12695 ret = -1;
12696 dprintf("cannot initialize bitmap area\n");
12697 goto abort;
12698 }
12699 written += rv_num;
12700 }
12701
12702 /* write a bitmap header */
12703 init_bitmap_header(st, &bms, dev);
12704 memset(buf, 0, MAX_SECTOR_SIZE);
12705 memcpy(buf, &bms, sizeof(bitmap_super_t));
12706 if (locate_bitmap_imsm(st, fd, 0)) {
12707 ret = -1;
12708 dprintf("cannot locate the bitmap\n");
12709 goto abort;
12710 }
12711 if (write(fd, buf, MAX_SECTOR_SIZE) != MAX_SECTOR_SIZE) {
12712 ret = -1;
12713 dprintf("cannot write the bitmap header\n");
12714 goto abort;
12715 }
12716 fsync(fd);
12717
12718 abort:
12719 free(buf);
12720
12721 return ret;
12722 }
12723
12724 /*******************************************************************************
12725 * Function: is_vol_to_setup_bitmap
12726 * Description: Checks if a bitmap should be activated on the dev.
12727 * Parameters:
12728 * info : info about the volume to setup the bitmap
12729 * dev : the device to check against bitmap creation
12730 *
12731 * Returns:
12732 * -1 : bitmap should be set up on the device
12733 * 0 : otherwise
12734 ******************************************************************************/
12735 static int is_vol_to_setup_bitmap(struct mdinfo *info, struct imsm_dev *dev)
12736 {
12737 if (!dev || !info)
12738 return -1;
12739
12740 if ((strcmp((char *)dev->volume, info->name) == 0) &&
12741 (dev->rwh_policy == RWH_BITMAP))
12742 return -1;
12743
12744 return 0;
12745 }
12746
12747 /*******************************************************************************
12748 * Function: set_bitmap_sysfs
12749 * Description: Sets the sysfs attributes of a given volume to activate the bitmap.
12750 * Parameters:
12751 * info : info about the volume where the bitmap should be setup
12752 * chunksize : bitmap chunk size
12753 * location : location of the bitmap
12754 *
12755 * Returns:
12756 * 0 : success
12757 * -1 : fail
12758 ******************************************************************************/
12759 static int set_bitmap_sysfs(struct mdinfo *info, unsigned long long chunksize,
12760 char *location)
12761 {
12762 /* bitmap/metadata is set to "external" to allow changing the value of
12763 * bitmap/location. When "external" is used, the kernel treats the offset
12764 * as relative to the device's first LBA (as opposed to the "internal"
12765 * case, where the value is relative to the beginning of the superblock).
12766 */
12767 if (sysfs_set_str(info, NULL, "bitmap/metadata", "external")) {
12768 dprintf("failed to set bitmap/metadata\n");
12769 return -1;
12770 }
12771
12772 /* It can only be changed when no bitmap is active.
12773 * It must be bigger than 512 and a power of 2.
12774 * The value is expected in bytes.
12775 */
12776 if (sysfs_set_num(info, NULL, "bitmap/chunksize",
12777 __cpu_to_le32(chunksize))) {
12778 dprintf("failed to set bitmap/chunksize\n");
12779 return -1;
12780 }
12781
12782 /* The value is expected in sectors. */
12783 if (sysfs_set_num(info, NULL, "bitmap/space",
12784 __cpu_to_le64(IMSM_BITMAP_AREA_SIZE))) {
12785 dprintf("failed to set bitmap/space\n");
12786 return -1;
12787 }
12788
12789 /* Determines the delay between bitmap updates.
12790 * The value is expected in seconds.
12791 */
12792 if (sysfs_set_num(info, NULL, "bitmap/time_base",
12793 __cpu_to_le64(IMSM_DEFAULT_BITMAP_DAEMON_SLEEP))) {
12794 dprintf("failed to set bitmap/time_base\n");
12795 return -1;
12796 }
12797
12798 /* The value is expected in sectors, with a sign at the beginning. */
12799 if (sysfs_set_str(info, NULL, "bitmap/location", location)) {
12800 dprintf("failed to set bitmap/location\n");
12801 return -1;
12802 }
12803
12804 return 0;
12805 }
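/* Example of the resulting sysfs state (illustrative values, for a bitmap
 * placed 34816 sectors into the member device with a 64 MiB chunk):
 *
 *	bitmap/metadata  = external
 *	bitmap/chunksize = 67108864                           (bytes)
 *	bitmap/space     = IMSM_BITMAP_AREA_SIZE              (sectors)
 *	bitmap/time_base = IMSM_DEFAULT_BITMAP_DAEMON_SLEEP   (seconds)
 *	bitmap/location  = +34816                             (sectors, signed)
 */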
12806
12807 /*******************************************************************************
12808 * Function: set_bitmap_imsm
12809 * Description: Setup the bitmap for the given volume
12810 * Parameters:
12811 * st : supertype information
12812 * info : info about the volume where the bitmap should be setup
12813 *
12814 * Returns:
12815 * 0 : success
12816 * -1 : fail
12817 ******************************************************************************/
12818 static int set_bitmap_imsm(struct supertype *st, struct mdinfo *info)
12819 {
12820 struct intel_super *super = st->sb;
12821 int prev_current_vol = super->current_vol;
12822 struct imsm_dev *dev;
12823 int ret = -1;
12824 char location[16] = "";
12825 unsigned long long chunksize;
12826 struct intel_dev *dev_it;
12827
12828 for (dev_it = super->devlist; dev_it; dev_it = dev_it->next) {
12829 super->current_vol = dev_it->index;
12830 dev = get_imsm_dev(super, super->current_vol);
12831
12832 if (is_vol_to_setup_bitmap(info, dev)) {
12833 if (validate_internal_bitmap_imsm(st)) {
12834 dprintf("bitmap header validation failed\n");
12835 goto abort;
12836 }
12837
12838 chunksize = calculate_bitmap_chunksize(st, dev);
12839 dprintf("chunk size is %llu\n", chunksize);
12840
12841 snprintf(location, sizeof(location), "+%llu",
12842 get_bitmap_sector(super, super->current_vol));
12843 dprintf("bitmap offset is %s\n", location);
12844
12845 if (set_bitmap_sysfs(info, chunksize, location)) {
12846 dprintf("cannot setup the bitmap\n");
12847 goto abort;
12848 }
12849 }
12850 }
12851 ret = 0;
12852 abort:
12853 super->current_vol = prev_current_vol;
12854 return ret;
12855 }
12856
12857 struct superswitch super_imsm = {
12858 .examine_super = examine_super_imsm,
12859 .brief_examine_super = brief_examine_super_imsm,
12860 .brief_examine_subarrays = brief_examine_subarrays_imsm,
12861 .export_examine_super = export_examine_super_imsm,
12862 .detail_super = detail_super_imsm,
12863 .brief_detail_super = brief_detail_super_imsm,
12864 .write_init_super = write_init_super_imsm,
12865 .validate_geometry = validate_geometry_imsm,
12866 .add_to_super = add_to_super_imsm,
12867 .remove_from_super = remove_from_super_imsm,
12868 .detail_platform = detail_platform_imsm,
12869 .export_detail_platform = export_detail_platform_imsm,
12870 .kill_subarray = kill_subarray_imsm,
12871 .update_subarray = update_subarray_imsm,
12872 .load_container = load_container_imsm,
12873 .default_geometry = default_geometry_imsm,
12874 .get_disk_controller_domain = imsm_get_disk_controller_domain,
12875 .reshape_super = imsm_reshape_super,
12876 .manage_reshape = imsm_manage_reshape,
12877 .recover_backup = recover_backup_imsm,
12878 .examine_badblocks = examine_badblocks_imsm,
12879 .match_home = match_home_imsm,
12880 .uuid_from_super= uuid_from_super_imsm,
12881 .getinfo_super = getinfo_super_imsm,
12882 .getinfo_super_disks = getinfo_super_disks_imsm,
12883 .update_super = update_super_imsm,
12884
12885 .avail_size = avail_size_imsm,
12886 .get_spare_criteria = get_spare_criteria_imsm,
12887
12888 .compare_super = compare_super_imsm,
12889
12890 .load_super = load_super_imsm,
12891 .init_super = init_super_imsm,
12892 .store_super = store_super_imsm,
12893 .free_super = free_super_imsm,
12894 .match_metadata_desc = match_metadata_desc_imsm,
12895 .container_content = container_content_imsm,
12896 .validate_container = validate_container_imsm,
12897
12898 .add_internal_bitmap = add_internal_bitmap_imsm,
12899 .locate_bitmap = locate_bitmap_imsm,
12900 .write_bitmap = write_init_bitmap_imsm,
12901 .set_bitmap = set_bitmap_imsm,
12902
12903 .write_init_ppl = write_init_ppl_imsm,
12904 .validate_ppl = validate_ppl_imsm,
12905
12906 .external = 1,
12907 .name = "imsm",
12908
12909 /* for mdmon */
12910 .open_new = imsm_open_new,
12911 .set_array_state= imsm_set_array_state,
12912 .set_disk = imsm_set_disk,
12913 .sync_metadata = imsm_sync_metadata,
12914 .activate_spare = imsm_activate_spare,
12915 .process_update = imsm_process_update,
12916 .prepare_update = imsm_prepare_update,
12917 .record_bad_block = imsm_record_badblock,
12918 .clear_bad_block = imsm_clear_badblock,
12919 .get_bad_blocks = imsm_get_badblocks,
12920 };