1 /*
2 * mdadm - Intel(R) Matrix Storage Manager Support
3 *
4 * Copyright (C) 2002-2008 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #define HAVE_STDINT_H 1
21 #include "mdadm.h"
22 #include "mdmon.h"
23 #include "sha1.h"
24 #include "platform-intel.h"
25 #include <values.h>
26 #include <scsi/sg.h>
27 #include <ctype.h>
28 #include <dirent.h>
29
30 /* MPB == Metadata Parameter Block */
31 #define MPB_SIGNATURE "Intel Raid ISM Cfg Sig. "
32 #define MPB_SIG_LEN (strlen(MPB_SIGNATURE))
33 #define MPB_VERSION_RAID0 "1.0.00"
34 #define MPB_VERSION_RAID1 "1.1.00"
35 #define MPB_VERSION_MANY_VOLUMES_PER_ARRAY "1.2.00"
36 #define MPB_VERSION_3OR4_DISK_ARRAY "1.2.01"
37 #define MPB_VERSION_RAID5 "1.2.02"
38 #define MPB_VERSION_5OR6_DISK_ARRAY "1.2.04"
39 #define MPB_VERSION_CNG "1.2.06"
40 #define MPB_VERSION_ATTRIBS "1.3.00"
41 #define MAX_SIGNATURE_LENGTH 32
42 #define MAX_RAID_SERIAL_LEN 16
43
44 /* supports RAID0 */
45 #define MPB_ATTRIB_RAID0 __cpu_to_le32(0x00000001)
46 /* supports RAID1 */
47 #define MPB_ATTRIB_RAID1 __cpu_to_le32(0x00000002)
48 /* supports RAID10 */
49 #define MPB_ATTRIB_RAID10 __cpu_to_le32(0x00000004)
50 /* supports RAID1E */
51 #define MPB_ATTRIB_RAID1E __cpu_to_le32(0x00000008)
52 /* supports RAID5 */
53 #define MPB_ATTRIB_RAID5 __cpu_to_le32(0x00000010)
54 /* supports RAID CNG */
55 #define MPB_ATTRIB_RAIDCNG __cpu_to_le32(0x00000020)
56 /* supports expanded stripe sizes of 256K, 512K and 1MB */
57 #define MPB_ATTRIB_EXP_STRIPE_SIZE __cpu_to_le32(0x00000040)
58
59 /* The OROM supports RST Caching of Volumes */
60 #define MPB_ATTRIB_NVM __cpu_to_le32(0x02000000)
61 /* The OROM supports creating disks greater than 2TB */
62 #define MPB_ATTRIB_2TB_DISK __cpu_to_le32(0x04000000)
63 /* The OROM supports Bad Block Management */
64 #define MPB_ATTRIB_BBM __cpu_to_le32(0x08000000)
65
66 /* The OROM supports NVM Caching of Volumes */
67 #define MPB_ATTRIB_NEVER_USE2 __cpu_to_le32(0x10000000)
68 /* The OROM supports creating volumes greater than 2TB */
69 #define MPB_ATTRIB_2TB __cpu_to_le32(0x20000000)
70 /* originally for PMP, now wasted. Never use this bit! */
71 #define MPB_ATTRIB_NEVER_USE __cpu_to_le32(0x40000000)
72 /* Verify MPB contents against checksum after reading MPB */
73 #define MPB_ATTRIB_CHECKSUM_VERIFY __cpu_to_le32(0x80000000)
74
75 /* Define all supported attributes that have to be accepted by mdadm
76 */
77 #define MPB_ATTRIB_SUPPORTED (MPB_ATTRIB_CHECKSUM_VERIFY | \
78 MPB_ATTRIB_2TB | \
79 MPB_ATTRIB_2TB_DISK | \
80 MPB_ATTRIB_RAID0 | \
81 MPB_ATTRIB_RAID1 | \
82 MPB_ATTRIB_RAID10 | \
83 MPB_ATTRIB_RAID5 | \
84 MPB_ATTRIB_EXP_STRIPE_SIZE | \
85 MPB_ATTRIB_BBM)
86
87 /* Define attributes that are unused but not harmful */
88 #define MPB_ATTRIB_IGNORED (MPB_ATTRIB_NEVER_USE)
89
90 #define MPB_SECTOR_CNT 2210
91 #define IMSM_RESERVED_SECTORS 4096
92 #define NUM_BLOCKS_DIRTY_STRIPE_REGION 2056
93 #define SECT_PER_MB_SHIFT 11
94 #define MAX_SECTOR_SIZE 4096
95 #define MULTIPLE_PPL_AREA_SIZE_IMSM (1024 * 1024) /* Size of the whole
96 * multiple PPL area
97 */
98
99 /* Disk configuration info. */
100 #define IMSM_MAX_DEVICES 255
101 struct imsm_disk {
102 __u8 serial[MAX_RAID_SERIAL_LEN];/* 0xD8 - 0xE7 ascii serial number */
103 __u32 total_blocks_lo; /* 0xE8 - 0xEB total blocks lo */
104 __u32 scsi_id; /* 0xEC - 0xEF scsi ID */
105 #define SPARE_DISK __cpu_to_le32(0x01) /* Spare */
106 #define CONFIGURED_DISK __cpu_to_le32(0x02) /* Member of some RaidDev */
107 #define FAILED_DISK __cpu_to_le32(0x04) /* Permanent failure */
108 #define JOURNAL_DISK __cpu_to_le32(0x2000000) /* Device marked as Journaling Drive */
109 __u32 status; /* 0xF0 - 0xF3 */
110 __u32 owner_cfg_num; /* which config 0,1,2... owns this disk */
111 __u32 total_blocks_hi; /* 0xF8 - 0xFB total blocks hi */
112 #define IMSM_DISK_FILLERS 3
113 __u32 filler[IMSM_DISK_FILLERS]; /* 0xFC - 0x107 MPB_DISK_FILLERS for future expansion */
114 };
115
116 /* map selector for map management
117 */
118 #define MAP_0 0
119 #define MAP_1 1
120 #define MAP_X -1
121
122 /* RAID map configuration info. */
123 struct imsm_map {
124 __u32 pba_of_lba0_lo; /* start address of partition */
125 __u32 blocks_per_member_lo;/* blocks per member */
126 __u32 num_data_stripes_lo; /* number of data stripes */
127 __u16 blocks_per_strip;
128 __u8 map_state; /* Normal, Uninitialized, Degraded, Failed */
129 #define IMSM_T_STATE_NORMAL 0
130 #define IMSM_T_STATE_UNINITIALIZED 1
131 #define IMSM_T_STATE_DEGRADED 2
132 #define IMSM_T_STATE_FAILED 3
133 __u8 raid_level;
134 #define IMSM_T_RAID0 0
135 #define IMSM_T_RAID1 1
136 #define IMSM_T_RAID5 5 /* since metadata version 1.2.02 ? */
137 __u8 num_members; /* number of member disks */
138 __u8 num_domains; /* number of parity domains */
139 __u8 failed_disk_num; /* valid only when state is degraded */
140 __u8 ddf;
141 __u32 pba_of_lba0_hi;
142 __u32 blocks_per_member_hi;
143 __u32 num_data_stripes_hi;
144 __u32 filler[4]; /* expansion area */
145 #define IMSM_ORD_REBUILD (1 << 24)
146 __u32 disk_ord_tbl[1]; /* disk_ord_tbl[num_members],
147 * top byte contains some flags
148 */
149 } __attribute__ ((packed));
150
151 struct imsm_vol {
152 __u32 curr_migr_unit;
153 __u32 checkpoint_id; /* id to access curr_migr_unit */
154 __u8 migr_state; /* Normal or Migrating */
155 #define MIGR_INIT 0
156 #define MIGR_REBUILD 1
157 #define MIGR_VERIFY 2 /* analogous to echo check > sync_action */
158 #define MIGR_GEN_MIGR 3
159 #define MIGR_STATE_CHANGE 4
160 #define MIGR_REPAIR 5
161 __u8 migr_type; /* Initializing, Rebuilding, ... */
162 #define RAIDVOL_CLEAN 0
163 #define RAIDVOL_DIRTY 1
164 #define RAIDVOL_DSRECORD_VALID 2
165 __u8 dirty;
166 __u8 fs_state; /* fast-sync state for CnG (0xff == disabled) */
167 __u16 verify_errors; /* number of mismatches */
168 __u16 bad_blocks; /* number of bad blocks during verify */
169 __u32 filler[4];
170 struct imsm_map map[1];
171 /* here comes another one if migr_state */
172 } __attribute__ ((packed));
173
174 struct imsm_dev {
175 __u8 volume[MAX_RAID_SERIAL_LEN];
176 __u32 size_low;
177 __u32 size_high;
178 #define DEV_BOOTABLE __cpu_to_le32(0x01)
179 #define DEV_BOOT_DEVICE __cpu_to_le32(0x02)
180 #define DEV_READ_COALESCING __cpu_to_le32(0x04)
181 #define DEV_WRITE_COALESCING __cpu_to_le32(0x08)
182 #define DEV_LAST_SHUTDOWN_DIRTY __cpu_to_le32(0x10)
183 #define DEV_HIDDEN_AT_BOOT __cpu_to_le32(0x20)
184 #define DEV_CURRENTLY_HIDDEN __cpu_to_le32(0x40)
185 #define DEV_VERIFY_AND_FIX __cpu_to_le32(0x80)
186 #define DEV_MAP_STATE_UNINIT __cpu_to_le32(0x100)
187 #define DEV_NO_AUTO_RECOVERY __cpu_to_le32(0x200)
188 #define DEV_CLONE_N_GO __cpu_to_le32(0x400)
189 #define DEV_CLONE_MAN_SYNC __cpu_to_le32(0x800)
190 #define DEV_CNG_MASTER_DISK_NUM __cpu_to_le32(0x1000)
191 __u32 status; /* Persistent RaidDev status */
192 __u32 reserved_blocks; /* Reserved blocks at beginning of volume */
193 __u8 migr_priority;
194 __u8 num_sub_vols;
195 __u8 tid;
196 __u8 cng_master_disk;
197 __u16 cache_policy;
198 __u8 cng_state;
199 __u8 cng_sub_state;
200 __u16 my_vol_raid_dev_num; /* Used in Unique volume Id for this RaidDev */
201
202 /* NVM_EN */
203 __u8 nv_cache_mode;
204 __u8 nv_cache_flags;
205
206 /* Unique Volume Id of the NvCache Volume associated with this volume */
207 __u32 nvc_vol_orig_family_num;
208 __u16 nvc_vol_raid_dev_num;
209
210 #define RWH_OFF 0
211 #define RWH_DISTRIBUTED 1
212 #define RWH_JOURNALING_DRIVE 2
213 #define RWH_MULTIPLE_DISTRIBUTED 3
214 #define RWH_MULTIPLE_PPLS_JOURNALING_DRIVE 4
215 #define RWH_MULTIPLE_OFF 5
216 __u8 rwh_policy; /* Raid Write Hole Policy */
217 __u8 jd_serial[MAX_RAID_SERIAL_LEN]; /* Journal Drive serial number */
218 __u8 filler1;
219
220 #define IMSM_DEV_FILLERS 3
221 __u32 filler[IMSM_DEV_FILLERS];
222 struct imsm_vol vol;
223 } __attribute__ ((packed));
224
225 struct imsm_super {
226 __u8 sig[MAX_SIGNATURE_LENGTH]; /* 0x00 - 0x1F */
227 __u32 check_sum; /* 0x20 - 0x23 MPB Checksum */
228 __u32 mpb_size; /* 0x24 - 0x27 Size of MPB */
229 __u32 family_num; /* 0x28 - 0x2B Checksum from first time this config was written */
230 __u32 generation_num; /* 0x2C - 0x2F Incremented each time this array's MPB is written */
231 __u32 error_log_size; /* 0x30 - 0x33 in bytes */
232 __u32 attributes; /* 0x34 - 0x37 */
233 __u8 num_disks; /* 0x38 Number of configured disks */
234 __u8 num_raid_devs; /* 0x39 Number of configured volumes */
235 __u8 error_log_pos; /* 0x3A */
236 __u8 fill[1]; /* 0x3B */
237 __u32 cache_size; /* 0x3c - 0x3f in mb */
238 __u32 orig_family_num; /* 0x40 - 0x43 original family num */
239 __u32 pwr_cycle_count; /* 0x44 - 0x47 simulated power cycle count for array */
240 __u32 bbm_log_size; /* 0x48 - 0x4B - size of bad Block Mgmt Log in bytes */
241 __u16 num_raid_devs_created; /* 0x4C - 0x4D Used for generating unique
242 * volume IDs for raid_dev created in this array
243 * (starts at 1)
244 */
245 __u16 filler1; /* 0x4E - 0x4F */
246 #define IMSM_FILLERS 34
247 __u32 filler[IMSM_FILLERS]; /* 0x50 - 0xD7 RAID_MPB_FILLERS */
248 struct imsm_disk disk[1]; /* 0xD8 diskTbl[numDisks] */
249 /* here comes imsm_dev[num_raid_devs] */
250 /* here comes BBM logs */
251 } __attribute__ ((packed));
252
253 #define BBM_LOG_MAX_ENTRIES 254
254 #define BBM_LOG_MAX_LBA_ENTRY_VAL 256 /* Represents 256 LBAs */
255 #define BBM_LOG_SIGNATURE 0xabadb10c
256
257 struct bbm_log_block_addr {
258 __u16 w1;
259 __u32 dw1;
260 } __attribute__ ((__packed__));
261
262 struct bbm_log_entry {
263 __u8 marked_count; /* Number of blocks marked - 1 */
264 __u8 disk_ordinal; /* Disk entry within the imsm_super */
265 struct bbm_log_block_addr defective_block_start;
266 } __attribute__ ((__packed__));
267
268 struct bbm_log {
269 __u32 signature; /* 0xABADB10C */
270 __u32 entry_count;
271 struct bbm_log_entry marked_block_entries[BBM_LOG_MAX_ENTRIES];
272 } __attribute__ ((__packed__));
273
274 static char *map_state_str[] = { "normal", "uninitialized", "degraded", "failed" };
275
276 #define BLOCKS_PER_KB (1024/512)
277
278 #define RAID_DISK_RESERVED_BLOCKS_IMSM_HI 2209
279
280 #define GEN_MIGR_AREA_SIZE 2048 /* General Migration Copy Area size in blocks */
281
282 #define MIGR_REC_BUF_SECTORS 1 /* size of migr_record i/o buffer in sectors */
283 #define MIGR_REC_SECTOR_POSITION 1 /* migr_record position offset on disk,
284 * MIGR_REC_BUF_SECTORS <= MIGR_REC_SECTOR_POSITION
285 */
286
287 #define UNIT_SRC_NORMAL 0 /* Source data for curr_migr_unit must
288 * be recovered using srcMap */
289 #define UNIT_SRC_IN_CP_AREA 1 /* Source data for curr_migr_unit has
290 * already been migrated and must
291 * be recovered from checkpoint area */
292
293 #define PPL_ENTRY_SPACE (128 * 1024) /* Size of single PPL, without the header */
294
295 struct migr_record {
296 __u32 rec_status; /* Status used to determine how to restart
297 * migration in case it aborts
298 * in some fashion */
299 __u32 curr_migr_unit; /* 0..numMigrUnits-1 */
300 __u32 family_num; /* Family number of MPB
301 * containing the RaidDev
302 * that is migrating */
303 __u32 ascending_migr; /* True if migrating in increasing
304 * order of lbas */
305 __u32 blocks_per_unit; /* Num disk blocks per unit of operation */
306 __u32 dest_depth_per_unit; /* Num member blocks each destMap
307 * member disk
308 * advances per unit-of-operation */
309 __u32 ckpt_area_pba; /* Pba of first block of ckpt copy area */
310 __u32 dest_1st_member_lba; /* First member lba on first
311 * stripe of destination */
312 __u32 num_migr_units; /* Total num migration units-of-op */
313 __u32 post_migr_vol_cap; /* Size of volume after
314 * migration completes */
315 __u32 post_migr_vol_cap_hi; /* Expansion space for LBA64 */
316 __u32 ckpt_read_disk_num; /* Which member disk in destSubMap[0] the
317 * migration ckpt record was read from
318 * (for recovered migrations) */
319 } __attribute__ ((__packed__));
320
321 struct md_list {
322 /* usage marker:
323 * 1: load metadata
324 * 2: metadata does not match
325 * 4: already checked
326 */
327 int used;
328 char *devname;
329 int found;
330 int container;
331 dev_t st_rdev;
332 struct md_list *next;
333 };
334
335 #define pr_vrb(fmt, arg...) (void) (verbose && pr_err(fmt, ##arg))
336
337 static __u8 migr_type(struct imsm_dev *dev)
338 {
339 if (dev->vol.migr_type == MIGR_VERIFY &&
340 dev->status & DEV_VERIFY_AND_FIX)
341 return MIGR_REPAIR;
342 else
343 return dev->vol.migr_type;
344 }
345
346 static void set_migr_type(struct imsm_dev *dev, __u8 migr_type)
347 {
348 /* for compatibility with older oroms convert MIGR_REPAIR into
349 * MIGR_VERIFY w/ DEV_VERIFY_AND_FIX status
350 */
351 if (migr_type == MIGR_REPAIR) {
352 dev->vol.migr_type = MIGR_VERIFY;
353 dev->status |= DEV_VERIFY_AND_FIX;
354 } else {
355 dev->vol.migr_type = migr_type;
356 dev->status &= ~DEV_VERIFY_AND_FIX;
357 }
358 }
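/*
 * Editorial note (illustrative, not part of the upstream source): the two
 * helpers above round-trip the "repair" state through fields that older
 * OROMs understand.  A minimal sketch of the encoding:
 *
 *	set_migr_type(dev, MIGR_REPAIR);
 *		stores dev->vol.migr_type = MIGR_VERIFY and sets
 *		DEV_VERIFY_AND_FIX in dev->status
 *	migr_type(dev);
 *		sees MIGR_VERIFY plus DEV_VERIFY_AND_FIX and reports
 *		MIGR_REPAIR again
 */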
359
360 static unsigned int sector_count(__u32 bytes, unsigned int sector_size)
361 {
362 return ROUND_UP(bytes, sector_size) / sector_size;
363 }
364
365 static unsigned int mpb_sectors(struct imsm_super *mpb,
366 unsigned int sector_size)
367 {
368 return sector_count(__le32_to_cpu(mpb->mpb_size), sector_size);
369 }
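/*
 * Worked example (editorial addition): sector_count() rounds a byte count
 * up to whole sectors, so for an illustrative mpb_size of 1724 bytes
 *	sector_count(1724, 512)  == 4	(ROUND_UP to 2048, then / 512)
 *	sector_count(1724, 4096) == 1	(ROUND_UP to 4096, then / 4096)
 * The 1724-byte figure is only an example, not a fixed MPB size.
 */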
370
371 struct intel_dev {
372 struct imsm_dev *dev;
373 struct intel_dev *next;
374 unsigned index;
375 };
376
377 struct intel_hba {
378 enum sys_dev_type type;
379 char *path;
380 char *pci_id;
381 struct intel_hba *next;
382 };
383
384 enum action {
385 DISK_REMOVE = 1,
386 DISK_ADD
387 };
388 /* internal representation of IMSM metadata */
389 struct intel_super {
390 union {
391 void *buf; /* O_DIRECT buffer for reading/writing metadata */
392 struct imsm_super *anchor; /* immovable parameters */
393 };
394 union {
395 void *migr_rec_buf; /* buffer for I/O operations */
396 struct migr_record *migr_rec; /* migration record */
397 };
398 int clean_migration_record_by_mdmon; /* when reshape is switched to next
399 array, it indicates that mdmon is allowed to clean migration
400 record */
401 size_t len; /* size of the 'buf' allocation */
402 size_t extra_space; /* extra space in 'buf' that is not used yet */
403 void *next_buf; /* for realloc'ing buf from the manager */
404 size_t next_len;
405 int updates_pending; /* count of pending updates for mdmon */
406 int current_vol; /* index of raid device undergoing creation */
407 unsigned long long create_offset; /* common start for 'current_vol' */
408 __u32 random; /* random data for seeding new family numbers */
409 struct intel_dev *devlist;
410 unsigned int sector_size; /* sector size of used member drives */
411 struct dl {
412 struct dl *next;
413 int index;
414 __u8 serial[MAX_RAID_SERIAL_LEN];
415 int major, minor;
416 char *devname;
417 struct imsm_disk disk;
418 int fd;
419 int extent_cnt;
420 struct extent *e; /* for determining freespace @ create */
421 int raiddisk; /* slot to fill in autolayout */
422 enum action action;
423 } *disks, *current_disk;
424 struct dl *disk_mgmt_list; /* list of disks to add/remove while mdmon
425 active */
426 struct dl *missing; /* disks removed while we weren't looking */
427 struct bbm_log *bbm_log;
428 struct intel_hba *hba; /* device path of the raid controller for this metadata */
429 const struct imsm_orom *orom; /* platform firmware support */
430 struct intel_super *next; /* (temp) list for disambiguating family_num */
431 struct md_bb bb; /* memory for get_bad_blocks call */
432 };
433
434 struct intel_disk {
435 struct imsm_disk disk;
436 #define IMSM_UNKNOWN_OWNER (-1)
437 int owner;
438 struct intel_disk *next;
439 };
440
441 struct extent {
442 unsigned long long start, size;
443 };
444
445 /* definitions of reshape process types */
446 enum imsm_reshape_type {
447 CH_TAKEOVER,
448 CH_MIGRATION,
449 CH_ARRAY_SIZE,
450 };
451
452 /* definition of messages passed to imsm_process_update */
453 enum imsm_update_type {
454 update_activate_spare,
455 update_create_array,
456 update_kill_array,
457 update_rename_array,
458 update_add_remove_disk,
459 update_reshape_container_disks,
460 update_reshape_migration,
461 update_takeover,
462 update_general_migration_checkpoint,
463 update_size_change,
464 update_prealloc_badblocks_mem,
465 update_rwh_policy,
466 };
467
468 struct imsm_update_activate_spare {
469 enum imsm_update_type type;
470 struct dl *dl;
471 int slot;
472 int array;
473 struct imsm_update_activate_spare *next;
474 };
475
476 struct geo_params {
477 char devnm[32];
478 char *dev_name;
479 unsigned long long size;
480 int level;
481 int layout;
482 int chunksize;
483 int raid_disks;
484 };
485
486 enum takeover_direction {
487 R10_TO_R0,
488 R0_TO_R10
489 };
490 struct imsm_update_takeover {
491 enum imsm_update_type type;
492 int subarray;
493 enum takeover_direction direction;
494 };
495
496 struct imsm_update_reshape {
497 enum imsm_update_type type;
498 int old_raid_disks;
499 int new_raid_disks;
500
501 int new_disks[1]; /* makedev numbers of the added disks (new_raid_disks - old_raid_disks entries) */
502 };
503
504 struct imsm_update_reshape_migration {
505 enum imsm_update_type type;
506 int old_raid_disks;
507 int new_raid_disks;
508 /* fields for array migration changes
509 */
510 int subdev;
511 int new_level;
512 int new_layout;
513 int new_chunksize;
514
515 int new_disks[1]; /* makedev numbers of the added disks (new_raid_disks - old_raid_disks entries) */
516 };
517
518 struct imsm_update_size_change {
519 enum imsm_update_type type;
520 int subdev;
521 long long new_size;
522 };
523
524 struct imsm_update_general_migration_checkpoint {
525 enum imsm_update_type type;
526 __u32 curr_migr_unit;
527 };
528
529 struct disk_info {
530 __u8 serial[MAX_RAID_SERIAL_LEN];
531 };
532
533 struct imsm_update_create_array {
534 enum imsm_update_type type;
535 int dev_idx;
536 struct imsm_dev dev;
537 };
538
539 struct imsm_update_kill_array {
540 enum imsm_update_type type;
541 int dev_idx;
542 };
543
544 struct imsm_update_rename_array {
545 enum imsm_update_type type;
546 __u8 name[MAX_RAID_SERIAL_LEN];
547 int dev_idx;
548 };
549
550 struct imsm_update_add_remove_disk {
551 enum imsm_update_type type;
552 };
553
554 struct imsm_update_prealloc_bb_mem {
555 enum imsm_update_type type;
556 };
557
558 struct imsm_update_rwh_policy {
559 enum imsm_update_type type;
560 int new_policy;
561 int dev_idx;
562 };
563
564 static const char *_sys_dev_type[] = {
565 [SYS_DEV_UNKNOWN] = "Unknown",
566 [SYS_DEV_SAS] = "SAS",
567 [SYS_DEV_SATA] = "SATA",
568 [SYS_DEV_NVME] = "NVMe",
569 [SYS_DEV_VMD] = "VMD"
570 };
571
572 const char *get_sys_dev_type(enum sys_dev_type type)
573 {
574 if (type >= SYS_DEV_MAX)
575 type = SYS_DEV_UNKNOWN;
576
577 return _sys_dev_type[type];
578 }
579
580 static struct intel_hba * alloc_intel_hba(struct sys_dev *device)
581 {
582 struct intel_hba *result = xmalloc(sizeof(*result));
583
584 result->type = device->type;
585 result->path = xstrdup(device->path);
586 result->next = NULL;
587 if (result->path && (result->pci_id = strrchr(result->path, '/')) != NULL)
588 result->pci_id++;
589
590 return result;
591 }
592
593 static struct intel_hba * find_intel_hba(struct intel_hba *hba, struct sys_dev *device)
594 {
595 struct intel_hba *result;
596
597 for (result = hba; result; result = result->next) {
598 if (result->type == device->type && strcmp(result->path, device->path) == 0)
599 break;
600 }
601 return result;
602 }
603
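/*
 * Editorial summary of attach_hba_to_super() below (not an upstream
 * comment): it returns 1 when the disk's HBA matches, or is added to, this
 * metadata's HBA list, and 2 when the HBA cannot be used because its type
 * differs from super->hba or it is handled by a different OROM.
 */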
604 static int attach_hba_to_super(struct intel_super *super, struct sys_dev *device)
605 {
606 struct intel_hba *hba;
607
608 /* check if disk attached to Intel HBA */
609 hba = find_intel_hba(super->hba, device);
610 if (hba != NULL)
611 return 1;
612 /* Check if HBA is already attached to super */
613 if (super->hba == NULL) {
614 super->hba = alloc_intel_hba(device);
615 return 1;
616 }
617
618 hba = super->hba;
619 /* Intel metadata requires all disks to be attached to the same type of HBA.
620 * Mixing HBA types is not supported.
621 */
622 if (device->type != hba->type)
623 return 2;
624
625 /* Multiple same type HBAs can be used if they share the same OROM */
626 const struct imsm_orom *device_orom = get_orom_by_device_id(device->dev_id);
627
628 if (device_orom != super->orom)
629 return 2;
630
631 while (hba->next)
632 hba = hba->next;
633
634 hba->next = alloc_intel_hba(device);
635 return 1;
636 }
637
638 static struct sys_dev* find_disk_attached_hba(int fd, const char *devname)
639 {
640 struct sys_dev *list, *elem;
641 char *disk_path;
642
643 if ((list = find_intel_devices()) == NULL)
644 return 0;
645
646 if (fd < 0)
647 disk_path = (char *) devname;
648 else
649 disk_path = diskfd_to_devpath(fd);
650
651 if (!disk_path)
652 return 0;
653
654 for (elem = list; elem; elem = elem->next)
655 if (path_attached_to_hba(disk_path, elem->path))
656 return elem;
657
658 if (disk_path != devname)
659 free(disk_path);
660
661 return NULL;
662 }
663
664 static int find_intel_hba_capability(int fd, struct intel_super *super,
665 char *devname);
666
667 static struct supertype *match_metadata_desc_imsm(char *arg)
668 {
669 struct supertype *st;
670
671 if (strcmp(arg, "imsm") != 0 &&
672 strcmp(arg, "default") != 0
673 )
674 return NULL;
675
676 st = xcalloc(1, sizeof(*st));
677 st->ss = &super_imsm;
678 st->max_devs = IMSM_MAX_DEVICES;
679 st->minor_version = 0;
680 st->sb = NULL;
681 return st;
682 }
683
684 static __u8 *get_imsm_version(struct imsm_super *mpb)
685 {
686 return &mpb->sig[MPB_SIG_LEN];
687 }
688
689 /* retrieve a disk directly from the anchor when the anchor is known to be
690 * up-to-date, currently only at load time
691 */
692 static struct imsm_disk *__get_imsm_disk(struct imsm_super *mpb, __u8 index)
693 {
694 if (index >= mpb->num_disks)
695 return NULL;
696 return &mpb->disk[index];
697 }
698
699 /* retrieve the disk description based on an index of the disk
700 * in the sub-array
701 */
702 static struct dl *get_imsm_dl_disk(struct intel_super *super, __u8 index)
703 {
704 struct dl *d;
705
706 for (d = super->disks; d; d = d->next)
707 if (d->index == index)
708 return d;
709
710 return NULL;
711 }
712 /* retrieve a disk from the parsed metadata */
713 static struct imsm_disk *get_imsm_disk(struct intel_super *super, __u8 index)
714 {
715 struct dl *dl;
716
717 dl = get_imsm_dl_disk(super, index);
718 if (dl)
719 return &dl->disk;
720
721 return NULL;
722 }
723
724 /* generate a checksum directly from the anchor when the anchor is known to be
725 * up-to-date, currently only at load or write_super after coalescing
726 */
727 static __u32 __gen_imsm_checksum(struct imsm_super *mpb)
728 {
729 __u32 end = mpb->mpb_size / sizeof(end);
730 __u32 *p = (__u32 *) mpb;
731 __u32 sum = 0;
732
733 while (end--) {
734 sum += __le32_to_cpu(*p);
735 p++;
736 }
737
738 return sum - __le32_to_cpu(mpb->check_sum);
739 }
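/*
 * Editorial note: the anchor checksum is the sum of all little-endian
 * 32-bit words in the first mpb_size bytes, minus the stored check_sum
 * word itself.  Verification is therefore a comparison of the form
 *	__gen_imsm_checksum(mpb) == __le32_to_cpu(mpb->check_sum)
 * which is how examine_super_imsm() reports "correct" or "incorrect".
 */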
740
741 static size_t sizeof_imsm_map(struct imsm_map *map)
742 {
743 return sizeof(struct imsm_map) + sizeof(__u32) * (map->num_members - 1);
744 }
745
746 struct imsm_map *get_imsm_map(struct imsm_dev *dev, int second_map)
747 {
748 /* A device can have 2 maps if it is in the middle of a migration.
749 * If second_map is:
750 * MAP_0 - we return the first map
751 * MAP_1 - we return the second map if it exists, else NULL
752 * MAP_X - we return the second map if it exists, else the first
753 */
754 struct imsm_map *map = &dev->vol.map[0];
755 struct imsm_map *map2 = NULL;
756
757 if (dev->vol.migr_state)
758 map2 = (void *)map + sizeof_imsm_map(map);
759
760 switch (second_map) {
761 case MAP_0:
762 break;
763 case MAP_1:
764 map = map2;
765 break;
766 case MAP_X:
767 if (map2)
768 map = map2;
769 break;
770 default:
771 map = NULL;
772 }
773 return map;
774
775 }
776
777 /* return the size of the device.
778 * migr_state increases the returned size if map[0] were to be duplicated
779 */
780 static size_t sizeof_imsm_dev(struct imsm_dev *dev, int migr_state)
781 {
782 size_t size = sizeof(*dev) - sizeof(struct imsm_map) +
783 sizeof_imsm_map(get_imsm_map(dev, MAP_0));
784
785 /* migrating means an additional map */
786 if (dev->vol.migr_state)
787 size += sizeof_imsm_map(get_imsm_map(dev, MAP_1));
788 else if (migr_state)
789 size += sizeof_imsm_map(get_imsm_map(dev, MAP_0));
790
791 return size;
792 }
793
794 /* retrieve disk serial number list from a metadata update */
795 static struct disk_info *get_disk_info(struct imsm_update_create_array *update)
796 {
797 void *u = update;
798 struct disk_info *inf;
799
800 inf = u + sizeof(*update) - sizeof(struct imsm_dev) +
801 sizeof_imsm_dev(&update->dev, 0);
802
803 return inf;
804 }
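/*
 * Editorial sketch of the update-message layout that get_disk_info()
 * assumes (derived from the pointer arithmetic above, not an upstream
 * comment): the message starts with the fixed imsm_update_create_array
 * header, whose trailing imsm_dev member is actually variable sized
 * (sizeof_imsm_dev(&update->dev, 0) bytes), and the disk_info serial list
 * for the new members follows immediately after that variable-length dev.
 */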
805
806 static struct imsm_dev *__get_imsm_dev(struct imsm_super *mpb, __u8 index)
807 {
808 int offset;
809 int i;
810 void *_mpb = mpb;
811
812 if (index >= mpb->num_raid_devs)
813 return NULL;
814
815 /* devices start after all disks */
816 offset = ((void *) &mpb->disk[mpb->num_disks]) - _mpb;
817
818 for (i = 0; i <= index; i++)
819 if (i == index)
820 return _mpb + offset;
821 else
822 offset += sizeof_imsm_dev(_mpb + offset, 0);
823
824 return NULL;
825 }
826
827 static struct imsm_dev *get_imsm_dev(struct intel_super *super, __u8 index)
828 {
829 struct intel_dev *dv;
830
831 if (index >= super->anchor->num_raid_devs)
832 return NULL;
833 for (dv = super->devlist; dv; dv = dv->next)
834 if (dv->index == index)
835 return dv->dev;
836 return NULL;
837 }
838
839 static inline unsigned long long __le48_to_cpu(const struct bbm_log_block_addr
840 *addr)
841 {
842 return ((((__u64)__le32_to_cpu(addr->dw1)) << 16) |
843 __le16_to_cpu(addr->w1));
844 }
845
846 static inline struct bbm_log_block_addr __cpu_to_le48(unsigned long long sec)
847 {
848 struct bbm_log_block_addr addr;
849
850 addr.w1 = __cpu_to_le16((__u16)(sec & 0xffff));
851 addr.dw1 = __cpu_to_le32((__u32)(sec >> 16) & 0xffffffff);
852 return addr;
853 }
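/*
 * Worked example (editorial): the BBM log stores 48-bit LBAs as a 16-bit
 * word followed by a 32-bit dword.  For an illustrative sector
 * 0x123456789abcULL:
 *	__cpu_to_le48() stores w1 = 0x9abc and dw1 = 0x12345678
 *	__le48_to_cpu() rebuilds (0x12345678 << 16) | 0x9abc == 0x123456789abc
 */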
854
855 /* get size of the bbm log */
856 static __u32 get_imsm_bbm_log_size(struct bbm_log *log)
857 {
858 if (!log || log->entry_count == 0)
859 return 0;
860
861 return sizeof(log->signature) +
862 sizeof(log->entry_count) +
863 log->entry_count * sizeof(struct bbm_log_entry);
864 }
865
866 /* check whether part of the given bad block range is already stored in the bbm log */
867 static int is_stored_in_bbm(struct bbm_log *log, const __u8 idx, const unsigned
868 long long sector, const int length, __u32 *pos)
869 {
870 __u32 i;
871
872 for (i = *pos; i < log->entry_count; i++) {
873 struct bbm_log_entry *entry = &log->marked_block_entries[i];
874 unsigned long long bb_start;
875 unsigned long long bb_end;
876
877 bb_start = __le48_to_cpu(&entry->defective_block_start);
878 bb_end = bb_start + (entry->marked_count + 1);
879
880 if ((entry->disk_ordinal == idx) && (bb_start >= sector) &&
881 (bb_end <= sector + length)) {
882 *pos = i;
883 return 1;
884 }
885 }
886 return 0;
887 }
888
889 /* record new bad block in bbm log */
890 static int record_new_badblock(struct bbm_log *log, const __u8 idx, unsigned
891 long long sector, int length)
892 {
893 int new_bb = 0;
894 __u32 pos = 0;
895 struct bbm_log_entry *entry = NULL;
896
897 while (is_stored_in_bbm(log, idx, sector, length, &pos)) {
898 struct bbm_log_entry *e = &log->marked_block_entries[pos];
899
900 if ((e->marked_count + 1 == BBM_LOG_MAX_LBA_ENTRY_VAL) &&
901 (__le48_to_cpu(&e->defective_block_start) == sector)) {
902 sector += BBM_LOG_MAX_LBA_ENTRY_VAL;
903 length -= BBM_LOG_MAX_LBA_ENTRY_VAL;
904 pos = pos + 1;
905 continue;
906 }
907 entry = e;
908 break;
909 }
910
911 if (entry) {
912 int cnt = (length <= BBM_LOG_MAX_LBA_ENTRY_VAL) ? length :
913 BBM_LOG_MAX_LBA_ENTRY_VAL;
914 entry->defective_block_start = __cpu_to_le48(sector);
915 entry->marked_count = cnt - 1;
916 if (cnt == length)
917 return 1;
918 sector += cnt;
919 length -= cnt;
920 }
921
922 new_bb = ROUND_UP(length, BBM_LOG_MAX_LBA_ENTRY_VAL) /
923 BBM_LOG_MAX_LBA_ENTRY_VAL;
924 if (log->entry_count + new_bb > BBM_LOG_MAX_ENTRIES)
925 return 0;
926
927 while (length > 0) {
928 int cnt = (length <= BBM_LOG_MAX_LBA_ENTRY_VAL) ? length :
929 BBM_LOG_MAX_LBA_ENTRY_VAL;
930 struct bbm_log_entry *entry =
931 &log->marked_block_entries[log->entry_count];
932
933 entry->defective_block_start = __cpu_to_le48(sector);
934 entry->marked_count = cnt - 1;
935 entry->disk_ordinal = idx;
936
937 sector += cnt;
938 length -= cnt;
939
940 log->entry_count++;
941 }
942
943 return new_bb;
944 }
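/*
 * Worked example (editorial): one BBM log entry covers at most
 * BBM_LOG_MAX_LBA_ENTRY_VAL (256) sectors and marked_count stores
 * "blocks - 1".  Recording a previously unknown 600-sector bad range thus
 * creates three entries of 256, 256 and 88 sectors (marked_count 255, 255
 * and 87) and record_new_badblock() returns 3, unless the log would grow
 * past BBM_LOG_MAX_ENTRIES, in which case it returns 0.
 */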
945
946 /* clear all bad blocks for given disk */
947 static void clear_disk_badblocks(struct bbm_log *log, const __u8 idx)
948 {
949 __u32 i = 0;
950
951 while (i < log->entry_count) {
952 struct bbm_log_entry *entries = log->marked_block_entries;
953
954 if (entries[i].disk_ordinal == idx) {
955 if (i < log->entry_count - 1)
956 entries[i] = entries[log->entry_count - 1];
957 log->entry_count--;
958 } else {
959 i++;
960 }
961 }
962 }
963
964 /* clear given bad block */
965 static int clear_badblock(struct bbm_log *log, const __u8 idx, const unsigned
966 long long sector, const int length) {
967 __u32 i = 0;
968
969 while (i < log->entry_count) {
970 struct bbm_log_entry *entries = log->marked_block_entries;
971
972 if ((entries[i].disk_ordinal == idx) &&
973 (__le48_to_cpu(&entries[i].defective_block_start) ==
974 sector) && (entries[i].marked_count + 1 == length)) {
975 if (i < log->entry_count - 1)
976 entries[i] = entries[log->entry_count - 1];
977 log->entry_count--;
978 break;
979 }
980 i++;
981 }
982
983 return 1;
984 }
985
986 /* allocate and load BBM log from metadata */
987 static int load_bbm_log(struct intel_super *super)
988 {
989 struct imsm_super *mpb = super->anchor;
990 __u32 bbm_log_size = __le32_to_cpu(mpb->bbm_log_size);
991
992 super->bbm_log = xcalloc(1, sizeof(struct bbm_log));
993 if (!super->bbm_log)
994 return 1;
995
996 if (bbm_log_size) {
997 struct bbm_log *log = (void *)mpb +
998 __le32_to_cpu(mpb->mpb_size) - bbm_log_size;
999
1000 __u32 entry_count;
1001
1002 if (bbm_log_size < sizeof(log->signature) +
1003 sizeof(log->entry_count))
1004 return 2;
1005
1006 entry_count = __le32_to_cpu(log->entry_count);
1007 if ((__le32_to_cpu(log->signature) != BBM_LOG_SIGNATURE) ||
1008 (entry_count > BBM_LOG_MAX_ENTRIES))
1009 return 3;
1010
1011 if (bbm_log_size !=
1012 sizeof(log->signature) + sizeof(log->entry_count) +
1013 entry_count * sizeof(struct bbm_log_entry))
1014 return 4;
1015
1016 memcpy(super->bbm_log, log, bbm_log_size);
1017 } else {
1018 super->bbm_log->signature = __cpu_to_le32(BBM_LOG_SIGNATURE);
1019 super->bbm_log->entry_count = 0;
1020 }
1021
1022 return 0;
1023 }
1024
1025 /* checks if bad block is within volume boundaries */
1026 static int is_bad_block_in_volume(const struct bbm_log_entry *entry,
1027 const unsigned long long start_sector,
1028 const unsigned long long size)
1029 {
1030 unsigned long long bb_start;
1031 unsigned long long bb_end;
1032
1033 bb_start = __le48_to_cpu(&entry->defective_block_start);
1034 bb_end = bb_start + (entry->marked_count + 1);
1035
1036 if (((bb_start >= start_sector) && (bb_start < start_sector + size)) ||
1037 ((bb_end >= start_sector) && (bb_end <= start_sector + size)))
1038 return 1;
1039
1040 return 0;
1041 }
1042
1043 /* get list of bad blocks on a drive for a volume */
1044 static void get_volume_badblocks(const struct bbm_log *log, const __u8 idx,
1045 const unsigned long long start_sector,
1046 const unsigned long long size,
1047 struct md_bb *bbs)
1048 {
1049 __u32 count = 0;
1050 __u32 i;
1051
1052 for (i = 0; i < log->entry_count; i++) {
1053 const struct bbm_log_entry *ent =
1054 &log->marked_block_entries[i];
1055 struct md_bb_entry *bb;
1056
1057 if ((ent->disk_ordinal == idx) &&
1058 is_bad_block_in_volume(ent, start_sector, size)) {
1059
1060 if (!bbs->entries) {
1061 bbs->entries = xmalloc(BBM_LOG_MAX_ENTRIES *
1062 sizeof(*bb));
1063 if (!bbs->entries)
1064 break;
1065 }
1066
1067 bb = &bbs->entries[count++];
1068 bb->sector = __le48_to_cpu(&ent->defective_block_start);
1069 bb->length = ent->marked_count + 1;
1070 }
1071 }
1072 bbs->count = count;
1073 }
1074
1075 /*
1076 * for second_map:
1077 * == MAP_0 get first map
1078 * == MAP_1 get second map
1079 * == MAP_X then get map according to the current migr_state
1080 */
1081 static __u32 get_imsm_ord_tbl_ent(struct imsm_dev *dev,
1082 int slot,
1083 int second_map)
1084 {
1085 struct imsm_map *map;
1086
1087 map = get_imsm_map(dev, second_map);
1088
1089 /* top byte identifies disk under rebuild */
1090 return __le32_to_cpu(map->disk_ord_tbl[slot]);
1091 }
1092
1093 #define ord_to_idx(ord) (((ord) << 8) >> 8)
1094 static __u32 get_imsm_disk_idx(struct imsm_dev *dev, int slot, int second_map)
1095 {
1096 __u32 ord = get_imsm_ord_tbl_ent(dev, slot, second_map);
1097
1098 return ord_to_idx(ord);
1099 }
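/*
 * Editorial note: a disk_ord_tbl entry keeps the disk index in the low 24
 * bits and flags in the top byte, so ord_to_idx() just masks the flags
 * away.  For an illustrative ord of 0x01000003, ord_to_idx() yields disk
 * index 3 and (ord & IMSM_ORD_REBUILD) indicates the disk is rebuilding.
 */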
1100
1101 static void set_imsm_ord_tbl_ent(struct imsm_map *map, int slot, __u32 ord)
1102 {
1103 map->disk_ord_tbl[slot] = __cpu_to_le32(ord);
1104 }
1105
1106 static int get_imsm_disk_slot(struct imsm_map *map, unsigned idx)
1107 {
1108 int slot;
1109 __u32 ord;
1110
1111 for (slot = 0; slot < map->num_members; slot++) {
1112 ord = __le32_to_cpu(map->disk_ord_tbl[slot]);
1113 if (ord_to_idx(ord) == idx)
1114 return slot;
1115 }
1116
1117 return -1;
1118 }
1119
1120 static int get_imsm_raid_level(struct imsm_map *map)
1121 {
1122 if (map->raid_level == 1) {
1123 if (map->num_members == 2)
1124 return 1;
1125 else
1126 return 10;
1127 }
1128
1129 return map->raid_level;
1130 }
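/*
 * Editorial note: IMSM metadata has no separate RAID10 level; it stores
 * raid_level == 1 and lets the member count disambiguate.  A two-disk
 * level-1 map is reported as RAID1, while e.g. a four-disk level-1 map is
 * reported as RAID10, exactly as get_imsm_raid_level() above decodes it.
 */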
1131
1132 static int cmp_extent(const void *av, const void *bv)
1133 {
1134 const struct extent *a = av;
1135 const struct extent *b = bv;
1136 if (a->start < b->start)
1137 return -1;
1138 if (a->start > b->start)
1139 return 1;
1140 return 0;
1141 }
1142
1143 static int count_memberships(struct dl *dl, struct intel_super *super)
1144 {
1145 int memberships = 0;
1146 int i;
1147
1148 for (i = 0; i < super->anchor->num_raid_devs; i++) {
1149 struct imsm_dev *dev = get_imsm_dev(super, i);
1150 struct imsm_map *map = get_imsm_map(dev, MAP_0);
1151
1152 if (get_imsm_disk_slot(map, dl->index) >= 0)
1153 memberships++;
1154 }
1155
1156 return memberships;
1157 }
1158
1159 static __u32 imsm_min_reserved_sectors(struct intel_super *super);
1160
1161 static int split_ull(unsigned long long n, __u32 *lo, __u32 *hi)
1162 {
1163 if (lo == 0 || hi == 0)
1164 return 1;
1165 *lo = __le32_to_cpu((unsigned)n);
1166 *hi = __le32_to_cpu((unsigned)(n >> 32));
1167 return 0;
1168 }
1169
1170 static unsigned long long join_u32(__u32 lo, __u32 hi)
1171 {
1172 return (unsigned long long)__le32_to_cpu(lo) |
1173 (((unsigned long long)__le32_to_cpu(hi)) << 32);
1174 }
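/*
 * Worked example (editorial): 64-bit quantities are kept as two
 * little-endian 32-bit halves ("_lo"/"_hi").  For an illustrative
 * n = 0x123456780ULL:
 *	split_ull(n, &lo, &hi) stores the low half 0x23456780 and the
 *	high half 0x00000001 (as little-endian words)
 *	join_u32(lo, hi) rebuilds 0x0000000123456780
 * Note that the "== 0" tests in split_ull() guard against NULL pointers,
 * not against a zero value.
 */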
1175
1176 static unsigned long long total_blocks(struct imsm_disk *disk)
1177 {
1178 if (disk == NULL)
1179 return 0;
1180 return join_u32(disk->total_blocks_lo, disk->total_blocks_hi);
1181 }
1182
1183 static unsigned long long pba_of_lba0(struct imsm_map *map)
1184 {
1185 if (map == NULL)
1186 return 0;
1187 return join_u32(map->pba_of_lba0_lo, map->pba_of_lba0_hi);
1188 }
1189
1190 static unsigned long long blocks_per_member(struct imsm_map *map)
1191 {
1192 if (map == NULL)
1193 return 0;
1194 return join_u32(map->blocks_per_member_lo, map->blocks_per_member_hi);
1195 }
1196
1197 static unsigned long long num_data_stripes(struct imsm_map *map)
1198 {
1199 if (map == NULL)
1200 return 0;
1201 return join_u32(map->num_data_stripes_lo, map->num_data_stripes_hi);
1202 }
1203
1204 static void set_total_blocks(struct imsm_disk *disk, unsigned long long n)
1205 {
1206 split_ull(n, &disk->total_blocks_lo, &disk->total_blocks_hi);
1207 }
1208
1209 static void set_pba_of_lba0(struct imsm_map *map, unsigned long long n)
1210 {
1211 split_ull(n, &map->pba_of_lba0_lo, &map->pba_of_lba0_hi);
1212 }
1213
1214 static void set_blocks_per_member(struct imsm_map *map, unsigned long long n)
1215 {
1216 split_ull(n, &map->blocks_per_member_lo, &map->blocks_per_member_hi);
1217 }
1218
1219 static void set_num_data_stripes(struct imsm_map *map, unsigned long long n)
1220 {
1221 split_ull(n, &map->num_data_stripes_lo, &map->num_data_stripes_hi);
1222 }
1223
1224 static struct extent *get_extents(struct intel_super *super, struct dl *dl)
1225 {
1226 /* find a list of used extents on the given physical device */
1227 struct extent *rv, *e;
1228 int i;
1229 int memberships = count_memberships(dl, super);
1230 __u32 reservation;
1231
1232 /* trim the reserved area for spares, so they can join any array
1233 * regardless of whether the OROM has assigned sectors from the
1234 * IMSM_RESERVED_SECTORS region
1235 */
1236 if (dl->index == -1)
1237 reservation = imsm_min_reserved_sectors(super);
1238 else
1239 reservation = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
1240
1241 rv = xcalloc(sizeof(struct extent), (memberships + 1));
1242 e = rv;
1243
1244 for (i = 0; i < super->anchor->num_raid_devs; i++) {
1245 struct imsm_dev *dev = get_imsm_dev(super, i);
1246 struct imsm_map *map = get_imsm_map(dev, MAP_0);
1247
1248 if (get_imsm_disk_slot(map, dl->index) >= 0) {
1249 e->start = pba_of_lba0(map);
1250 e->size = blocks_per_member(map);
1251 e++;
1252 }
1253 }
1254 qsort(rv, memberships, sizeof(*rv), cmp_extent);
1255
1256 /* determine the start of the metadata
1257 * when no raid devices are defined use the default
1258 * ...otherwise allow the metadata to truncate the value
1259 * as is the case with older versions of imsm
1260 */
1261 if (memberships) {
1262 struct extent *last = &rv[memberships - 1];
1263 unsigned long long remainder;
1264
1265 remainder = total_blocks(&dl->disk) - (last->start + last->size);
1266 /* round down to 1k block to satisfy precision of the kernel
1267 * 'size' interface
1268 */
1269 remainder &= ~1UL;
1270 /* make sure remainder is still sane */
1271 if (remainder < (unsigned)ROUND_UP(super->len, 512) >> 9)
1272 remainder = ROUND_UP(super->len, 512) >> 9;
1273 if (reservation > remainder)
1274 reservation = remainder;
1275 }
1276 e->start = total_blocks(&dl->disk) - reservation;
1277 e->size = 0;
1278 return rv;
1279 }
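/*
 * Editorial note: the array returned by get_extents() ends with a sentinel
 * entry whose size is 0 and whose start marks where the reserved metadata
 * region begins; callers locate it with a loop such as
 *	for (i = 0; e[i].size; i++)
 *		continue;
 * and then read e[i].start, as imsm_reserved_sectors() does below.
 */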
1280
1281 /* try to determine how much space is reserved for metadata from
1282 * the last get_extents() entry, otherwise fall back to the
1283 * default
1284 */
1285 static __u32 imsm_reserved_sectors(struct intel_super *super, struct dl *dl)
1286 {
1287 struct extent *e;
1288 int i;
1289 __u32 rv;
1290
1291 /* for spares just return a minimal reservation which will grow
1292 * once the spare is picked up by an array
1293 */
1294 if (dl->index == -1)
1295 return MPB_SECTOR_CNT;
1296
1297 e = get_extents(super, dl);
1298 if (!e)
1299 return MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
1300
1301 /* scroll to last entry */
1302 for (i = 0; e[i].size; i++)
1303 continue;
1304
1305 rv = total_blocks(&dl->disk) - e[i].start;
1306
1307 free(e);
1308
1309 return rv;
1310 }
1311
1312 static int is_spare(struct imsm_disk *disk)
1313 {
1314 return (disk->status & SPARE_DISK) == SPARE_DISK;
1315 }
1316
1317 static int is_configured(struct imsm_disk *disk)
1318 {
1319 return (disk->status & CONFIGURED_DISK) == CONFIGURED_DISK;
1320 }
1321
1322 static int is_failed(struct imsm_disk *disk)
1323 {
1324 return (disk->status & FAILED_DISK) == FAILED_DISK;
1325 }
1326
1327 static int is_journal(struct imsm_disk *disk)
1328 {
1329 return (disk->status & JOURNAL_DISK) == JOURNAL_DISK;
1330 }
1331
1332 /* round array size down to the nearest MB and ensure it splits evenly
1333 * between members
1334 */
1335 static unsigned long long round_size_to_mb(unsigned long long size, unsigned int
1336 disk_count)
1337 {
1338 size /= disk_count;
1339 size = (size >> SECT_PER_MB_SHIFT) << SECT_PER_MB_SHIFT;
1340 size *= disk_count;
1341
1342 return size;
1343 }
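/*
 * Worked example (editorial): with SECT_PER_MB_SHIFT == 11 (2048 sectors
 * per MiB), round_size_to_mb(2000000, 3) first computes 666666 sectors per
 * member, rounds that down to 665600 (325 MiB) and returns 1996800, so the
 * result divides evenly among the three members.
 */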
1344
1345 /* try to determine how much space is reserved for metadata from
1346 * the last get_extents() entry on the smallest active disk,
1347 * otherwise fall back to the default
1348 */
1349 static __u32 imsm_min_reserved_sectors(struct intel_super *super)
1350 {
1351 struct extent *e;
1352 int i;
1353 unsigned long long min_active;
1354 __u32 remainder;
1355 __u32 rv = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
1356 struct dl *dl, *dl_min = NULL;
1357
1358 if (!super)
1359 return rv;
1360
1361 min_active = 0;
1362 for (dl = super->disks; dl; dl = dl->next) {
1363 if (dl->index < 0)
1364 continue;
1365 unsigned long long blocks = total_blocks(&dl->disk);
1366 if (blocks < min_active || min_active == 0) {
1367 dl_min = dl;
1368 min_active = blocks;
1369 }
1370 }
1371 if (!dl_min)
1372 return rv;
1373
1374 /* find last lba used by subarrays on the smallest active disk */
1375 e = get_extents(super, dl_min);
1376 if (!e)
1377 return rv;
1378 for (i = 0; e[i].size; i++)
1379 continue;
1380
1381 remainder = min_active - e[i].start;
1382 free(e);
1383
1384 /* to give priority to recovery we should not require full
1385 IMSM_RESERVED_SECTORS from the spare */
1386 rv = MPB_SECTOR_CNT + NUM_BLOCKS_DIRTY_STRIPE_REGION;
1387
1388 /* if real reservation is smaller use that value */
1389 return (remainder < rv) ? remainder : rv;
1390 }
1391
1392 /*
1393 * Return minimum size of a spare and sector size
1394 * that can be used in this array
1395 */
1396 int get_spare_criteria_imsm(struct supertype *st, struct spare_criteria *c)
1397 {
1398 struct intel_super *super = st->sb;
1399 struct dl *dl;
1400 struct extent *e;
1401 int i;
1402 unsigned long long size = 0;
1403
1404 c->min_size = 0;
1405 c->sector_size = 0;
1406
1407 if (!super)
1408 return -EINVAL;
1409 /* find first active disk in array */
1410 dl = super->disks;
1411 while (dl && (is_failed(&dl->disk) || dl->index == -1))
1412 dl = dl->next;
1413 if (!dl)
1414 return -EINVAL;
1415 /* find last lba used by subarrays */
1416 e = get_extents(super, dl);
1417 if (!e)
1418 return -EINVAL;
1419 for (i = 0; e[i].size; i++)
1420 continue;
1421 if (i > 0)
1422 size = e[i-1].start + e[i-1].size;
1423 free(e);
1424
1425 /* add the amount of space needed for metadata */
1426 size += imsm_min_reserved_sectors(super);
1427
1428 c->min_size = size * 512;
1429 c->sector_size = super->sector_size;
1430
1431 return 0;
1432 }
1433
1434 static int is_gen_migration(struct imsm_dev *dev);
1435
1436 #define IMSM_4K_DIV 8
1437
1438 static __u64 blocks_per_migr_unit(struct intel_super *super,
1439 struct imsm_dev *dev);
1440
1441 static void print_imsm_dev(struct intel_super *super,
1442 struct imsm_dev *dev,
1443 char *uuid,
1444 int disk_idx)
1445 {
1446 __u64 sz;
1447 int slot, i;
1448 struct imsm_map *map = get_imsm_map(dev, MAP_0);
1449 struct imsm_map *map2 = get_imsm_map(dev, MAP_1);
1450 __u32 ord;
1451
1452 printf("\n");
1453 printf("[%.16s]:\n", dev->volume);
1454 printf(" UUID : %s\n", uuid);
1455 printf(" RAID Level : %d", get_imsm_raid_level(map));
1456 if (map2)
1457 printf(" <-- %d", get_imsm_raid_level(map2));
1458 printf("\n");
1459 printf(" Members : %d", map->num_members);
1460 if (map2)
1461 printf(" <-- %d", map2->num_members);
1462 printf("\n");
1463 printf(" Slots : [");
1464 for (i = 0; i < map->num_members; i++) {
1465 ord = get_imsm_ord_tbl_ent(dev, i, MAP_0);
1466 printf("%s", ord & IMSM_ORD_REBUILD ? "_" : "U");
1467 }
1468 printf("]");
1469 if (map2) {
1470 printf(" <-- [");
1471 for (i = 0; i < map2->num_members; i++) {
1472 ord = get_imsm_ord_tbl_ent(dev, i, MAP_1);
1473 printf("%s", ord & IMSM_ORD_REBUILD ? "_" : "U");
1474 }
1475 printf("]");
1476 }
1477 printf("\n");
1478 printf(" Failed disk : ");
1479 if (map->failed_disk_num == 0xff)
1480 printf("none");
1481 else
1482 printf("%i", map->failed_disk_num);
1483 printf("\n");
1484 slot = get_imsm_disk_slot(map, disk_idx);
1485 if (slot >= 0) {
1486 ord = get_imsm_ord_tbl_ent(dev, slot, MAP_X);
1487 printf(" This Slot : %d%s\n", slot,
1488 ord & IMSM_ORD_REBUILD ? " (out-of-sync)" : "");
1489 } else
1490 printf(" This Slot : ?\n");
1491 printf(" Sector Size : %u\n", super->sector_size);
1492 sz = __le32_to_cpu(dev->size_high);
1493 sz <<= 32;
1494 sz += __le32_to_cpu(dev->size_low);
1495 printf(" Array Size : %llu%s\n",
1496 (unsigned long long)sz * 512 / super->sector_size,
1497 human_size(sz * 512));
1498 sz = blocks_per_member(map);
1499 printf(" Per Dev Size : %llu%s\n",
1500 (unsigned long long)sz * 512 / super->sector_size,
1501 human_size(sz * 512));
1502 printf(" Sector Offset : %llu\n",
1503 pba_of_lba0(map));
1504 printf(" Num Stripes : %llu\n",
1505 num_data_stripes(map));
1506 printf(" Chunk Size : %u KiB",
1507 __le16_to_cpu(map->blocks_per_strip) / 2);
1508 if (map2)
1509 printf(" <-- %u KiB",
1510 __le16_to_cpu(map2->blocks_per_strip) / 2);
1511 printf("\n");
1512 printf(" Reserved : %d\n", __le32_to_cpu(dev->reserved_blocks));
1513 printf(" Migrate State : ");
1514 if (dev->vol.migr_state) {
1515 if (migr_type(dev) == MIGR_INIT)
1516 printf("initialize\n");
1517 else if (migr_type(dev) == MIGR_REBUILD)
1518 printf("rebuild\n");
1519 else if (migr_type(dev) == MIGR_VERIFY)
1520 printf("check\n");
1521 else if (migr_type(dev) == MIGR_GEN_MIGR)
1522 printf("general migration\n");
1523 else if (migr_type(dev) == MIGR_STATE_CHANGE)
1524 printf("state change\n");
1525 else if (migr_type(dev) == MIGR_REPAIR)
1526 printf("repair\n");
1527 else
1528 printf("<unknown:%d>\n", migr_type(dev));
1529 } else
1530 printf("idle\n");
1531 printf(" Map State : %s", map_state_str[map->map_state]);
1532 if (dev->vol.migr_state) {
1533 struct imsm_map *map = get_imsm_map(dev, MAP_1);
1534
1535 printf(" <-- %s", map_state_str[map->map_state]);
1536 printf("\n Checkpoint : %u ",
1537 __le32_to_cpu(dev->vol.curr_migr_unit));
1538 if (is_gen_migration(dev) && (slot > 1 || slot < 0))
1539 printf("(N/A)");
1540 else
1541 printf("(%llu)", (unsigned long long)
1542 blocks_per_migr_unit(super, dev));
1543 }
1544 printf("\n");
1545 printf(" Dirty State : %s\n", (dev->vol.dirty & RAIDVOL_DIRTY) ?
1546 "dirty" : "clean");
1547 printf(" RWH Policy : ");
1548 if (dev->rwh_policy == RWH_OFF || dev->rwh_policy == RWH_MULTIPLE_OFF)
1549 printf("off\n");
1550 else if (dev->rwh_policy == RWH_DISTRIBUTED)
1551 printf("PPL distributed\n");
1552 else if (dev->rwh_policy == RWH_JOURNALING_DRIVE)
1553 printf("PPL journaling drive\n");
1554 else if (dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
1555 printf("Multiple distributed PPLs\n");
1556 else if (dev->rwh_policy == RWH_MULTIPLE_PPLS_JOURNALING_DRIVE)
1557 printf("Multiple PPLs on journaling drive\n");
1558 else
1559 printf("<unknown:%d>\n", dev->rwh_policy);
1560 }
1561
1562 static void print_imsm_disk(struct imsm_disk *disk,
1563 int index,
1564 __u32 reserved,
1565 unsigned int sector_size) {
1566 char str[MAX_RAID_SERIAL_LEN + 1];
1567 __u64 sz;
1568
1569 if (index < -1 || !disk)
1570 return;
1571
1572 printf("\n");
1573 snprintf(str, MAX_RAID_SERIAL_LEN + 1, "%s", disk->serial);
1574 if (index >= 0)
1575 printf(" Disk%02d Serial : %s\n", index, str);
1576 else
1577 printf(" Disk Serial : %s\n", str);
1578 printf(" State :%s%s%s%s\n", is_spare(disk) ? " spare" : "",
1579 is_configured(disk) ? " active" : "",
1580 is_failed(disk) ? " failed" : "",
1581 is_journal(disk) ? " journal" : "");
1582 printf(" Id : %08x\n", __le32_to_cpu(disk->scsi_id));
1583 sz = total_blocks(disk) - reserved;
1584 printf(" Usable Size : %llu%s\n",
1585 (unsigned long long)sz * 512 / sector_size,
1586 human_size(sz * 512));
1587 }
1588
1589 void convert_to_4k_imsm_migr_rec(struct intel_super *super)
1590 {
1591 struct migr_record *migr_rec = super->migr_rec;
1592
1593 migr_rec->blocks_per_unit /= IMSM_4K_DIV;
1594 migr_rec->ckpt_area_pba /= IMSM_4K_DIV;
1595 migr_rec->dest_1st_member_lba /= IMSM_4K_DIV;
1596 migr_rec->dest_depth_per_unit /= IMSM_4K_DIV;
1597 split_ull((join_u32(migr_rec->post_migr_vol_cap,
1598 migr_rec->post_migr_vol_cap_hi) / IMSM_4K_DIV),
1599 &migr_rec->post_migr_vol_cap, &migr_rec->post_migr_vol_cap_hi);
1600 }
1601
1602 void convert_to_4k_imsm_disk(struct imsm_disk *disk)
1603 {
1604 set_total_blocks(disk, (total_blocks(disk)/IMSM_4K_DIV));
1605 }
1606
1607 void convert_to_4k(struct intel_super *super)
1608 {
1609 struct imsm_super *mpb = super->anchor;
1610 struct imsm_disk *disk;
1611 int i;
1612 __u32 bbm_log_size = __le32_to_cpu(mpb->bbm_log_size);
1613
1614 for (i = 0; i < mpb->num_disks ; i++) {
1615 disk = __get_imsm_disk(mpb, i);
1616 /* disk */
1617 convert_to_4k_imsm_disk(disk);
1618 }
1619 for (i = 0; i < mpb->num_raid_devs; i++) {
1620 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
1621 struct imsm_map *map = get_imsm_map(dev, MAP_0);
1622 /* dev */
1623 split_ull((join_u32(dev->size_low, dev->size_high)/IMSM_4K_DIV),
1624 &dev->size_low, &dev->size_high);
1625 dev->vol.curr_migr_unit /= IMSM_4K_DIV;
1626
1627 /* map0 */
1628 set_blocks_per_member(map, blocks_per_member(map)/IMSM_4K_DIV);
1629 map->blocks_per_strip /= IMSM_4K_DIV;
1630 set_pba_of_lba0(map, pba_of_lba0(map)/IMSM_4K_DIV);
1631
1632 if (dev->vol.migr_state) {
1633 /* map1 */
1634 map = get_imsm_map(dev, MAP_1);
1635 set_blocks_per_member(map,
1636 blocks_per_member(map)/IMSM_4K_DIV);
1637 map->blocks_per_strip /= IMSM_4K_DIV;
1638 set_pba_of_lba0(map, pba_of_lba0(map)/IMSM_4K_DIV);
1639 }
1640 }
1641 if (bbm_log_size) {
1642 struct bbm_log *log = (void *)mpb +
1643 __le32_to_cpu(mpb->mpb_size) - bbm_log_size;
1644 __u32 i;
1645
1646 for (i = 0; i < log->entry_count; i++) {
1647 struct bbm_log_entry *entry =
1648 &log->marked_block_entries[i];
1649
1650 __u8 count = entry->marked_count + 1;
1651 unsigned long long sector =
1652 __le48_to_cpu(&entry->defective_block_start);
1653
1654 entry->defective_block_start =
1655 __cpu_to_le48(sector/IMSM_4K_DIV);
1656 entry->marked_count = max(count/IMSM_4K_DIV, 1) - 1;
1657 }
1658 }
1659
1660 mpb->check_sum = __gen_imsm_checksum(mpb);
1661 }
1662
1663 void examine_migr_rec_imsm(struct intel_super *super)
1664 {
1665 struct migr_record *migr_rec = super->migr_rec;
1666 struct imsm_super *mpb = super->anchor;
1667 int i;
1668
1669 for (i = 0; i < mpb->num_raid_devs; i++) {
1670 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
1671 struct imsm_map *map;
1672 int slot = -1;
1673
1674 if (is_gen_migration(dev) == 0)
1675 continue;
1676
1677 printf("\nMigration Record Information:");
1678
1679 /* first map under migration */
1680 map = get_imsm_map(dev, MAP_0);
1681 if (map)
1682 slot = get_imsm_disk_slot(map, super->disks->index);
1683 if (map == NULL || slot > 1 || slot < 0) {
1684 printf(" Empty\n ");
1685 printf("Examine one of first two disks in array\n");
1686 break;
1687 }
1688 printf("\n Status : ");
1689 if (__le32_to_cpu(migr_rec->rec_status) == UNIT_SRC_NORMAL)
1690 printf("Normal\n");
1691 else
1692 printf("Contains Data\n");
1693 printf(" Current Unit : %u\n",
1694 __le32_to_cpu(migr_rec->curr_migr_unit));
1695 printf(" Family : %u\n",
1696 __le32_to_cpu(migr_rec->family_num));
1697 printf(" Ascending : %u\n",
1698 __le32_to_cpu(migr_rec->ascending_migr));
1699 printf(" Blocks Per Unit : %u\n",
1700 __le32_to_cpu(migr_rec->blocks_per_unit));
1701 printf(" Dest. Depth Per Unit : %u\n",
1702 __le32_to_cpu(migr_rec->dest_depth_per_unit));
1703 printf(" Checkpoint Area pba : %u\n",
1704 __le32_to_cpu(migr_rec->ckpt_area_pba));
1705 printf(" First member lba : %u\n",
1706 __le32_to_cpu(migr_rec->dest_1st_member_lba));
1707 printf(" Total Number of Units : %u\n",
1708 __le32_to_cpu(migr_rec->num_migr_units));
1709 printf(" Size of volume : %u\n",
1710 __le32_to_cpu(migr_rec->post_migr_vol_cap));
1711 printf(" Expansion space for LBA64 : %u\n",
1712 __le32_to_cpu(migr_rec->post_migr_vol_cap_hi));
1713 printf(" Record was read from : %u\n",
1714 __le32_to_cpu(migr_rec->ckpt_read_disk_num));
1715
1716 break;
1717 }
1718 }
1719
1720 void convert_from_4k_imsm_migr_rec(struct intel_super *super)
1721 {
1722 struct migr_record *migr_rec = super->migr_rec;
1723
1724 migr_rec->blocks_per_unit *= IMSM_4K_DIV;
1725 migr_rec->ckpt_area_pba *= IMSM_4K_DIV;
1726 migr_rec->dest_1st_member_lba *= IMSM_4K_DIV;
1727 migr_rec->dest_depth_per_unit *= IMSM_4K_DIV;
1728 split_ull((join_u32(migr_rec->post_migr_vol_cap,
1729 migr_rec->post_migr_vol_cap_hi) * IMSM_4K_DIV),
1730 &migr_rec->post_migr_vol_cap,
1731 &migr_rec->post_migr_vol_cap_hi);
1732 }
1733
1734 void convert_from_4k(struct intel_super *super)
1735 {
1736 struct imsm_super *mpb = super->anchor;
1737 struct imsm_disk *disk;
1738 int i;
1739 __u32 bbm_log_size = __le32_to_cpu(mpb->bbm_log_size);
1740
1741 for (i = 0; i < mpb->num_disks ; i++) {
1742 disk = __get_imsm_disk(mpb, i);
1743 /* disk */
1744 set_total_blocks(disk, (total_blocks(disk)*IMSM_4K_DIV));
1745 }
1746
1747 for (i = 0; i < mpb->num_raid_devs; i++) {
1748 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
1749 struct imsm_map *map = get_imsm_map(dev, MAP_0);
1750 /* dev */
1751 split_ull((join_u32(dev->size_low, dev->size_high)*IMSM_4K_DIV),
1752 &dev->size_low, &dev->size_high);
1753 dev->vol.curr_migr_unit *= IMSM_4K_DIV;
1754
1755 /* map0 */
1756 set_blocks_per_member(map, blocks_per_member(map)*IMSM_4K_DIV);
1757 map->blocks_per_strip *= IMSM_4K_DIV;
1758 set_pba_of_lba0(map, pba_of_lba0(map)*IMSM_4K_DIV);
1759
1760 if (dev->vol.migr_state) {
1761 /* map1 */
1762 map = get_imsm_map(dev, MAP_1);
1763 set_blocks_per_member(map,
1764 blocks_per_member(map)*IMSM_4K_DIV);
1765 map->blocks_per_strip *= IMSM_4K_DIV;
1766 set_pba_of_lba0(map, pba_of_lba0(map)*IMSM_4K_DIV);
1767 }
1768 }
1769 if (bbm_log_size) {
1770 struct bbm_log *log = (void *)mpb +
1771 __le32_to_cpu(mpb->mpb_size) - bbm_log_size;
1772 __u32 i;
1773
1774 for (i = 0; i < log->entry_count; i++) {
1775 struct bbm_log_entry *entry =
1776 &log->marked_block_entries[i];
1777
1778 __u8 count = entry->marked_count + 1;
1779 unsigned long long sector =
1780 __le48_to_cpu(&entry->defective_block_start);
1781
1782 entry->defective_block_start =
1783 __cpu_to_le48(sector*IMSM_4K_DIV);
1784 entry->marked_count = count*IMSM_4K_DIV - 1;
1785 }
1786 }
1787
1788 mpb->check_sum = __gen_imsm_checksum(mpb);
1789 }
1790
1791 /*******************************************************************************
1792 * function: imsm_check_attributes
1793 * Description: Function checks if features represented by attributes flags
1794 * are supported by mdadm.
1795 * Parameters:
1796 * attributes - Attributes read from metadata
1797 * Returns:
1798 * 0 - passed attributes contain unsupported feature flags
1799 * 1 - all features are supported
1800 ******************************************************************************/
1801 static int imsm_check_attributes(__u32 attributes)
1802 {
1803 int ret_val = 1;
1804 __u32 not_supported = MPB_ATTRIB_SUPPORTED^0xffffffff;
1805
1806 not_supported &= ~MPB_ATTRIB_IGNORED;
1807
1808 not_supported &= attributes;
1809 if (not_supported) {
1810 pr_err("(IMSM): Unsupported attributes : %x\n",
1811 (unsigned)__le32_to_cpu(not_supported));
1812 if (not_supported & MPB_ATTRIB_CHECKSUM_VERIFY) {
1813 dprintf("\t\tMPB_ATTRIB_CHECKSUM_VERIFY \n");
1814 not_supported ^= MPB_ATTRIB_CHECKSUM_VERIFY;
1815 }
1816 if (not_supported & MPB_ATTRIB_2TB) {
1817 dprintf("\t\tMPB_ATTRIB_2TB\n");
1818 not_supported ^= MPB_ATTRIB_2TB;
1819 }
1820 if (not_supported & MPB_ATTRIB_RAID0) {
1821 dprintf("\t\tMPB_ATTRIB_RAID0\n");
1822 not_supported ^= MPB_ATTRIB_RAID0;
1823 }
1824 if (not_supported & MPB_ATTRIB_RAID1) {
1825 dprintf("\t\tMPB_ATTRIB_RAID1\n");
1826 not_supported ^= MPB_ATTRIB_RAID1;
1827 }
1828 if (not_supported & MPB_ATTRIB_RAID10) {
1829 dprintf("\t\tMPB_ATTRIB_RAID10\n");
1830 not_supported ^= MPB_ATTRIB_RAID10;
1831 }
1832 if (not_supported & MPB_ATTRIB_RAID1E) {
1833 dprintf("\t\tMPB_ATTRIB_RAID1E\n");
1834 not_supported ^= MPB_ATTRIB_RAID1E;
1835 }
1836 if (not_supported & MPB_ATTRIB_RAID5) {
1837 dprintf("\t\tMPB_ATTRIB_RAID5\n");
1838 not_supported ^= MPB_ATTRIB_RAID5;
1839 }
1840 if (not_supported & MPB_ATTRIB_RAIDCNG) {
1841 dprintf("\t\tMPB_ATTRIB_RAIDCNG\n");
1842 not_supported ^= MPB_ATTRIB_RAIDCNG;
1843 }
1844 if (not_supported & MPB_ATTRIB_BBM) {
1845 dprintf("\t\tMPB_ATTRIB_BBM\n");
1846 not_supported ^= MPB_ATTRIB_BBM;
1847 }
1848 if (not_supported & MPB_ATTRIB_CHECKSUM_VERIFY) {
1849 dprintf("\t\tMPB_ATTRIB_CHECKSUM_VERIFY (== MPB_ATTRIB_LEGACY)\n");
1850 not_supported ^= MPB_ATTRIB_CHECKSUM_VERIFY;
1851 }
1852 if (not_supported & MPB_ATTRIB_EXP_STRIPE_SIZE) {
1853 dprintf("\t\tMPB_ATTRIB_EXP_STRIP_SIZE\n");
1854 not_supported ^= MPB_ATTRIB_EXP_STRIPE_SIZE;
1855 }
1856 if (not_supported & MPB_ATTRIB_2TB_DISK) {
1857 dprintf("\t\tMPB_ATTRIB_2TB_DISK\n");
1858 not_supported ^= MPB_ATTRIB_2TB_DISK;
1859 }
1860 if (not_supported & MPB_ATTRIB_NEVER_USE2) {
1861 dprintf("\t\tMPB_ATTRIB_NEVER_USE2\n");
1862 not_supported ^= MPB_ATTRIB_NEVER_USE2;
1863 }
1864 if (not_supported & MPB_ATTRIB_NEVER_USE) {
1865 dprintf("\t\tMPB_ATTRIB_NEVER_USE\n");
1866 not_supported ^= MPB_ATTRIB_NEVER_USE;
1867 }
1868
1869 if (not_supported)
1870 dprintf("(IMSM): Unknown attributes : %x\n", not_supported);
1871
1872 ret_val = 0;
1873 }
1874
1875 return ret_val;
1876 }
1877
1878 static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info, char *map);
1879
1880 static void examine_super_imsm(struct supertype *st, char *homehost)
1881 {
1882 struct intel_super *super = st->sb;
1883 struct imsm_super *mpb = super->anchor;
1884 char str[MAX_SIGNATURE_LENGTH];
1885 int i;
1886 struct mdinfo info;
1887 char nbuf[64];
1888 __u32 sum;
1889 __u32 reserved = imsm_reserved_sectors(super, super->disks);
1890 struct dl *dl;
1891
1892 strncpy(str, (char *)mpb->sig, MPB_SIG_LEN);
1893 str[MPB_SIG_LEN-1] = '\0';
1894 printf(" Magic : %s\n", str);
1895 snprintf(str, strlen(MPB_VERSION_RAID0), "%s", get_imsm_version(mpb));
1896 printf(" Version : %s\n", get_imsm_version(mpb));
1897 printf(" Orig Family : %08x\n", __le32_to_cpu(mpb->orig_family_num));
1898 printf(" Family : %08x\n", __le32_to_cpu(mpb->family_num));
1899 printf(" Generation : %08x\n", __le32_to_cpu(mpb->generation_num));
1900 printf(" Attributes : ");
1901 if (imsm_check_attributes(mpb->attributes))
1902 printf("All supported\n");
1903 else
1904 printf("not supported\n");
1905 getinfo_super_imsm(st, &info, NULL);
1906 fname_from_uuid(st, &info, nbuf, ':');
1907 printf(" UUID : %s\n", nbuf + 5);
1908 sum = __le32_to_cpu(mpb->check_sum);
1909 printf(" Checksum : %08x %s\n", sum,
1910 __gen_imsm_checksum(mpb) == sum ? "correct" : "incorrect");
1911 printf(" MPB Sectors : %d\n", mpb_sectors(mpb, super->sector_size));
1912 printf(" Disks : %d\n", mpb->num_disks);
1913 printf(" RAID Devices : %d\n", mpb->num_raid_devs);
1914 print_imsm_disk(__get_imsm_disk(mpb, super->disks->index),
1915 super->disks->index, reserved, super->sector_size);
1916 if (get_imsm_bbm_log_size(super->bbm_log)) {
1917 struct bbm_log *log = super->bbm_log;
1918
1919 printf("\n");
1920 printf("Bad Block Management Log:\n");
1921 printf(" Log Size : %d\n", __le32_to_cpu(mpb->bbm_log_size));
1922 printf(" Signature : %x\n", __le32_to_cpu(log->signature));
1923 printf(" Entry Count : %d\n", __le32_to_cpu(log->entry_count));
1924 }
1925 for (i = 0; i < mpb->num_raid_devs; i++) {
1926 struct mdinfo info;
1927 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
1928
1929 super->current_vol = i;
1930 getinfo_super_imsm(st, &info, NULL);
1931 fname_from_uuid(st, &info, nbuf, ':');
1932 print_imsm_dev(super, dev, nbuf + 5, super->disks->index);
1933 }
1934 for (i = 0; i < mpb->num_disks; i++) {
1935 if (i == super->disks->index)
1936 continue;
1937 print_imsm_disk(__get_imsm_disk(mpb, i), i, reserved,
1938 super->sector_size);
1939 }
1940
1941 for (dl = super->disks; dl; dl = dl->next)
1942 if (dl->index == -1)
1943 print_imsm_disk(&dl->disk, -1, reserved,
1944 super->sector_size);
1945
1946 examine_migr_rec_imsm(super);
1947 }
1948
1949 static void brief_examine_super_imsm(struct supertype *st, int verbose)
1950 {
1951 /* We just write a generic IMSM ARRAY entry */
1952 struct mdinfo info;
1953 char nbuf[64];
1954 struct intel_super *super = st->sb;
1955
1956 if (!super->anchor->num_raid_devs) {
1957 printf("ARRAY metadata=imsm\n");
1958 return;
1959 }
1960
1961 getinfo_super_imsm(st, &info, NULL);
1962 fname_from_uuid(st, &info, nbuf, ':');
1963 printf("ARRAY metadata=imsm UUID=%s\n", nbuf + 5);
1964 }
1965
1966 static void brief_examine_subarrays_imsm(struct supertype *st, int verbose)
1967 {
1968 /* We just write a generic IMSM ARRAY entry */
1969 struct mdinfo info;
1970 char nbuf[64];
1971 char nbuf1[64];
1972 struct intel_super *super = st->sb;
1973 int i;
1974
1975 if (!super->anchor->num_raid_devs)
1976 return;
1977
1978 getinfo_super_imsm(st, &info, NULL);
1979 fname_from_uuid(st, &info, nbuf, ':');
1980 for (i = 0; i < super->anchor->num_raid_devs; i++) {
1981 struct imsm_dev *dev = get_imsm_dev(super, i);
1982
1983 super->current_vol = i;
1984 getinfo_super_imsm(st, &info, NULL);
1985 fname_from_uuid(st, &info, nbuf1, ':');
1986 printf("ARRAY /dev/md/%.16s container=%s member=%d UUID=%s\n",
1987 dev->volume, nbuf + 5, i, nbuf1 + 5);
1988 }
1989 }
1990
1991 static void export_examine_super_imsm(struct supertype *st)
1992 {
1993 struct intel_super *super = st->sb;
1994 struct imsm_super *mpb = super->anchor;
1995 struct mdinfo info;
1996 char nbuf[64];
1997
1998 getinfo_super_imsm(st, &info, NULL);
1999 fname_from_uuid(st, &info, nbuf, ':');
2000 printf("MD_METADATA=imsm\n");
2001 printf("MD_LEVEL=container\n");
2002 printf("MD_UUID=%s\n", nbuf+5);
2003 printf("MD_DEVICES=%u\n", mpb->num_disks);
2004 }
2005
2006 static int copy_metadata_imsm(struct supertype *st, int from, int to)
2007 {
2011 2008 /* The second-to-last sector of the device contains
2012 2009 * the "struct imsm_super" anchor.
2013 2010 * The anchor holds mpb_size, the size in bytes of the
2014 2011 * extended metadata, which is located immediately before
2015 2012 * the imsm_super.
2016 2013 * We want to read all of that, plus the last sector, which
2017 2014 * may contain a migration record, and write it all
2018 2015 * to the target.
2019 2016 */
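/* End-of-device layout, as described above (rough sketch):
 *   [ ... extended metadata ... ][ imsm_super anchor ][ migration record ]
 *                                 second-to-last sector      last sector
 */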
2017 void *buf;
2018 unsigned long long dsize, offset;
2019 int sectors;
2020 struct imsm_super *sb;
2021 struct intel_super *super = st->sb;
2022 unsigned int sector_size = super->sector_size;
2023 unsigned int written = 0;
2024
2025 if (posix_memalign(&buf, MAX_SECTOR_SIZE, MAX_SECTOR_SIZE) != 0)
2026 return 1;
2027
2028 if (!get_dev_size(from, NULL, &dsize))
2029 goto err;
2030
2031 if (lseek64(from, dsize-(2*sector_size), 0) < 0)
2032 goto err;
2033 if ((unsigned int)read(from, buf, sector_size) != sector_size)
2034 goto err;
2035 sb = buf;
2036 if (strncmp((char*)sb->sig, MPB_SIGNATURE, MPB_SIG_LEN) != 0)
2037 goto err;
2038
2039 sectors = mpb_sectors(sb, sector_size) + 2;
2040 offset = dsize - sectors * sector_size;
2041 if (lseek64(from, offset, 0) < 0 ||
2042 lseek64(to, offset, 0) < 0)
2043 goto err;
2044 while (written < sectors * sector_size) {
2045 int n = sectors*sector_size - written;
2046 if (n > 4096)
2047 n = 4096;
2048 if (read(from, buf, n) != n)
2049 goto err;
2050 if (write(to, buf, n) != n)
2051 goto err;
2052 written += n;
2053 }
2054 free(buf);
2055 return 0;
2056 err:
2057 free(buf);
2058 return 1;
2059 }
2060
2061 static void detail_super_imsm(struct supertype *st, char *homehost)
2062 {
2063 struct mdinfo info;
2064 char nbuf[64];
2065
2066 getinfo_super_imsm(st, &info, NULL);
2067 fname_from_uuid(st, &info, nbuf, ':');
2068 printf("\n UUID : %s\n", nbuf + 5);
2069 }
2070
2071 static void brief_detail_super_imsm(struct supertype *st)
2072 {
2073 struct mdinfo info;
2074 char nbuf[64];
2075 getinfo_super_imsm(st, &info, NULL);
2076 fname_from_uuid(st, &info, nbuf, ':');
2077 printf(" UUID=%s", nbuf + 5);
2078 }
2079
2080 static int imsm_read_serial(int fd, char *devname, __u8 *serial);
2081 static void fd2devname(int fd, char *name);
2082
2083 static int ahci_enumerate_ports(const char *hba_path, int port_count, int host_base, int verbose)
2084 {
2085 /* dump an unsorted list of devices attached to AHCI Intel storage
2086 * controller, as well as non-connected ports
2087 */
2088 int hba_len = strlen(hba_path) + 1;
2089 struct dirent *ent;
2090 DIR *dir;
2091 char *path = NULL;
2092 int err = 0;
2093 unsigned long port_mask = (1 << port_count) - 1;
2094
2095 if (port_count > (int)sizeof(port_mask) * 8) {
2096 if (verbose > 0)
2097 pr_err("port_count %d out of range\n", port_count);
2098 return 2;
2099 }
2100
2101 /* scroll through /sys/dev/block looking for devices attached to
2102 * this hba
2103 */
2104 dir = opendir("/sys/dev/block");
2105 if (!dir)
2106 return 1;
2107
2108 for (ent = readdir(dir); ent; ent = readdir(dir)) {
2109 int fd;
2110 char model[64];
2111 char vendor[64];
2112 char buf[1024];
2113 int major, minor;
2114 char *device;
2115 char *c;
2116 int port;
2117 int type;
2118
2119 if (sscanf(ent->d_name, "%d:%d", &major, &minor) != 2)
2120 continue;
2121 path = devt_to_devpath(makedev(major, minor));
2122 if (!path)
2123 continue;
2124 if (!path_attached_to_hba(path, hba_path)) {
2125 free(path);
2126 path = NULL;
2127 continue;
2128 }
2129
2130 /* retrieve the scsi device type */
2131 if (asprintf(&device, "/sys/dev/block/%d:%d/device/xxxxxxx", major, minor) < 0) {
2132 if (verbose > 0)
2133 pr_err("failed to allocate 'device'\n");
2134 err = 2;
2135 break;
2136 }
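/* the "xxxxxxx" placeholder above just reserves enough space so the
 * sprintf() calls below writing "type", "vendor" or "model" into the
 * same buffer cannot overflow it */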
2137 sprintf(device, "/sys/dev/block/%d:%d/device/type", major, minor);
2138 if (load_sys(device, buf, sizeof(buf)) != 0) {
2139 if (verbose > 0)
2140 pr_err("failed to read device type for %s\n",
2141 path);
2142 err = 2;
2143 free(device);
2144 break;
2145 }
2146 type = strtoul(buf, NULL, 10);
2147
2148 /* if it's not a disk print the vendor and model */
2149 if (!(type == 0 || type == 7 || type == 14)) {
2150 vendor[0] = '\0';
2151 model[0] = '\0';
2152 sprintf(device, "/sys/dev/block/%d:%d/device/vendor", major, minor);
2153 if (load_sys(device, buf, sizeof(buf)) == 0) {
2154 strncpy(vendor, buf, sizeof(vendor));
2155 vendor[sizeof(vendor) - 1] = '\0';
2156 c = (char *) &vendor[sizeof(vendor) - 1];
2157 while (isspace(*c) || *c == '\0')
2158 *c-- = '\0';
2159
2160 }
2161 sprintf(device, "/sys/dev/block/%d:%d/device/model", major, minor);
2162 if (load_sys(device, buf, sizeof(buf)) == 0) {
2163 strncpy(model, buf, sizeof(model));
2164 model[sizeof(model) - 1] = '\0';
2165 c = (char *) &model[sizeof(model) - 1];
2166 while (isspace(*c) || *c == '\0')
2167 *c-- = '\0';
2168 }
2169
2170 if (vendor[0] && model[0])
2171 sprintf(buf, "%.64s %.64s", vendor, model);
2172 else
2173 switch (type) { /* numbers from hald/linux/device.c */
2174 case 1: sprintf(buf, "tape"); break;
2175 case 2: sprintf(buf, "printer"); break;
2176 case 3: sprintf(buf, "processor"); break;
2177 case 4:
2178 case 5: sprintf(buf, "cdrom"); break;
2179 case 6: sprintf(buf, "scanner"); break;
2180 case 8: sprintf(buf, "media_changer"); break;
2181 case 9: sprintf(buf, "comm"); break;
2182 case 12: sprintf(buf, "raid"); break;
2183 default: sprintf(buf, "unknown");
2184 }
2185 } else
2186 buf[0] = '\0';
2187 free(device);
2188
2189 /* chop device path to 'host%d' and calculate the port number */
2190 c = strchr(&path[hba_len], '/');
2191 if (!c) {
2192 if (verbose > 0)
2193 pr_err("%s - invalid path name\n", path + hba_len);
2194 err = 2;
2195 break;
2196 }
2197 *c = '\0';
2198 if ((sscanf(&path[hba_len], "ata%d", &port) == 1) ||
2199 ((sscanf(&path[hba_len], "host%d", &port) == 1)))
2200 port -= host_base;
2201 else {
2202 if (verbose > 0) {
2203 *c = '/'; /* repair the full string */
2204 pr_err("failed to determine port number for %s\n",
2205 path);
2206 }
2207 err = 2;
2208 break;
2209 }
2210
2211 /* mark this port as used */
2212 port_mask &= ~(1 << port);
2213
2214 /* print out the device information */
2215 if (buf[0]) {
2216 printf(" Port%d : - non-disk device (%s) -\n", port, buf);
2217 continue;
2218 }
2219
2220 fd = dev_open(ent->d_name, O_RDONLY);
2221 if (fd < 0)
2222 printf(" Port%d : - disk info unavailable -\n", port);
2223 else {
2224 fd2devname(fd, buf);
2225 printf(" Port%d : %s", port, buf);
2226 if (imsm_read_serial(fd, NULL, (__u8 *) buf) == 0)
2227 printf(" (%.*s)\n", MAX_RAID_SERIAL_LEN, buf);
2228 else
2229 printf(" ()\n");
2230 close(fd);
2231 }
2232 free(path);
2233 path = NULL;
2234 }
2235 if (path)
2236 free(path);
2237 if (dir)
2238 closedir(dir);
2239 if (err == 0) {
2240 int i;
2241
2242 for (i = 0; i < port_count; i++)
2243 if (port_mask & (1 << i))
2244 printf(" Port%d : - no device attached -\n", i);
2245 }
2246
2247 return err;
2248 }
2249
2250 static int print_vmd_attached_devs(struct sys_dev *hba)
2251 {
2252 struct dirent *ent;
2253 DIR *dir;
2254 char path[292];
2255 char link[256];
2256 char *c, *rp;
2257
2258 if (hba->type != SYS_DEV_VMD)
2259 return 1;
2260
2264 2261 /* scroll through /sys/bus/pci/drivers/nvme looking for NVMe devices
2265 2262 * attached to this hba
2266 2263 */
2264 dir = opendir("/sys/bus/pci/drivers/nvme");
2265 if (!dir)
2266 return 1;
2267
2268 for (ent = readdir(dir); ent; ent = readdir(dir)) {
2269 int n;
2270
2271 /* is 'ent' a device? check that the 'subsystem' link exists and
2272 * that its target matches 'bus'
2273 */
2274 sprintf(path, "/sys/bus/pci/drivers/nvme/%s/subsystem",
2275 ent->d_name);
2276 n = readlink(path, link, sizeof(link));
2277 if (n < 0 || n >= (int)sizeof(link))
2278 continue;
2279 link[n] = '\0';
2280 c = strrchr(link, '/');
2281 if (!c)
2282 continue;
2283 if (strncmp("pci", c+1, strlen("pci")) != 0)
2284 continue;
2285
2286 sprintf(path, "/sys/bus/pci/drivers/nvme/%s", ent->d_name);
2287
2288 rp = realpath(path, NULL);
2289 if (!rp)
2290 continue;
2291
2292 if (path_attached_to_hba(rp, hba->path)) {
2293 printf(" NVMe under VMD : %s\n", rp);
2294 }
2295 free(rp);
2296 }
2297
2298 closedir(dir);
2299 return 0;
2300 }
2301
2302 static void print_found_intel_controllers(struct sys_dev *elem)
2303 {
2304 for (; elem; elem = elem->next) {
2305 pr_err("found Intel(R) ");
2306 if (elem->type == SYS_DEV_SATA)
2307 fprintf(stderr, "SATA ");
2308 else if (elem->type == SYS_DEV_SAS)
2309 fprintf(stderr, "SAS ");
2310 else if (elem->type == SYS_DEV_NVME)
2311 fprintf(stderr, "NVMe ");
2312
2313 if (elem->type == SYS_DEV_VMD)
2314 fprintf(stderr, "VMD domain");
2315 else
2316 fprintf(stderr, "RAID controller");
2317
2318 if (elem->pci_id)
2319 fprintf(stderr, " at %s", elem->pci_id);
2320 fprintf(stderr, ".\n");
2321 }
2322 fflush(stderr);
2323 }
2324
2325 static int ahci_get_port_count(const char *hba_path, int *port_count)
2326 {
2327 struct dirent *ent;
2328 DIR *dir;
2329 int host_base = -1;
2330
2331 *port_count = 0;
2332 if ((dir = opendir(hba_path)) == NULL)
2333 return -1;
2334
2335 for (ent = readdir(dir); ent; ent = readdir(dir)) {
2336 int host;
2337
2338 if ((sscanf(ent->d_name, "ata%d", &host) != 1) &&
2339 ((sscanf(ent->d_name, "host%d", &host) != 1)))
2340 continue;
2341 if (*port_count == 0)
2342 host_base = host;
2343 else if (host < host_base)
2344 host_base = host;
2345
2346 if (host + 1 > *port_count + host_base)
2347 *port_count = host + 1 - host_base;
2348 }
2349 closedir(dir);
2350 return host_base;
2351 }
2352
2353 static void print_imsm_capability(const struct imsm_orom *orom)
2354 {
2355 printf(" Platform : Intel(R) ");
2356 if (orom->capabilities == 0 && orom->driver_features == 0)
2357 printf("Matrix Storage Manager\n");
2358 else
2359 printf("Rapid Storage Technology%s\n",
2360 imsm_orom_is_enterprise(orom) ? " enterprise" : "");
2361 if (orom->major_ver || orom->minor_ver || orom->hotfix_ver || orom->build)
2362 printf(" Version : %d.%d.%d.%d\n", orom->major_ver,
2363 orom->minor_ver, orom->hotfix_ver, orom->build);
2364 printf(" RAID Levels :%s%s%s%s%s\n",
2365 imsm_orom_has_raid0(orom) ? " raid0" : "",
2366 imsm_orom_has_raid1(orom) ? " raid1" : "",
2367 imsm_orom_has_raid1e(orom) ? " raid1e" : "",
2368 imsm_orom_has_raid10(orom) ? " raid10" : "",
2369 imsm_orom_has_raid5(orom) ? " raid5" : "");
2370 printf(" Chunk Sizes :%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
2371 imsm_orom_has_chunk(orom, 2) ? " 2k" : "",
2372 imsm_orom_has_chunk(orom, 4) ? " 4k" : "",
2373 imsm_orom_has_chunk(orom, 8) ? " 8k" : "",
2374 imsm_orom_has_chunk(orom, 16) ? " 16k" : "",
2375 imsm_orom_has_chunk(orom, 32) ? " 32k" : "",
2376 imsm_orom_has_chunk(orom, 64) ? " 64k" : "",
2377 imsm_orom_has_chunk(orom, 128) ? " 128k" : "",
2378 imsm_orom_has_chunk(orom, 256) ? " 256k" : "",
2379 imsm_orom_has_chunk(orom, 512) ? " 512k" : "",
2380 imsm_orom_has_chunk(orom, 1024*1) ? " 1M" : "",
2381 imsm_orom_has_chunk(orom, 1024*2) ? " 2M" : "",
2382 imsm_orom_has_chunk(orom, 1024*4) ? " 4M" : "",
2383 imsm_orom_has_chunk(orom, 1024*8) ? " 8M" : "",
2384 imsm_orom_has_chunk(orom, 1024*16) ? " 16M" : "",
2385 imsm_orom_has_chunk(orom, 1024*32) ? " 32M" : "",
2386 imsm_orom_has_chunk(orom, 1024*64) ? " 64M" : "");
2387 printf(" 2TB volumes :%s supported\n",
2388 (orom->attr & IMSM_OROM_ATTR_2TB)?"":" not");
2389 printf(" 2TB disks :%s supported\n",
2390 (orom->attr & IMSM_OROM_ATTR_2TB_DISK)?"":" not");
2391 printf(" Max Disks : %d\n", orom->tds);
2392 printf(" Max Volumes : %d per array, %d per %s\n",
2393 orom->vpa, orom->vphba,
2394 imsm_orom_is_nvme(orom) ? "platform" : "controller");
2395 return;
2396 }
2397
2398 static void print_imsm_capability_export(const struct imsm_orom *orom)
2399 {
2400 printf("MD_FIRMWARE_TYPE=imsm\n");
2401 if (orom->major_ver || orom->minor_ver || orom->hotfix_ver || orom->build)
2402 printf("IMSM_VERSION=%d.%d.%d.%d\n", orom->major_ver, orom->minor_ver,
2403 orom->hotfix_ver, orom->build);
2404 printf("IMSM_SUPPORTED_RAID_LEVELS=%s%s%s%s%s\n",
2405 imsm_orom_has_raid0(orom) ? "raid0 " : "",
2406 imsm_orom_has_raid1(orom) ? "raid1 " : "",
2407 imsm_orom_has_raid1e(orom) ? "raid1e " : "",
2411 2408 imsm_orom_has_raid5(orom) ? "raid5 " : "",
2412 2409 imsm_orom_has_raid10(orom) ? "raid10 " : "");
2410 printf("IMSM_SUPPORTED_CHUNK_SIZES=%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
2411 imsm_orom_has_chunk(orom, 2) ? "2k " : "",
2412 imsm_orom_has_chunk(orom, 4) ? "4k " : "",
2413 imsm_orom_has_chunk(orom, 8) ? "8k " : "",
2414 imsm_orom_has_chunk(orom, 16) ? "16k " : "",
2415 imsm_orom_has_chunk(orom, 32) ? "32k " : "",
2416 imsm_orom_has_chunk(orom, 64) ? "64k " : "",
2417 imsm_orom_has_chunk(orom, 128) ? "128k " : "",
2418 imsm_orom_has_chunk(orom, 256) ? "256k " : "",
2419 imsm_orom_has_chunk(orom, 512) ? "512k " : "",
2420 imsm_orom_has_chunk(orom, 1024*1) ? "1M " : "",
2421 imsm_orom_has_chunk(orom, 1024*2) ? "2M " : "",
2422 imsm_orom_has_chunk(orom, 1024*4) ? "4M " : "",
2423 imsm_orom_has_chunk(orom, 1024*8) ? "8M " : "",
2424 imsm_orom_has_chunk(orom, 1024*16) ? "16M " : "",
2425 imsm_orom_has_chunk(orom, 1024*32) ? "32M " : "",
2426 imsm_orom_has_chunk(orom, 1024*64) ? "64M " : "");
2427 printf("IMSM_2TB_VOLUMES=%s\n",(orom->attr & IMSM_OROM_ATTR_2TB) ? "yes" : "no");
2428 printf("IMSM_2TB_DISKS=%s\n",(orom->attr & IMSM_OROM_ATTR_2TB_DISK) ? "yes" : "no");
2429 printf("IMSM_MAX_DISKS=%d\n",orom->tds);
2430 printf("IMSM_MAX_VOLUMES_PER_ARRAY=%d\n",orom->vpa);
2431 printf("IMSM_MAX_VOLUMES_PER_CONTROLLER=%d\n",orom->vphba);
2432 }
2433
2434 static int detail_platform_imsm(int verbose, int enumerate_only, char *controller_path)
2435 {
2436 /* There are two components to imsm platform support, the ahci SATA
2437 * controller and the option-rom. To find the SATA controller we
2438 * simply look in /sys/bus/pci/drivers/ahci to see if an ahci
2439 * controller with the Intel vendor id is present. This approach
2440 * allows mdadm to leverage the kernel's ahci detection logic, with the
2441 * caveat that if ahci.ko is not loaded mdadm will not be able to
2442 * detect platform raid capabilities. The option-rom resides in a
2443 * platform "Adapter ROM". We scan for its signature to retrieve the
2444 * platform capabilities. If raid support is disabled in the BIOS the
2445 * option-rom capability structure will not be available.
2446 */
2447 struct sys_dev *list, *hba;
2448 int host_base = 0;
2449 int port_count = 0;
2450 int result=1;
2451
2452 if (enumerate_only) {
2453 if (check_env("IMSM_NO_PLATFORM"))
2454 return 0;
2455 list = find_intel_devices();
2456 if (!list)
2457 return 2;
2458 for (hba = list; hba; hba = hba->next) {
2459 if (find_imsm_capability(hba)) {
2460 result = 0;
2461 break;
2462 }
2463 else
2464 result = 2;
2465 }
2466 return result;
2467 }
2468
2469 list = find_intel_devices();
2470 if (!list) {
2471 if (verbose > 0)
2472 pr_err("no active Intel(R) RAID controller found.\n");
2473 return 2;
2474 } else if (verbose > 0)
2475 print_found_intel_controllers(list);
2476
2477 for (hba = list; hba; hba = hba->next) {
2478 if (controller_path && (compare_paths(hba->path, controller_path) != 0))
2479 continue;
2480 if (!find_imsm_capability(hba)) {
2481 char buf[PATH_MAX];
2482 pr_err("imsm capabilities not found for controller: %s (type %s)\n",
2483 hba->type == SYS_DEV_VMD ? vmd_domain_to_controller(hba, buf) : hba->path,
2484 get_sys_dev_type(hba->type));
2485 continue;
2486 }
2487 result = 0;
2488 }
2489
2490 if (controller_path && result == 1) {
2491 pr_err("no active Intel(R) RAID controller found under %s\n",
2492 controller_path);
2493 return result;
2494 }
2495
2496 const struct orom_entry *entry;
2497
2498 for (entry = orom_entries; entry; entry = entry->next) {
2499 if (entry->type == SYS_DEV_VMD) {
2500 print_imsm_capability(&entry->orom);
2501 printf(" 3rd party NVMe :%s supported\n",
2502 imsm_orom_has_tpv_support(&entry->orom)?"":" not");
2503 for (hba = list; hba; hba = hba->next) {
2504 if (hba->type == SYS_DEV_VMD) {
2505 char buf[PATH_MAX];
2506 printf(" I/O Controller : %s (%s)\n",
2507 vmd_domain_to_controller(hba, buf), get_sys_dev_type(hba->type));
2508 if (print_vmd_attached_devs(hba)) {
2509 if (verbose > 0)
2510 pr_err("failed to get devices attached to VMD domain.\n");
2511 result |= 2;
2512 }
2513 }
2514 }
2515 printf("\n");
2516 continue;
2517 }
2518
2519 print_imsm_capability(&entry->orom);
2520 if (entry->type == SYS_DEV_NVME) {
2521 for (hba = list; hba; hba = hba->next) {
2522 if (hba->type == SYS_DEV_NVME)
2523 printf(" NVMe Device : %s\n", hba->path);
2524 }
2525 printf("\n");
2526 continue;
2527 }
2528
2529 struct devid_list *devid;
2530 for (devid = entry->devid_list; devid; devid = devid->next) {
2531 hba = device_by_id(devid->devid);
2532 if (!hba)
2533 continue;
2534
2535 printf(" I/O Controller : %s (%s)\n",
2536 hba->path, get_sys_dev_type(hba->type));
2537 if (hba->type == SYS_DEV_SATA) {
2538 host_base = ahci_get_port_count(hba->path, &port_count);
2539 if (ahci_enumerate_ports(hba->path, port_count, host_base, verbose)) {
2540 if (verbose > 0)
2541 pr_err("failed to enumerate ports on SATA controller at %s.\n", hba->pci_id);
2542 result |= 2;
2543 }
2544 }
2545 }
2546 printf("\n");
2547 }
2548
2549 return result;
2550 }
2551
2552 static int export_detail_platform_imsm(int verbose, char *controller_path)
2553 {
2554 struct sys_dev *list, *hba;
2555 int result=1;
2556
2557 list = find_intel_devices();
2558 if (!list) {
2559 if (verbose > 0)
2560 pr_err("IMSM_DETAIL_PLATFORM_ERROR=NO_INTEL_DEVICES\n");
2561 result = 2;
2562 return result;
2563 }
2564
2565 for (hba = list; hba; hba = hba->next) {
2566 if (controller_path && (compare_paths(hba->path,controller_path) != 0))
2567 continue;
2568 if (!find_imsm_capability(hba) && verbose > 0) {
2569 char buf[PATH_MAX];
2570 pr_err("IMSM_DETAIL_PLATFORM_ERROR=NO_IMSM_CAPABLE_DEVICE_UNDER_%s\n",
2571 hba->type == SYS_DEV_VMD ? vmd_domain_to_controller(hba, buf) : hba->path);
2572 }
2573 else
2574 result = 0;
2575 }
2576
2577 const struct orom_entry *entry;
2578
2579 for (entry = orom_entries; entry; entry = entry->next) {
2580 if (entry->type == SYS_DEV_VMD) {
2581 for (hba = list; hba; hba = hba->next)
2582 print_imsm_capability_export(&entry->orom);
2583 continue;
2584 }
2585 print_imsm_capability_export(&entry->orom);
2586 }
2587
2588 return result;
2589 }
2590
2591 static int match_home_imsm(struct supertype *st, char *homehost)
2592 {
2593 /* the imsm metadata format does not specify any host
2594 * identification information. We return -1 since we can never
2595 * confirm nor deny whether a given array is "meant" for this
2596 * host. We rely on compare_super and the 'family_num' fields to
2597 * exclude member disks that do not belong, and we rely on
2598 * mdadm.conf to specify the arrays that should be assembled.
2599 * Auto-assembly may still pick up "foreign" arrays.
2600 */
2601
2602 return -1;
2603 }
2604
2605 static void uuid_from_super_imsm(struct supertype *st, int uuid[4])
2606 {
2607 /* The uuid returned here is used for:
2608 * uuid to put into bitmap file (Create, Grow)
2609 * uuid for backup header when saving critical section (Grow)
2610 * comparing uuids when re-adding a device into an array
2611 * In these cases the uuid required is that of the data-array,
2612 * not the device-set.
2613 * uuid to recognise same set when adding a missing device back
2614 * to an array. This is a uuid for the device-set.
2615 *
2616 * For each of these we can make do with a truncated
2617 * or hashed uuid rather than the original, as long as
2618 * everyone agrees.
2619 * In each case the uuid required is that of the data-array,
2620 * not the device-set.
2621 */
2625 2622 /* imsm does not track uuid's so we synthesize one using sha1 on
2626 2623 * - The signature (which is constant for all imsm arrays, but no matter)
2624 * - the orig_family_num of the container
2625 * - the index number of the volume
2626 * - the 'serial' number of the volume.
2627 * Hopefully these are all constant.
2628 */
2629 struct intel_super *super = st->sb;
2630
2631 char buf[20];
2632 struct sha1_ctx ctx;
2633 struct imsm_dev *dev = NULL;
2634 __u32 family_num;
2635
2636 /* some mdadm versions failed to set ->orig_family_num, in which
2637 * case fall back to ->family_num. orig_family_num will be
2638 * fixed up with the first metadata update.
2639 */
2640 family_num = super->anchor->orig_family_num;
2641 if (family_num == 0)
2642 family_num = super->anchor->family_num;
2643 sha1_init_ctx(&ctx);
2644 sha1_process_bytes(super->anchor->sig, MPB_SIG_LEN, &ctx);
2645 sha1_process_bytes(&family_num, sizeof(__u32), &ctx);
2646 if (super->current_vol >= 0)
2647 dev = get_imsm_dev(super, super->current_vol);
2648 if (dev) {
2649 __u32 vol = super->current_vol;
2650 sha1_process_bytes(&vol, sizeof(vol), &ctx);
2651 sha1_process_bytes(dev->volume, MAX_RAID_SERIAL_LEN, &ctx);
2652 }
2653 sha1_finish_ctx(&ctx, buf);
2654 memcpy(uuid, buf, 4*4);
2655 }
2656
2657 #if 0
2658 static void
2659 get_imsm_numerical_version(struct imsm_super *mpb, int *m, int *p)
2660 {
2661 __u8 *v = get_imsm_version(mpb);
2662 __u8 *end = mpb->sig + MAX_SIGNATURE_LENGTH;
2663 char major[] = { 0, 0, 0 };
2664 char minor[] = { 0 ,0, 0 };
2665 char patch[] = { 0, 0, 0 };
2666 char *ver_parse[] = { major, minor, patch };
2667 int i, j;
2668
2669 i = j = 0;
2670 while (*v != '\0' && v < end) {
2671 if (*v != '.' && j < 2)
2672 ver_parse[i][j++] = *v;
2673 else {
2674 i++;
2675 j = 0;
2676 }
2677 v++;
2678 }
2679
2680 *m = strtol(minor, NULL, 0);
2681 *p = strtol(patch, NULL, 0);
2682 }
2683 #endif
2684
2685 static __u32 migr_strip_blocks_resync(struct imsm_dev *dev)
2686 {
2687 /* migr_strip_size when repairing or initializing parity */
2688 struct imsm_map *map = get_imsm_map(dev, MAP_0);
2689 __u32 chunk = __le32_to_cpu(map->blocks_per_strip);
2690
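/* RAID5 and RAID10 use the volume's chunk size as the migration strip;
 * everything else falls back to 128 KiB, expressed here in 512-byte
 * sectors (128*1024 >> 9 == 256).
 */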
2691 switch (get_imsm_raid_level(map)) {
2692 case 5:
2693 case 10:
2694 return chunk;
2695 default:
2696 return 128*1024 >> 9;
2697 }
2698 }
2699
2700 static __u32 migr_strip_blocks_rebuild(struct imsm_dev *dev)
2701 {
2702 /* migr_strip_size when rebuilding a degraded disk, no idea why
2706 2703 * this is different from migr_strip_blocks_resync(), but it's good
2704 * to be compatible
2705 */
2706 struct imsm_map *map = get_imsm_map(dev, MAP_1);
2707 __u32 chunk = __le32_to_cpu(map->blocks_per_strip);
2708
2709 switch (get_imsm_raid_level(map)) {
2710 case 1:
2711 case 10:
2712 if (map->num_members % map->num_domains == 0)
2713 return 128*1024 >> 9;
2714 else
2715 return chunk;
2716 case 5:
2717 return max((__u32) 64*1024 >> 9, chunk);
2718 default:
2719 return 128*1024 >> 9;
2720 }
2721 }
2722
2723 static __u32 num_stripes_per_unit_resync(struct imsm_dev *dev)
2724 {
2725 struct imsm_map *lo = get_imsm_map(dev, MAP_0);
2726 struct imsm_map *hi = get_imsm_map(dev, MAP_1);
2727 __u32 lo_chunk = __le32_to_cpu(lo->blocks_per_strip);
2728 __u32 hi_chunk = __le32_to_cpu(hi->blocks_per_strip);
2729
2730 return max((__u32) 1, hi_chunk / lo_chunk);
2731 }
2732
2733 static __u32 num_stripes_per_unit_rebuild(struct imsm_dev *dev)
2734 {
2735 struct imsm_map *lo = get_imsm_map(dev, MAP_0);
2736 int level = get_imsm_raid_level(lo);
2737
2738 if (level == 1 || level == 10) {
2739 struct imsm_map *hi = get_imsm_map(dev, MAP_1);
2740
2741 return hi->num_domains;
2742 } else
2743 return num_stripes_per_unit_resync(dev);
2744 }
2745
2746 static __u8 imsm_num_data_members(struct imsm_dev *dev, int second_map)
2747 {
2748 /* named 'imsm_' because raid0, raid1 and raid10
2749 * counter-intuitively have the same number of data disks
2750 */
2751 struct imsm_map *map = get_imsm_map(dev, second_map);
2752
2753 switch (get_imsm_raid_level(map)) {
2754 case 0:
2755 return map->num_members;
2756 break;
2757 case 1:
2758 case 10:
2759 return map->num_members/2;
2760 case 5:
2761 return map->num_members - 1;
2762 default:
2763 dprintf("unsupported raid level\n");
2764 return 0;
2765 }
2766 }
2767
2768 static __u32 parity_segment_depth(struct imsm_dev *dev)
2769 {
2770 struct imsm_map *map = get_imsm_map(dev, MAP_0);
2771 __u32 chunk = __le32_to_cpu(map->blocks_per_strip);
2772
2773 switch(get_imsm_raid_level(map)) {
2774 case 1:
2775 case 10:
2776 return chunk * map->num_domains;
2777 case 5:
2778 return chunk * map->num_members;
2779 default:
2780 return chunk;
2781 }
2782 }
2783
2784 static __u32 map_migr_block(struct imsm_dev *dev, __u32 block)
2785 {
2786 struct imsm_map *map = get_imsm_map(dev, MAP_1);
2787 __u32 chunk = __le32_to_cpu(map->blocks_per_strip);
2788 __u32 strip = block / chunk;
2789
2790 switch (get_imsm_raid_level(map)) {
2791 case 1:
2792 case 10: {
2793 __u32 vol_strip = (strip * map->num_domains) + 1;
2794 __u32 vol_stripe = vol_strip / map->num_members;
2795
2796 return vol_stripe * chunk + block % chunk;
2797 } case 5: {
2798 __u32 stripe = strip / (map->num_members - 1);
2799
2800 return stripe * chunk + block % chunk;
2801 }
2802 default:
2803 return 0;
2804 }
2805 }
2806
2807 static __u64 blocks_per_migr_unit(struct intel_super *super,
2808 struct imsm_dev *dev)
2809 {
2810 /* calculate the conversion factor between per member 'blocks'
2811 * (md/{resync,rebuild}_start) and imsm migration units, return
2812 * 0 for the 'not migrating' and 'unsupported migration' cases
2813 */
2814 if (!dev->vol.migr_state)
2815 return 0;
2816
2817 switch (migr_type(dev)) {
2818 case MIGR_GEN_MIGR: {
2819 struct migr_record *migr_rec = super->migr_rec;
2820 return __le32_to_cpu(migr_rec->blocks_per_unit);
2821 }
2822 case MIGR_VERIFY:
2823 case MIGR_REPAIR:
2824 case MIGR_INIT: {
2825 struct imsm_map *map = get_imsm_map(dev, MAP_0);
2826 __u32 stripes_per_unit;
2827 __u32 blocks_per_unit;
2828 __u32 parity_depth;
2829 __u32 migr_chunk;
2830 __u32 block_map;
2831 __u32 block_rel;
2832 __u32 segment;
2833 __u32 stripe;
2834 __u8 disks;
2835
2836 /* yes, this is really the translation of migr_units to
2837 * per-member blocks in the 'resync' case
2838 */
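/* rough outline of the math below: blocks_per_unit is the number of
 * per-disk blocks one migration unit covers across all data disks; it
 * is split into whole volume stripes (segment) plus a remainder
 * (block_rel), the remainder is remapped to a per-member offset by
 * map_migr_block(), and the result is then advanced by parity_depth
 * for every whole stripe.
 */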
2839 stripes_per_unit = num_stripes_per_unit_resync(dev);
2840 migr_chunk = migr_strip_blocks_resync(dev);
2841 disks = imsm_num_data_members(dev, MAP_0);
2842 blocks_per_unit = stripes_per_unit * migr_chunk * disks;
2843 stripe = __le16_to_cpu(map->blocks_per_strip) * disks;
2844 segment = blocks_per_unit / stripe;
2845 block_rel = blocks_per_unit - segment * stripe;
2846 parity_depth = parity_segment_depth(dev);
2847 block_map = map_migr_block(dev, block_rel);
2848 return block_map + parity_depth * segment;
2849 }
2850 case MIGR_REBUILD: {
2851 __u32 stripes_per_unit;
2852 __u32 migr_chunk;
2853
2854 stripes_per_unit = num_stripes_per_unit_rebuild(dev);
2855 migr_chunk = migr_strip_blocks_rebuild(dev);
2856 return migr_chunk * stripes_per_unit;
2857 }
2858 case MIGR_STATE_CHANGE:
2859 default:
2860 return 0;
2861 }
2862 }
2863
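/* Translate an IMSM RAID level into the md layout value exposed to the
 * kernel. 0x102 is md's raid10 layout encoding for "near=2, far=1"
 * (the classic n2 layout), which is what a two-copy IMSM raid10 maps to.
 */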
2864 static int imsm_level_to_layout(int level)
2865 {
2866 switch (level) {
2867 case 0:
2868 case 1:
2869 return 0;
2870 case 5:
2871 case 6:
2872 return ALGORITHM_LEFT_ASYMMETRIC;
2873 case 10:
2874 return 0x102;
2875 }
2876 return UnSet;
2877 }
2878
2879 /*******************************************************************************
2880 * Function: read_imsm_migr_rec
2881 * Description: Function reads imsm migration record from last sector of disk
2882 * Parameters:
2883 * fd : disk descriptor
2884 * super : metadata info
2885 * Returns:
2886 * 0 : success,
2887 * -1 : fail
2888 ******************************************************************************/
2889 static int read_imsm_migr_rec(int fd, struct intel_super *super)
2890 {
2891 int ret_val = -1;
2892 unsigned int sector_size = super->sector_size;
2893 unsigned long long dsize;
2894
2895 get_dev_size(fd, NULL, &dsize);
2896 if (lseek64(fd, dsize - (sector_size*MIGR_REC_SECTOR_POSITION),
2897 SEEK_SET) < 0) {
2898 pr_err("Cannot seek to anchor block: %s\n",
2899 strerror(errno));
2900 goto out;
2901 }
2902 if ((unsigned int)read(fd, super->migr_rec_buf,
2903 MIGR_REC_BUF_SECTORS*sector_size) !=
2904 MIGR_REC_BUF_SECTORS*sector_size) {
2905 pr_err("Cannot read migr record block: %s\n",
2906 strerror(errno));
2907 goto out;
2908 }
2909 ret_val = 0;
2910 if (sector_size == 4096)
2911 convert_from_4k_imsm_migr_rec(super);
2912
2913 out:
2914 return ret_val;
2915 }
2916
2917 static struct imsm_dev *imsm_get_device_during_migration(
2918 struct intel_super *super)
2919 {
2920
2921 struct intel_dev *dv;
2922
2923 for (dv = super->devlist; dv; dv = dv->next) {
2924 if (is_gen_migration(dv->dev))
2925 return dv->dev;
2926 }
2927 return NULL;
2928 }
2929
2930 /*******************************************************************************
2931 * Function: load_imsm_migr_rec
2932 * Description: Function reads imsm migration record (it is stored at the last
2933 * sector of disk)
2934 * Parameters:
2935 * super : imsm internal array info
2936 * info : general array info
2937 * Returns:
2938 * 0 : success
2939 * -1 : fail
2940 * -2 : no migration in progress
2941 ******************************************************************************/
2942 static int load_imsm_migr_rec(struct intel_super *super, struct mdinfo *info)
2943 {
2944 struct mdinfo *sd;
2945 struct dl *dl;
2946 char nm[30];
2947 int retval = -1;
2948 int fd = -1;
2949 struct imsm_dev *dev;
2950 struct imsm_map *map;
2951 int slot = -1;
2952
2953 /* find map under migration */
2954 dev = imsm_get_device_during_migration(super);
2958 2955 /* nothing to load, no migration in progress?
2956 */
2957 if (dev == NULL)
2958 return -2;
2959
2960 if (info) {
2961 for (sd = info->devs ; sd ; sd = sd->next) {
2962 /* read only from one of the first two slots */
2963 if ((sd->disk.raid_disk < 0) ||
2964 (sd->disk.raid_disk > 1))
2965 continue;
2966
2967 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
2968 fd = dev_open(nm, O_RDONLY);
2969 if (fd >= 0)
2970 break;
2971 }
2972 }
2973 if (fd < 0) {
2974 map = get_imsm_map(dev, MAP_0);
2975 for (dl = super->disks; dl; dl = dl->next) {
2976 /* skip spare and failed disks
2977 */
2978 if (dl->index < 0)
2979 continue;
2980 /* read only from one of the first two slots */
2981 if (map)
2982 slot = get_imsm_disk_slot(map, dl->index);
2983 if (map == NULL || slot > 1 || slot < 0)
2984 continue;
2985 sprintf(nm, "%d:%d", dl->major, dl->minor);
2986 fd = dev_open(nm, O_RDONLY);
2987 if (fd >= 0)
2988 break;
2989 }
2990 }
2991 if (fd < 0)
2992 goto out;
2993 retval = read_imsm_migr_rec(fd, super);
2994
2995 out:
2996 if (fd >= 0)
2997 close(fd);
2998 return retval;
2999 }
3000
3001 /*******************************************************************************
3002 * function: imsm_create_metadata_checkpoint_update
3003 * Description: It creates update for checkpoint change.
3004 * Parameters:
3005 * super : imsm internal array info
3006 * u : pointer to prepared update
3007 * Returns:
3011 3008 * Update length.
3009 * If length is equal to 0, input pointer u contains no update
3010 ******************************************************************************/
3011 static int imsm_create_metadata_checkpoint_update(
3012 struct intel_super *super,
3013 struct imsm_update_general_migration_checkpoint **u)
3014 {
3015
3016 int update_memory_size = 0;
3017
3018 dprintf("(enter)\n");
3019
3020 if (u == NULL)
3021 return 0;
3022 *u = NULL;
3023
3024 /* size of all update data without anchor */
3025 update_memory_size =
3026 sizeof(struct imsm_update_general_migration_checkpoint);
3027
3028 *u = xcalloc(1, update_memory_size);
3029 if (*u == NULL) {
3030 dprintf("error: cannot get memory\n");
3031 return 0;
3032 }
3033 (*u)->type = update_general_migration_checkpoint;
3034 (*u)->curr_migr_unit = __le32_to_cpu(super->migr_rec->curr_migr_unit);
3035 dprintf("prepared for %u\n", (*u)->curr_migr_unit);
3036
3037 return update_memory_size;
3038 }
3039
3040 static void imsm_update_metadata_locally(struct supertype *st,
3041 void *buf, int len);
3042
3043 /*******************************************************************************
3044 * Function: write_imsm_migr_rec
3045 * Description: Function writes imsm migration record
3046 * (at the last sector of disk)
3047 * Parameters:
3048 * super : imsm internal array info
3049 * Returns:
3050 * 0 : success
3051 * -1 : if fail
3052 ******************************************************************************/
3053 static int write_imsm_migr_rec(struct supertype *st)
3054 {
3055 struct intel_super *super = st->sb;
3056 unsigned int sector_size = super->sector_size;
3057 unsigned long long dsize;
3058 char nm[30];
3059 int fd = -1;
3060 int retval = -1;
3061 struct dl *sd;
3062 int len;
3063 struct imsm_update_general_migration_checkpoint *u;
3064 struct imsm_dev *dev;
3065 struct imsm_map *map;
3066
3067 /* find map under migration */
3068 dev = imsm_get_device_during_migration(super);
3069 /* if no migration, write buffer anyway to clear migr_record
3070 * on disk based on first available device
3071 */
3072 if (dev == NULL)
3073 dev = get_imsm_dev(super, super->current_vol < 0 ? 0 :
3074 super->current_vol);
3075
3076 map = get_imsm_map(dev, MAP_0);
3077
3078 if (sector_size == 4096)
3079 convert_to_4k_imsm_migr_rec(super);
3080 for (sd = super->disks ; sd ; sd = sd->next) {
3081 int slot = -1;
3082
3083 /* skip failed and spare devices */
3084 if (sd->index < 0)
3085 continue;
3089 3086 /* write to the first 2 slots only */
3087 if (map)
3088 slot = get_imsm_disk_slot(map, sd->index);
3089 if (map == NULL || slot > 1 || slot < 0)
3090 continue;
3091
3092 sprintf(nm, "%d:%d", sd->major, sd->minor);
3093 fd = dev_open(nm, O_RDWR);
3094 if (fd < 0)
3095 continue;
3096 get_dev_size(fd, NULL, &dsize);
3097 if (lseek64(fd, dsize - (MIGR_REC_SECTOR_POSITION*sector_size),
3098 SEEK_SET) < 0) {
3099 pr_err("Cannot seek to anchor block: %s\n",
3100 strerror(errno));
3101 goto out;
3102 }
3103 if ((unsigned int)write(fd, super->migr_rec_buf,
3104 MIGR_REC_BUF_SECTORS*sector_size) !=
3105 MIGR_REC_BUF_SECTORS*sector_size) {
3106 pr_err("Cannot write migr record block: %s\n",
3107 strerror(errno));
3108 goto out;
3109 }
3110 close(fd);
3111 fd = -1;
3112 }
3113 if (sector_size == 4096)
3114 convert_from_4k_imsm_migr_rec(super);
3115 /* update checkpoint information in metadata */
3116 len = imsm_create_metadata_checkpoint_update(super, &u);
3117 if (len <= 0) {
3118 dprintf("imsm: Cannot prepare update\n");
3119 goto out;
3120 }
3121 /* update metadata locally */
3122 imsm_update_metadata_locally(st, u, len);
3123 /* and possibly remotely */
3124 if (st->update_tail) {
3125 append_metadata_update(st, u, len);
3126 /* during reshape we do all work inside metadata handler
3127 * manage_reshape(), so metadata update has to be triggered
3131 3128 * inside it
3129 */
3130 flush_metadata_updates(st);
3131 st->update_tail = &st->updates;
3132 } else
3133 free(u);
3134
3135 retval = 0;
3136 out:
3137 if (fd >= 0)
3138 close(fd);
3139 return retval;
3140 }
3141
3145 3142 /* spare/missing disk activations are not allowed when the
3146 3143 * array/container performs a reshape operation, because
3147 3144 * all arrays in the container work on the same set of disks
3145 */
3146 int imsm_reshape_blocks_arrays_changes(struct intel_super *super)
3147 {
3148 int rv = 0;
3149 struct intel_dev *i_dev;
3150 struct imsm_dev *dev;
3151
3152 /* check whole container
3153 */
3154 for (i_dev = super->devlist; i_dev; i_dev = i_dev->next) {
3155 dev = i_dev->dev;
3156 if (is_gen_migration(dev)) {
3157 /* No repair during any migration in container
3158 */
3159 rv = 1;
3160 break;
3161 }
3162 }
3163 return rv;
3164 }
3165 static unsigned long long imsm_component_size_aligment_check(int level,
3166 int chunk_size,
3167 unsigned int sector_size,
3168 unsigned long long component_size)
3169 {
3170 unsigned int component_size_alligment;
3171
3175 3172 /* check component size alignment
3173 */
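/* e.g. for level 5 with a 128 KiB chunk on 512-byte sectors,
 * chunk_size/sector_size is 256, so a component_size of 1000 sectors
 * would be trimmed to 768 by the rounding below */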
3174 component_size_alligment = component_size % (chunk_size/sector_size);
3175
3176 dprintf("(Level: %i, chunk_size = %i, component_size = %llu), component_size_alligment = %u\n",
3177 level, chunk_size, component_size,
3178 component_size_alligment);
3179
3180 if (component_size_alligment && (level != 1) && (level != UnSet)) {
3181 dprintf("imsm: reported component size alligned from %llu ",
3182 component_size);
3183 component_size -= component_size_alligment;
3184 dprintf_cont("to %llu (%i).\n",
3185 component_size, component_size_alligment);
3186 }
3187
3188 return component_size;
3189 }
3190
3191 static unsigned long long get_ppl_sector(struct intel_super *super, int dev_idx)
3192 {
3193 struct imsm_dev *dev = get_imsm_dev(super, dev_idx);
3194 struct imsm_map *map = get_imsm_map(dev, MAP_0);
3195
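/* the PPL area starts right after the member's data area:
 * start of data (pba_of_lba0) plus the per-member data size
 * (num_data_stripes * blocks_per_strip) */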
3196 return pba_of_lba0(map) +
3197 (num_data_stripes(map) * map->blocks_per_strip);
3198 }
3199
3200 static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info, char *dmap)
3201 {
3202 struct intel_super *super = st->sb;
3203 struct migr_record *migr_rec = super->migr_rec;
3204 struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
3205 struct imsm_map *map = get_imsm_map(dev, MAP_0);
3206 struct imsm_map *prev_map = get_imsm_map(dev, MAP_1);
3207 struct imsm_map *map_to_analyse = map;
3208 struct dl *dl;
3209 int map_disks = info->array.raid_disks;
3210
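/* note: map_disks was captured from the caller-provided info above,
 * before the memset below clears it; it is only used to size the
 * dmap output at the end of this function */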
3211 memset(info, 0, sizeof(*info));
3212 if (prev_map)
3213 map_to_analyse = prev_map;
3214
3215 dl = super->current_disk;
3216
3217 info->container_member = super->current_vol;
3218 info->array.raid_disks = map->num_members;
3219 info->array.level = get_imsm_raid_level(map_to_analyse);
3220 info->array.layout = imsm_level_to_layout(info->array.level);
3221 info->array.md_minor = -1;
3222 info->array.ctime = 0;
3223 info->array.utime = 0;
3224 info->array.chunk_size =
3225 __le16_to_cpu(map_to_analyse->blocks_per_strip) << 9;
3226 info->array.state = !(dev->vol.dirty & RAIDVOL_DIRTY);
3227 info->custom_array_size = __le32_to_cpu(dev->size_high);
3228 info->custom_array_size <<= 32;
3229 info->custom_array_size |= __le32_to_cpu(dev->size_low);
3230 info->recovery_blocked = imsm_reshape_blocks_arrays_changes(st->sb);
3231
3232 if (is_gen_migration(dev)) {
3233 info->reshape_active = 1;
3234 info->new_level = get_imsm_raid_level(map);
3235 info->new_layout = imsm_level_to_layout(info->new_level);
3236 info->new_chunk = __le16_to_cpu(map->blocks_per_strip) << 9;
3237 info->delta_disks = map->num_members - prev_map->num_members;
3238 if (info->delta_disks) {
3239 /* this needs to be applied to every array
3240 * in the container.
3241 */
3242 info->reshape_active = CONTAINER_RESHAPE;
3243 }
3247 3244 /* The shape information that we give to md might have to be
3248 3245 * modified to cope with md's requirements for reshaping arrays.
3246 * For example, when reshaping a RAID0, md requires it to be
3247 * presented as a degraded RAID4.
3248 * Also if a RAID0 is migrating to a RAID5 we need to specify
3249 * the array as already being RAID5, but the 'before' layout
3250 * is a RAID4-like layout.
3251 */
3252 switch (info->array.level) {
3253 case 0:
3254 switch(info->new_level) {
3255 case 0:
3256 /* conversion is happening as RAID4 */
3257 info->array.level = 4;
3258 info->array.raid_disks += 1;
3259 break;
3260 case 5:
3261 /* conversion is happening as RAID5 */
3262 info->array.level = 5;
3263 info->array.layout = ALGORITHM_PARITY_N;
3264 info->delta_disks -= 1;
3265 break;
3266 default:
3267 /* FIXME error message */
3268 info->array.level = UnSet;
3269 break;
3270 }
3271 break;
3272 }
3273 } else {
3274 info->new_level = UnSet;
3275 info->new_layout = UnSet;
3276 info->new_chunk = info->array.chunk_size;
3277 info->delta_disks = 0;
3278 }
3279
3280 if (dl) {
3281 info->disk.major = dl->major;
3282 info->disk.minor = dl->minor;
3283 info->disk.number = dl->index;
3284 info->disk.raid_disk = get_imsm_disk_slot(map_to_analyse,
3285 dl->index);
3286 }
3287
3288 info->data_offset = pba_of_lba0(map_to_analyse);
3289
3290 if (info->array.level == 5) {
3291 info->component_size = num_data_stripes(map_to_analyse) *
3292 map_to_analyse->blocks_per_strip;
3293 } else {
3294 info->component_size = blocks_per_member(map_to_analyse);
3295 }
3296
3297 info->component_size = imsm_component_size_aligment_check(
3298 info->array.level,
3299 info->array.chunk_size,
3300 super->sector_size,
3301 info->component_size);
3302 info->bb.supported = 1;
3303
3304 memset(info->uuid, 0, sizeof(info->uuid));
3305 info->recovery_start = MaxSector;
3306
3307 if (info->array.level == 5 &&
3308 (dev->rwh_policy == RWH_DISTRIBUTED ||
3309 dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)) {
3310 info->consistency_policy = CONSISTENCY_POLICY_PPL;
3311 info->ppl_sector = get_ppl_sector(super, super->current_vol);
3312 if (dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
3313 info->ppl_size = MULTIPLE_PPL_AREA_SIZE_IMSM >> 9;
3314 else
3315 info->ppl_size = (PPL_HEADER_SIZE + PPL_ENTRY_SPACE)
3316 >> 9;
3317 } else if (info->array.level <= 0) {
3318 info->consistency_policy = CONSISTENCY_POLICY_NONE;
3319 } else {
3320 info->consistency_policy = CONSISTENCY_POLICY_RESYNC;
3321 }
3322
3323 info->reshape_progress = 0;
3324 info->resync_start = MaxSector;
3325 if ((map_to_analyse->map_state == IMSM_T_STATE_UNINITIALIZED ||
3326 !(info->array.state & 1)) &&
3327 imsm_reshape_blocks_arrays_changes(super) == 0) {
3328 info->resync_start = 0;
3329 }
3330 if (dev->vol.migr_state) {
3331 switch (migr_type(dev)) {
3332 case MIGR_REPAIR:
3333 case MIGR_INIT: {
3334 __u64 blocks_per_unit = blocks_per_migr_unit(super,
3335 dev);
3336 __u64 units = __le32_to_cpu(dev->vol.curr_migr_unit);
3337
3338 info->resync_start = blocks_per_unit * units;
3339 break;
3340 }
3341 case MIGR_GEN_MIGR: {
3342 __u64 blocks_per_unit = blocks_per_migr_unit(super,
3343 dev);
3344 __u64 units = __le32_to_cpu(migr_rec->curr_migr_unit);
3345 unsigned long long array_blocks;
3346 int used_disks;
3347
3348 if (__le32_to_cpu(migr_rec->ascending_migr) &&
3349 (units <
3350 (__le32_to_cpu(migr_rec->num_migr_units)-1)) &&
3351 (super->migr_rec->rec_status ==
3352 __cpu_to_le32(UNIT_SRC_IN_CP_AREA)))
3353 units++;
3354
3355 info->reshape_progress = blocks_per_unit * units;
3356
3357 dprintf("IMSM: General Migration checkpoint : %llu (%llu) -> read reshape progress : %llu\n",
3358 (unsigned long long)units,
3359 (unsigned long long)blocks_per_unit,
3360 info->reshape_progress);
3361
3362 used_disks = imsm_num_data_members(dev, MAP_1);
3363 if (used_disks > 0) {
3364 array_blocks = blocks_per_member(map) *
3365 used_disks;
3366 info->custom_array_size =
3367 round_size_to_mb(array_blocks,
3368 used_disks);
3369
3370 }
3371 }
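/* note: no break here, control falls through to the cases below and
 * ends in the default branch, which sets resync_start back to
 * MaxSector */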
3372 case MIGR_VERIFY:
3373 /* we could emulate the checkpointing of
3374 * 'sync_action=check' migrations, but for now
3375 * we just immediately complete them
3376 */
3377 case MIGR_REBUILD:
3378 /* this is handled by container_content_imsm() */
3379 case MIGR_STATE_CHANGE:
3380 /* FIXME handle other migrations */
3381 default:
3382 /* we are not dirty, so... */
3383 info->resync_start = MaxSector;
3384 }
3385 }
3386
3387 strncpy(info->name, (char *) dev->volume, MAX_RAID_SERIAL_LEN);
3388 info->name[MAX_RAID_SERIAL_LEN] = 0;
3389
3390 info->array.major_version = -1;
3391 info->array.minor_version = -2;
3392 sprintf(info->text_version, "/%s/%d", st->container_devnm, info->container_member);
3393 info->safe_mode_delay = 4000; /* 4 secs like the Matrix driver */
3394 uuid_from_super_imsm(st, info->uuid);
3395
3396 if (dmap) {
3397 int i, j;
3398 for (i=0; i<map_disks; i++) {
3399 dmap[i] = 0;
3400 if (i < info->array.raid_disks) {
3401 struct imsm_disk *dsk;
3402 j = get_imsm_disk_idx(dev, i, MAP_X);
3403 dsk = get_imsm_disk(super, j);
3404 if (dsk && (dsk->status & CONFIGURED_DISK))
3405 dmap[i] = 1;
3406 }
3407 }
3408 }
3409 }
3410
3411 static __u8 imsm_check_degraded(struct intel_super *super, struct imsm_dev *dev,
3412 int failed, int look_in_map);
3413
3414 static int imsm_count_failed(struct intel_super *super, struct imsm_dev *dev,
3415 int look_in_map);
3416
3417 static void manage_second_map(struct intel_super *super, struct imsm_dev *dev)
3418 {
3419 if (is_gen_migration(dev)) {
3420 int failed;
3421 __u8 map_state;
3422 struct imsm_map *map2 = get_imsm_map(dev, MAP_1);
3423
3424 failed = imsm_count_failed(super, dev, MAP_1);
3425 map_state = imsm_check_degraded(super, dev, failed, MAP_1);
3426 if (map2->map_state != map_state) {
3427 map2->map_state = map_state;
3428 super->updates_pending++;
3429 }
3430 }
3431 }
3432
3433 static struct imsm_disk *get_imsm_missing(struct intel_super *super, __u8 index)
3434 {
3435 struct dl *d;
3436
3437 for (d = super->missing; d; d = d->next)
3438 if (d->index == index)
3439 return &d->disk;
3440 return NULL;
3441 }
3442
3443 static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info, char *map)
3444 {
3445 struct intel_super *super = st->sb;
3446 struct imsm_disk *disk;
3447 int map_disks = info->array.raid_disks;
3448 int max_enough = -1;
3449 int i;
3450 struct imsm_super *mpb;
3451
3452 if (super->current_vol >= 0) {
3453 getinfo_super_imsm_volume(st, info, map);
3454 return;
3455 }
3456 memset(info, 0, sizeof(*info));
3457
3458 /* Set raid_disks to zero so that Assemble will always pull in valid
3459 * spares
3460 */
3461 info->array.raid_disks = 0;
3462 info->array.level = LEVEL_CONTAINER;
3463 info->array.layout = 0;
3464 info->array.md_minor = -1;
3465 info->array.ctime = 0; /* N/A for imsm */
3466 info->array.utime = 0;
3467 info->array.chunk_size = 0;
3468
3469 info->disk.major = 0;
3470 info->disk.minor = 0;
3471 info->disk.raid_disk = -1;
3472 info->reshape_active = 0;
3473 info->array.major_version = -1;
3474 info->array.minor_version = -2;
3475 strcpy(info->text_version, "imsm");
3476 info->safe_mode_delay = 0;
3477 info->disk.number = -1;
3478 info->disk.state = 0;
3479 info->name[0] = 0;
3480 info->recovery_start = MaxSector;
3481 info->recovery_blocked = imsm_reshape_blocks_arrays_changes(st->sb);
3482 info->bb.supported = 1;
3483
3487 3484 /* do we have all the in-sync disks that we expect? */
3485 mpb = super->anchor;
3486 info->events = __le32_to_cpu(mpb->generation_num);
3487
3488 for (i = 0; i < mpb->num_raid_devs; i++) {
3489 struct imsm_dev *dev = get_imsm_dev(super, i);
3490 int failed, enough, j, missing = 0;
3491 struct imsm_map *map;
3492 __u8 state;
3493
3494 failed = imsm_count_failed(super, dev, MAP_0);
3495 state = imsm_check_degraded(super, dev, failed, MAP_0);
3496 map = get_imsm_map(dev, MAP_0);
3497
3498 /* any newly missing disks?
3499 * (catches single-degraded vs double-degraded)
3500 */
3501 for (j = 0; j < map->num_members; j++) {
3502 __u32 ord = get_imsm_ord_tbl_ent(dev, j, MAP_0);
3503 __u32 idx = ord_to_idx(ord);
3504
3505 if (!(ord & IMSM_ORD_REBUILD) &&
3506 get_imsm_missing(super, idx)) {
3507 missing = 1;
3508 break;
3509 }
3510 }
3511
3512 if (state == IMSM_T_STATE_FAILED)
3513 enough = -1;
3514 else if (state == IMSM_T_STATE_DEGRADED &&
3515 (state != map->map_state || missing))
3516 enough = 0;
3517 else /* we're normal, or already degraded */
3518 enough = 1;
3519 if (is_gen_migration(dev) && missing) {
3520 /* during general migration we need all disks
3521 * that process is running on.
3522 * No new missing disk is allowed.
3523 */
3524 max_enough = -1;
3525 enough = -1;
3526 /* no more checks necessary
3527 */
3528 break;
3529 }
3530 /* in the missing/failed disk case check to see
3531 * if at least one array is runnable
3532 */
3533 max_enough = max(max_enough, enough);
3534 }
3535 dprintf("enough: %d\n", max_enough);
3536 info->container_enough = max_enough;
3537
3538 if (super->disks) {
3539 __u32 reserved = imsm_reserved_sectors(super, super->disks);
3540
3541 disk = &super->disks->disk;
3542 info->data_offset = total_blocks(&super->disks->disk) - reserved;
3543 info->component_size = reserved;
3544 info->disk.state = is_configured(disk) ? (1 << MD_DISK_ACTIVE) : 0;
3545 /* we don't change info->disk.raid_disk here because
3546 * this state will be finalized in mdmon after we have
3547 * found the 'most fresh' version of the metadata
3548 */
3549 info->disk.state |= is_failed(disk) ? (1 << MD_DISK_FAULTY) : 0;
3550 info->disk.state |= (is_spare(disk) || is_journal(disk)) ?
3551 0 : (1 << MD_DISK_SYNC);
3552 }
3553
3554 /* only call uuid_from_super_imsm when this disk is part of a populated container,
3555 * ->compare_super may have updated the 'num_raid_devs' field for spares
3556 */
3557 if (info->disk.state & (1 << MD_DISK_SYNC) || super->anchor->num_raid_devs)
3558 uuid_from_super_imsm(st, info->uuid);
3559 else
3560 memcpy(info->uuid, uuid_zero, sizeof(uuid_zero));
3561
3562 /* I don't know how to compute 'map' on imsm, so use safe default */
3563 if (map) {
3564 int i;
3565 for (i = 0; i < map_disks; i++)
3566 map[i] = 1;
3567 }
3568
3569 }
3570
3571 /* allocates memory and fills disk in mdinfo structure
3572 * for each disk in array */
3573 struct mdinfo *getinfo_super_disks_imsm(struct supertype *st)
3574 {
3575 struct mdinfo *mddev;
3576 struct intel_super *super = st->sb;
3577 struct imsm_disk *disk;
3578 int count = 0;
3579 struct dl *dl;
3580 if (!super || !super->disks)
3581 return NULL;
3582 dl = super->disks;
3583 mddev = xcalloc(1, sizeof(*mddev));
3584 while (dl) {
3585 struct mdinfo *tmp;
3586 disk = &dl->disk;
3587 tmp = xcalloc(1, sizeof(*tmp));
3588 if (mddev->devs)
3589 tmp->next = mddev->devs;
3590 mddev->devs = tmp;
3591 tmp->disk.number = count++;
3592 tmp->disk.major = dl->major;
3593 tmp->disk.minor = dl->minor;
3594 tmp->disk.state = is_configured(disk) ?
3595 (1 << MD_DISK_ACTIVE) : 0;
3596 tmp->disk.state |= is_failed(disk) ? (1 << MD_DISK_FAULTY) : 0;
3597 tmp->disk.state |= is_spare(disk) ? 0 : (1 << MD_DISK_SYNC);
3598 tmp->disk.raid_disk = -1;
3599 dl = dl->next;
3600 }
3601 return mddev;
3602 }
3603
3604 static int update_super_imsm(struct supertype *st, struct mdinfo *info,
3605 char *update, char *devname, int verbose,
3606 int uuid_set, char *homehost)
3607 {
3608 /* For 'assemble' and 'force' we need to return non-zero if any
3609 * change was made. For others, the return value is ignored.
3610 * Update options are:
3611 * force-one : This device looks a bit old but needs to be included,
3612 * update age info appropriately.
3613 * assemble: clear any 'faulty' flag to allow this device to
3614 * be assembled.
3615 * force-array: Array is degraded but being forced, mark it clean
3616 * if that will be needed to assemble it.
3617 *
3618 * newdev: not used ????
3619 * grow: Array has gained a new device - this is currently for
3620 * linear only
3621 * resync: mark as dirty so a resync will happen.
3622 * name: update the name - preserving the homehost
3626 3623 * uuid: Change the uuid of the array to match what is given
3624 *
3625 * Following are not relevant for this imsm:
3629 3626 * sparc2.2 : update from old dodgy metadata
3627 * super-minor: change the preferred_minor number
3628 * summaries: update redundant counters.
3629 * homehost: update the recorded homehost
3630 * _reshape_progress: record new reshape_progress position.
3631 */
3632 int rv = 1;
3633 struct intel_super *super = st->sb;
3634 struct imsm_super *mpb;
3635
3636 /* we can only update container info */
3637 if (!super || super->current_vol >= 0 || !super->anchor)
3638 return 1;
3639
3640 mpb = super->anchor;
3641
3642 if (strcmp(update, "uuid") == 0) {
3643 /* We take this to mean that the family_num should be updated.
3644 * However that is much smaller than the uuid so we cannot really
3645 * allow an explicit uuid to be given. And it is hard to reliably
3646 * know if one was.
3647 * So if !uuid_set we know the current uuid is random and just used
3648 * the first 'int' and copy it to the other 3 positions.
3649 * Otherwise we require the 4 'int's to be the same as would be the
3650 * case if we are using a random uuid. So an explicit uuid will be
3654 3651 * accepted as long as all four ints are the same... which shouldn't hurt
3652 */
3653 if (!uuid_set) {
3654 info->uuid[1] = info->uuid[2] = info->uuid[3] = info->uuid[0];
3655 rv = 0;
3656 } else {
3657 if (info->uuid[0] != info->uuid[1] ||
3658 info->uuid[1] != info->uuid[2] ||
3659 info->uuid[2] != info->uuid[3])
3660 rv = -1;
3661 else
3662 rv = 0;
3663 }
3664 if (rv == 0)
3665 mpb->orig_family_num = info->uuid[0];
3666 } else if (strcmp(update, "assemble") == 0)
3667 rv = 0;
3668 else
3669 rv = -1;
3670
3671 /* successful update? recompute checksum */
3672 if (rv == 0)
3673 mpb->check_sum = __le32_to_cpu(__gen_imsm_checksum(mpb));
3674
3675 return rv;
3676 }
3677
3678 static size_t disks_to_mpb_size(int disks)
3679 {
3680 size_t size;
3681
3682 size = sizeof(struct imsm_super);
3683 size += (disks - 1) * sizeof(struct imsm_disk);
3684 size += 2 * sizeof(struct imsm_dev);
3685 /* up to 2 maps per raid device (-2 for the imsm_maps embedded in imsm_dev) */
3686 size += (4 - 2) * sizeof(struct imsm_map);
3687 /* 4 possible disk_ord_tbl's */
3688 size += 4 * (disks - 1) * sizeof(__u32);
3689 /* maximum bbm log */
3690 size += sizeof(struct bbm_log);
3691
3692 return size;
3693 }
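/* Note (explanatory, derived from the sum above): the anchor is sized for
 * a worst case of two volumes, each with two maps (as during a migration)
 * and full disk_ord_tbl entries for every disk, plus a maximal bad block
 * log.
 */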
3694
3695 static __u64 avail_size_imsm(struct supertype *st, __u64 devsize,
3696 unsigned long long data_offset)
3697 {
3698 if (devsize < (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS))
3699 return 0;
3700
3701 return devsize - (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
3702 }
3703
3704 static void free_devlist(struct intel_super *super)
3705 {
3706 struct intel_dev *dv;
3707
3708 while (super->devlist) {
3709 dv = super->devlist->next;
3710 free(super->devlist->dev);
3711 free(super->devlist);
3712 super->devlist = dv;
3713 }
3714 }
3715
3716 static void imsm_copy_dev(struct imsm_dev *dest, struct imsm_dev *src)
3717 {
3718 memcpy(dest, src, sizeof_imsm_dev(src, 0));
3719 }
3720
3721 static int compare_super_imsm(struct supertype *st, struct supertype *tst)
3722 {
3723 /*
3724 * return:
3725 * 0 same, or first was empty, and second was copied
3726 * 1 second had wrong number
3727 * 2 wrong uuid
3728 * 3 wrong other info
3729 */
3730 struct intel_super *first = st->sb;
3731 struct intel_super *sec = tst->sb;
3732
3733 if (!first) {
3734 st->sb = tst->sb;
3735 tst->sb = NULL;
3736 return 0;
3737 }
3738 /* in platform dependent environment test if the disks
3739 * use the same Intel hba
3740 * If not on Intel hba at all, allow anything.
3741 */
3742 if (!check_env("IMSM_NO_PLATFORM") && first->hba && sec->hba) {
3743 if (first->hba->type != sec->hba->type) {
3744 fprintf(stderr,
3745 "HBAs of devices do not match %s != %s\n",
3746 get_sys_dev_type(first->hba->type),
3747 get_sys_dev_type(sec->hba->type));
3748 return 3;
3749 }
3750 if (first->orom != sec->orom) {
3751 fprintf(stderr,
3752 "HBAs of devices do not match %s != %s\n",
3753 first->hba->pci_id, sec->hba->pci_id);
3754 return 3;
3755 }
3756 }
3757
3758 /* if an anchor does not have num_raid_devs set then it is a free
3759 * floating spare
3760 */
3761 if (first->anchor->num_raid_devs > 0 &&
3762 sec->anchor->num_raid_devs > 0) {
3763 /* Determine if these disks might ever have been
3764 * related. Further disambiguation can only take place
3765 * in load_super_imsm_all
3766 */
3767 __u32 first_family = first->anchor->orig_family_num;
3768 __u32 sec_family = sec->anchor->orig_family_num;
3769
3770 if (memcmp(first->anchor->sig, sec->anchor->sig,
3771 MAX_SIGNATURE_LENGTH) != 0)
3772 return 3;
3773
3774 if (first_family == 0)
3775 first_family = first->anchor->family_num;
3776 if (sec_family == 0)
3777 sec_family = sec->anchor->family_num;
3778
3779 if (first_family != sec_family)
3780 return 3;
3781
3782 }
3783
3784 /* if 'first' is a spare promote it to a populated mpb with sec's
3785 * family number
3786 */
3787 if (first->anchor->num_raid_devs == 0 &&
3788 sec->anchor->num_raid_devs > 0) {
3789 int i;
3790 struct intel_dev *dv;
3791 struct imsm_dev *dev;
3792
3793 /* we need to copy raid device info from sec if an allocation
3794 * fails here we don't associate the spare
3795 */
3796 for (i = 0; i < sec->anchor->num_raid_devs; i++) {
3797 dv = xmalloc(sizeof(*dv));
3798 dev = xmalloc(sizeof_imsm_dev(get_imsm_dev(sec, i), 1));
3799 dv->dev = dev;
3800 dv->index = i;
3801 dv->next = first->devlist;
3802 first->devlist = dv;
3803 }
3804 if (i < sec->anchor->num_raid_devs) {
3805 /* allocation failure */
3806 free_devlist(first);
3807 pr_err("imsm: failed to associate spare\n");
3808 return 3;
3809 }
3810 first->anchor->num_raid_devs = sec->anchor->num_raid_devs;
3811 first->anchor->orig_family_num = sec->anchor->orig_family_num;
3812 first->anchor->family_num = sec->anchor->family_num;
3813 memcpy(first->anchor->sig, sec->anchor->sig, MAX_SIGNATURE_LENGTH);
3814 for (i = 0; i < sec->anchor->num_raid_devs; i++)
3815 imsm_copy_dev(get_imsm_dev(first, i), get_imsm_dev(sec, i));
3816 }
3817
3818 return 0;
3819 }
3820
3821 static void fd2devname(int fd, char *name)
3822 {
3823 struct stat st;
3824 char path[256];
3825 char dname[PATH_MAX];
3826 char *nm;
3827 int rv;
3828
3829 name[0] = '\0';
3830 if (fstat(fd, &st) != 0)
3831 return;
3832 sprintf(path, "/sys/dev/block/%d:%d",
3833 major(st.st_rdev), minor(st.st_rdev));
3834
3835 rv = readlink(path, dname, sizeof(dname)-1);
3836 if (rv <= 0)
3837 return;
3838
3839 dname[rv] = '\0';
3840 nm = strrchr(dname, '/');
3841 if (nm) {
3842 nm++;
3843 snprintf(name, MAX_RAID_SERIAL_LEN, "/dev/%s", nm);
3844 }
3845 }
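/* Illustration (hypothetical values, not from the original source): an fd
 * whose device node is 8:16 resolves via /sys/dev/block/8:16 to a name
 * such as "/dev/sdb". The result is limited to MAX_RAID_SERIAL_LEN because
 * it can double as a fallback serial number (see IMSM_DEVNAME_AS_SERIAL
 * in imsm_read_serial() below).
 */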
3846
3847 static int nvme_get_serial(int fd, void *buf, size_t buf_len)
3848 {
3849 char path[60];
3850 char *name = fd2kname(fd);
3851
3852 if (!name)
3853 return 1;
3854
3855 if (strncmp(name, "nvme", 4) != 0)
3856 return 1;
3857
3858 snprintf(path, sizeof(path) - 1, "/sys/block/%s/device/serial", name);
3859
3860 return load_sys(path, buf, buf_len);
3861 }
3862
3863 extern int scsi_get_serial(int fd, void *buf, size_t buf_len);
3864
3865 static int imsm_read_serial(int fd, char *devname,
3866 __u8 serial[MAX_RAID_SERIAL_LEN])
3867 {
3868 char buf[50];
3869 int rv;
3870 int len;
3871 char *dest;
3872 char *src;
3873 unsigned int i;
3874
3875 memset(buf, 0, sizeof(buf));
3876
3877 rv = nvme_get_serial(fd, buf, sizeof(buf));
3878
3879 if (rv)
3880 rv = scsi_get_serial(fd, buf, sizeof(buf));
3881
3882 if (rv && check_env("IMSM_DEVNAME_AS_SERIAL")) {
3883 memset(serial, 0, MAX_RAID_SERIAL_LEN);
3884 fd2devname(fd, (char *) serial);
3885 return 0;
3886 }
3887
3888 if (rv != 0) {
3889 if (devname)
3890 pr_err("Failed to retrieve serial for %s\n",
3891 devname);
3892 return rv;
3893 }
3894
3895 /* trim all whitespace and non-printable characters and convert
3896 * ':' to ';'
3897 */
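/* e.g. (illustrative input, not from the original source): a raw serial
 * of "  WD-WX:12 34\n" is compacted by the loop below to "WD-WX;1234";
 * if the compacted string is longer than MAX_RAID_SERIAL_LEN only its
 * trailing MAX_RAID_SERIAL_LEN characters are kept.
 */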
3898 for (i = 0, dest = buf; i < sizeof(buf) && buf[i]; i++) {
3899 src = &buf[i];
3900 if (*src > 0x20) {
3901 /* ':' is reserved for use in placeholder serial
3902 * numbers for missing disks
3903 */
3904 if (*src == ':')
3905 *dest++ = ';';
3906 else
3907 *dest++ = *src;
3908 }
3909 }
3910 len = dest - buf;
3911 dest = buf;
3912
3913 /* truncate leading characters */
3914 if (len > MAX_RAID_SERIAL_LEN) {
3915 dest += len - MAX_RAID_SERIAL_LEN;
3916 len = MAX_RAID_SERIAL_LEN;
3917 }
3918
3919 memset(serial, 0, MAX_RAID_SERIAL_LEN);
3920 memcpy(serial, dest, len);
3921
3922 return 0;
3923 }
3924
3925 static int serialcmp(__u8 *s1, __u8 *s2)
3926 {
3927 return strncmp((char *) s1, (char *) s2, MAX_RAID_SERIAL_LEN);
3928 }
3929
3930 static void serialcpy(__u8 *dest, __u8 *src)
3931 {
3932 strncpy((char *) dest, (char *) src, MAX_RAID_SERIAL_LEN);
3933 }
3934
3935 static struct dl *serial_to_dl(__u8 *serial, struct intel_super *super)
3936 {
3937 struct dl *dl;
3938
3939 for (dl = super->disks; dl; dl = dl->next)
3940 if (serialcmp(dl->serial, serial) == 0)
3941 break;
3942
3943 return dl;
3944 }
3945
3946 static struct imsm_disk *
3947 __serial_to_disk(__u8 *serial, struct imsm_super *mpb, int *idx)
3948 {
3949 int i;
3950
3951 for (i = 0; i < mpb->num_disks; i++) {
3952 struct imsm_disk *disk = __get_imsm_disk(mpb, i);
3953
3954 if (serialcmp(disk->serial, serial) == 0) {
3955 if (idx)
3956 *idx = i;
3957 return disk;
3958 }
3959 }
3960
3961 return NULL;
3962 }
3963
3964 static int
3965 load_imsm_disk(int fd, struct intel_super *super, char *devname, int keep_fd)
3966 {
3967 struct imsm_disk *disk;
3968 struct dl *dl;
3969 struct stat stb;
3970 int rv;
3971 char name[40];
3972 __u8 serial[MAX_RAID_SERIAL_LEN];
3973
3974 rv = imsm_read_serial(fd, devname, serial);
3975
3976 if (rv != 0)
3977 return 2;
3978
3979 dl = xcalloc(1, sizeof(*dl));
3980
3981 fstat(fd, &stb);
3982 dl->major = major(stb.st_rdev);
3983 dl->minor = minor(stb.st_rdev);
3984 dl->next = super->disks;
3985 dl->fd = keep_fd ? fd : -1;
3986 assert(super->disks == NULL);
3987 super->disks = dl;
3988 serialcpy(dl->serial, serial);
3989 dl->index = -2;
3990 dl->e = NULL;
3991 fd2devname(fd, name);
3992 if (devname)
3993 dl->devname = xstrdup(devname);
3994 else
3995 dl->devname = xstrdup(name);
3996
3997 /* look up this disk's index in the current anchor */
3998 disk = __serial_to_disk(dl->serial, super->anchor, &dl->index);
3999 if (disk) {
4000 dl->disk = *disk;
4001 /* only set index on disks that are a member of a
4002 * populated container, i.e. one with raid_devs
4003 */
4004 if (is_failed(&dl->disk))
4005 dl->index = -2;
4006 else if (is_spare(&dl->disk) || is_journal(&dl->disk))
4007 dl->index = -1;
4008 }
4009
4010 return 0;
4011 }
4012
4013 /* When migrating map0 contains the 'destination' state while map1
4014 * contains the current state. When not migrating map0 contains the
4015 * current state. This routine assumes that map[0].map_state is set to
4016 * the current array state before being called.
4017 *
4018 * Migration is indicated by one of the following states
4019 * 1/ Idle (migr_state=0 map0state=normal||uninitialized||degraded||failed)
4020 * 2/ Initialize (migr_state=1 migr_type=MIGR_INIT map0state=normal
4021 * map1state=uninitialized)
4022 * 3/ Repair (Resync) (migr_state=1 migr_type=MIGR_REPAIR map0state=normal
4023 * map1state=normal)
4024 * 4/ Rebuild (migr_state=1 migr_type=MIGR_REBUILD map0state=normal
4025 * map1state=degraded)
4026 * 5/ Migration (migr_state=1 migr_type=MIGR_GEN_MIGR map0state=normal
4027 * map1state=normal)
4028 */
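/* Minimal sketch of the two helpers below (explanatory note): migrate()
 * copies the current map into map[1], records the target end state in
 * map[0] and, for MIGR_GEN_MIGR, strips IMSM_ORD_REBUILD flags from the
 * ord table and clears the migration record; end_migration() later folds
 * any rebuild flags that did not complete back into map[0] and clears
 * vol.migr_state again.
 */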
4029 static void migrate(struct imsm_dev *dev, struct intel_super *super,
4030 __u8 to_state, int migr_type)
4031 {
4032 struct imsm_map *dest;
4033 struct imsm_map *src = get_imsm_map(dev, MAP_0);
4034
4035 dev->vol.migr_state = 1;
4036 set_migr_type(dev, migr_type);
4037 dev->vol.curr_migr_unit = 0;
4038 dest = get_imsm_map(dev, MAP_1);
4039
4040 /* duplicate and then set the target end state in map[0] */
4041 memcpy(dest, src, sizeof_imsm_map(src));
4042 if (migr_type == MIGR_GEN_MIGR) {
4043 __u32 ord;
4044 int i;
4045
4046 for (i = 0; i < src->num_members; i++) {
4047 ord = __le32_to_cpu(src->disk_ord_tbl[i]);
4048 set_imsm_ord_tbl_ent(src, i, ord_to_idx(ord));
4049 }
4050 }
4051
4052 if (migr_type == MIGR_GEN_MIGR)
4053 /* Clear migration record */
4054 memset(super->migr_rec, 0, sizeof(struct migr_record));
4055
4056 src->map_state = to_state;
4057 }
4058
4059 static void end_migration(struct imsm_dev *dev, struct intel_super *super,
4060 __u8 map_state)
4061 {
4062 struct imsm_map *map = get_imsm_map(dev, MAP_0);
4063 struct imsm_map *prev = get_imsm_map(dev, dev->vol.migr_state == 0 ?
4064 MAP_0 : MAP_1);
4065 int i, j;
4066
4067 /* merge any IMSM_ORD_REBUILD bits that were not successfully
4068 * completed in the last migration.
4069 *
4070 * FIXME add support for raid-level-migration
4071 */
4072 if (map_state != map->map_state && (is_gen_migration(dev) == 0) &&
4073 prev->map_state != IMSM_T_STATE_UNINITIALIZED) {
4074 /* when final map state is other than expected
4075 * merge maps (not for migration)
4076 */
4077 int failed;
4078
4079 for (i = 0; i < prev->num_members; i++)
4080 for (j = 0; j < map->num_members; j++)
4081 /* during online capacity expansion
4082 * a disk's position can change
4083 * if takeover is used
4084 */
4085 if (ord_to_idx(map->disk_ord_tbl[j]) ==
4086 ord_to_idx(prev->disk_ord_tbl[i])) {
4087 map->disk_ord_tbl[j] |=
4088 prev->disk_ord_tbl[i];
4089 break;
4090 }
4091 failed = imsm_count_failed(super, dev, MAP_0);
4092 map_state = imsm_check_degraded(super, dev, failed, MAP_0);
4093 }
4094
4095 dev->vol.migr_state = 0;
4096 set_migr_type(dev, 0);
4097 dev->vol.curr_migr_unit = 0;
4098 map->map_state = map_state;
4099 }
4100
4101 static int parse_raid_devices(struct intel_super *super)
4102 {
4103 int i;
4104 struct imsm_dev *dev_new;
4105 size_t len, len_migr;
4106 size_t max_len = 0;
4107 size_t space_needed = 0;
4108 struct imsm_super *mpb = super->anchor;
4109
4110 for (i = 0; i < super->anchor->num_raid_devs; i++) {
4111 struct imsm_dev *dev_iter = __get_imsm_dev(super->anchor, i);
4112 struct intel_dev *dv;
4113
4114 len = sizeof_imsm_dev(dev_iter, 0);
4115 len_migr = sizeof_imsm_dev(dev_iter, 1);
4116 if (len_migr > len)
4117 space_needed += len_migr - len;
4118
4119 dv = xmalloc(sizeof(*dv));
4120 if (max_len < len_migr)
4121 max_len = len_migr;
4122 if (max_len > len_migr)
4123 space_needed += max_len - len_migr;
4124 dev_new = xmalloc(max_len);
4125 imsm_copy_dev(dev_new, dev_iter);
4126 dv->dev = dev_new;
4127 dv->index = i;
4128 dv->next = super->devlist;
4129 super->devlist = dv;
4130 }
4131
4132 /* ensure that super->buf is large enough when all raid devices
4133 * are migrating
4134 */
4135 if (__le32_to_cpu(mpb->mpb_size) + space_needed > super->len) {
4136 void *buf;
4137
4138 len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) + space_needed,
4139 super->sector_size);
4140 if (posix_memalign(&buf, MAX_SECTOR_SIZE, len) != 0)
4141 return 1;
4142
4143 memcpy(buf, super->buf, super->len);
4144 memset(buf + super->len, 0, len - super->len);
4145 free(super->buf);
4146 super->buf = buf;
4147 super->len = len;
4148 }
4149
4150 super->extra_space += space_needed;
4151
4152 return 0;
4153 }
4154
4155 /*******************************************************************************
4156 * Function: check_mpb_migr_compatibility
4157 * Description: Function checks for unsupported migration features:
4158 * - migration optimization area (pba_of_lba0)
4159 * - descending reshape (ascending_migr)
4160 * Parameters:
4161 * super : imsm metadata information
4162 * Returns:
4163 * 0 : migration is compatible
4164 * -1 : migration is not compatible
4165 ******************************************************************************/
4166 int check_mpb_migr_compatibility(struct intel_super *super)
4167 {
4168 struct imsm_map *map0, *map1;
4169 struct migr_record *migr_rec = super->migr_rec;
4170 int i;
4171
4172 for (i = 0; i < super->anchor->num_raid_devs; i++) {
4173 struct imsm_dev *dev_iter = __get_imsm_dev(super->anchor, i);
4174
4175 if (dev_iter &&
4176 dev_iter->vol.migr_state == 1 &&
4177 dev_iter->vol.migr_type == MIGR_GEN_MIGR) {
4178 /* This device is migrating */
4179 map0 = get_imsm_map(dev_iter, MAP_0);
4180 map1 = get_imsm_map(dev_iter, MAP_1);
4181 if (pba_of_lba0(map0) != pba_of_lba0(map1))
4182 /* migration optimization area was used */
4183 return -1;
4184 if (migr_rec->ascending_migr == 0 &&
4185 migr_rec->dest_depth_per_unit > 0)
4186 /* descending reshape not supported yet */
4187 return -1;
4188 }
4189 }
4190 return 0;
4191 }
4192
4193 static void __free_imsm(struct intel_super *super, int free_disks);
4194
4195 /* load_imsm_mpb - read matrix metadata
4196 * allocates super->buf (the anchor mpb) to be freed by free_imsm
4197 */
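/* On-disk layout assumed below (sketch derived from the seeks in this
 * function): the 1-sector anchor mpb lives at dsize - 2*sector_size, any
 * extended mpb sectors immediately precede it, and the final sector of
 * the device is not read here:
 *
 *   | ... data ... | extended mpb | anchor (1 sector) | last sector |
 */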
4198 static int load_imsm_mpb(int fd, struct intel_super *super, char *devname)
4199 {
4200 unsigned long long dsize;
4201 unsigned long long sectors;
4202 unsigned int sector_size = super->sector_size;
4203 struct stat;
4204 struct imsm_super *anchor;
4205 __u32 check_sum;
4206
4207 get_dev_size(fd, NULL, &dsize);
4208 if (dsize < 2*sector_size) {
4209 if (devname)
4210 pr_err("%s: device to small for imsm\n",
4211 devname);
4212 return 1;
4213 }
4214
4215 if (lseek64(fd, dsize - (sector_size * 2), SEEK_SET) < 0) {
4216 if (devname)
4217 pr_err("Cannot seek to anchor block on %s: %s\n",
4218 devname, strerror(errno));
4219 return 1;
4220 }
4221
4222 if (posix_memalign((void **)&anchor, sector_size, sector_size) != 0) {
4223 if (devname)
4224 pr_err("Failed to allocate imsm anchor buffer on %s\n", devname);
4225 return 1;
4226 }
4227 if ((unsigned int)read(fd, anchor, sector_size) != sector_size) {
4228 if (devname)
4229 pr_err("Cannot read anchor block on %s: %s\n",
4230 devname, strerror(errno));
4231 free(anchor);
4232 return 1;
4233 }
4234
4235 if (strncmp((char *) anchor->sig, MPB_SIGNATURE, MPB_SIG_LEN) != 0) {
4236 if (devname)
4237 pr_err("no IMSM anchor on %s\n", devname);
4238 free(anchor);
4239 return 2;
4240 }
4241
4242 __free_imsm(super, 0);
4243 /* reload capability and hba */
4244
4245 /* capability and hba must be updated with new super allocation */
4246 find_intel_hba_capability(fd, super, devname);
4247 super->len = ROUND_UP(anchor->mpb_size, sector_size);
4248 if (posix_memalign(&super->buf, MAX_SECTOR_SIZE, super->len) != 0) {
4249 if (devname)
4250 pr_err("unable to allocate %zu byte mpb buffer\n",
4251 super->len);
4252 free(anchor);
4253 return 2;
4254 }
4255 memcpy(super->buf, anchor, sector_size);
4256
4257 sectors = mpb_sectors(anchor, sector_size) - 1;
4258 free(anchor);
4259
4260 if (posix_memalign(&super->migr_rec_buf, MAX_SECTOR_SIZE,
4261 MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE) != 0) {
4262 pr_err("could not allocate migr_rec buffer\n");
4263 free(super->buf);
4264 return 2;
4265 }
4266 super->clean_migration_record_by_mdmon = 0;
4267
4268 if (!sectors) {
4269 check_sum = __gen_imsm_checksum(super->anchor);
4270 if (check_sum != __le32_to_cpu(super->anchor->check_sum)) {
4271 if (devname)
4272 pr_err("IMSM checksum %x != %x on %s\n",
4273 check_sum,
4274 __le32_to_cpu(super->anchor->check_sum),
4275 devname);
4276 return 2;
4277 }
4278
4279 return 0;
4280 }
4281
4282 /* read the extended mpb */
4283 if (lseek64(fd, dsize - (sector_size * (2 + sectors)), SEEK_SET) < 0) {
4284 if (devname)
4285 pr_err("Cannot seek to extended mpb on %s: %s\n",
4286 devname, strerror(errno));
4287 return 1;
4288 }
4289
4290 if ((unsigned int)read(fd, super->buf + sector_size,
4291 super->len - sector_size) != super->len - sector_size) {
4292 if (devname)
4293 pr_err("Cannot read extended mpb on %s: %s\n",
4294 devname, strerror(errno));
4295 return 2;
4296 }
4297
4298 check_sum = __gen_imsm_checksum(super->anchor);
4299 if (check_sum != __le32_to_cpu(super->anchor->check_sum)) {
4300 if (devname)
4301 pr_err("IMSM checksum %x != %x on %s\n",
4302 check_sum, __le32_to_cpu(super->anchor->check_sum),
4303 devname);
4304 return 3;
4305 }
4306
4307 return 0;
4308 }
4309
4310 static int read_imsm_migr_rec(int fd, struct intel_super *super);
4311
4312 /* clears hi bits in metadata if MPB_ATTRIB_2TB_DISK not set */
4313 static void clear_hi(struct intel_super *super)
4314 {
4315 struct imsm_super *mpb = super->anchor;
4316 int i, n;
4317 if (mpb->attributes & MPB_ATTRIB_2TB_DISK)
4318 return;
4319 for (i = 0; i < mpb->num_disks; ++i) {
4320 struct imsm_disk *disk = &mpb->disk[i];
4321 disk->total_blocks_hi = 0;
4322 }
4323 for (i = 0; i < mpb->num_raid_devs; ++i) {
4324 struct imsm_dev *dev = get_imsm_dev(super, i);
4325 if (!dev)
4326 return;
4327 for (n = 0; n < 2; ++n) {
4328 struct imsm_map *map = get_imsm_map(dev, n);
4329 if (!map)
4330 continue;
4331 map->pba_of_lba0_hi = 0;
4332 map->blocks_per_member_hi = 0;
4333 map->num_data_stripes_hi = 0;
4334 }
4335 }
4336 }
4337
4338 static int
4339 load_and_parse_mpb(int fd, struct intel_super *super, char *devname, int keep_fd)
4340 {
4341 int err;
4342
4343 err = load_imsm_mpb(fd, super, devname);
4344 if (err)
4345 return err;
4346 if (super->sector_size == 4096)
4347 convert_from_4k(super);
4348 err = load_imsm_disk(fd, super, devname, keep_fd);
4349 if (err)
4350 return err;
4351 err = parse_raid_devices(super);
4352 if (err)
4353 return err;
4354 err = load_bbm_log(super);
4355 clear_hi(super);
4356 return err;
4357 }
4358
4359 static void __free_imsm_disk(struct dl *d)
4360 {
4361 if (d->fd >= 0)
4362 close(d->fd);
4363 if (d->devname)
4364 free(d->devname);
4365 if (d->e)
4366 free(d->e);
4367 free(d);
4368
4369 }
4370
4371 static void free_imsm_disks(struct intel_super *super)
4372 {
4373 struct dl *d;
4374
4375 while (super->disks) {
4376 d = super->disks;
4377 super->disks = d->next;
4378 __free_imsm_disk(d);
4379 }
4380 while (super->disk_mgmt_list) {
4381 d = super->disk_mgmt_list;
4382 super->disk_mgmt_list = d->next;
4383 __free_imsm_disk(d);
4384 }
4385 while (super->missing) {
4386 d = super->missing;
4387 super->missing = d->next;
4388 __free_imsm_disk(d);
4389 }
4390
4391 }
4392
4393 /* free all the pieces hanging off of a super pointer */
4394 static void __free_imsm(struct intel_super *super, int free_disks)
4395 {
4396 struct intel_hba *elem, *next;
4397
4398 if (super->buf) {
4399 free(super->buf);
4400 super->buf = NULL;
4401 }
4402 /* unlink capability description */
4403 super->orom = NULL;
4404 if (super->migr_rec_buf) {
4405 free(super->migr_rec_buf);
4406 super->migr_rec_buf = NULL;
4407 }
4408 if (free_disks)
4409 free_imsm_disks(super);
4410 free_devlist(super);
4411 elem = super->hba;
4412 while (elem) {
4413 if (elem->path)
4414 free((void *)elem->path);
4415 next = elem->next;
4416 free(elem);
4417 elem = next;
4418 }
4419 if (super->bbm_log)
4420 free(super->bbm_log);
4421 super->hba = NULL;
4422 }
4423
4424 static void free_imsm(struct intel_super *super)
4425 {
4426 __free_imsm(super, 1);
4427 free(super->bb.entries);
4428 free(super);
4429 }
4430
4431 static void free_super_imsm(struct supertype *st)
4432 {
4433 struct intel_super *super = st->sb;
4434
4435 if (!super)
4436 return;
4437
4438 free_imsm(super);
4439 st->sb = NULL;
4440 }
4441
4442 static struct intel_super *alloc_super(void)
4443 {
4444 struct intel_super *super = xcalloc(1, sizeof(*super));
4445
4446 super->current_vol = -1;
4447 super->create_offset = ~((unsigned long long) 0);
4448
4449 super->bb.entries = xmalloc(BBM_LOG_MAX_ENTRIES *
4450 sizeof(struct md_bb_entry));
4451 if (!super->bb.entries) {
4452 free(super);
4453 return NULL;
4454 }
4455
4456 return super;
4457 }
4458
4459 /*
4460 * find and allocate hba and OROM/EFI based on valid fd of RAID component device
4461 */
4462 static int find_intel_hba_capability(int fd, struct intel_super *super, char *devname)
4463 {
4464 struct sys_dev *hba_name;
4465 int rv = 0;
4466
4467 if (fd < 0 || check_env("IMSM_NO_PLATFORM")) {
4468 super->orom = NULL;
4469 super->hba = NULL;
4470 return 0;
4471 }
4472 hba_name = find_disk_attached_hba(fd, NULL);
4473 if (!hba_name) {
4474 if (devname)
4475 pr_err("%s is not attached to Intel(R) RAID controller.\n",
4476 devname);
4477 return 1;
4478 }
4479 rv = attach_hba_to_super(super, hba_name);
4480 if (rv == 2) {
4481 if (devname) {
4482 struct intel_hba *hba = super->hba;
4483
4484 pr_err("%s is attached to Intel(R) %s %s (%s),\n"
4485 " but the container is assigned to Intel(R) %s %s (",
4486 devname,
4487 get_sys_dev_type(hba_name->type),
4488 hba_name->type == SYS_DEV_VMD ? "domain" : "RAID controller",
4489 hba_name->pci_id ? : "Err!",
4490 get_sys_dev_type(super->hba->type),
4491 hba->type == SYS_DEV_VMD ? "domain" : "RAID controller");
4492
4493 while (hba) {
4494 fprintf(stderr, "%s", hba->pci_id ? : "Err!");
4495 if (hba->next)
4496 fprintf(stderr, ", ");
4497 hba = hba->next;
4498 }
4499 fprintf(stderr, ").\n"
4500 " Mixing devices attached to different %s is not allowed.\n",
4501 hba_name->type == SYS_DEV_VMD ? "VMD domains" : "controllers");
4502 }
4503 return 2;
4504 }
4505 super->orom = find_imsm_capability(hba_name);
4506 if (!super->orom)
4507 return 3;
4508
4509 return 0;
4510 }
4511
4512 /* find_missing - helper routine for load_super_imsm_all that identifies
4513 * disks that have disappeared from the system. This routine relies on
4514 * the mpb being uptodate, which it is at load time.
4515 */
4516 static int find_missing(struct intel_super *super)
4517 {
4518 int i;
4519 struct imsm_super *mpb = super->anchor;
4520 struct dl *dl;
4521 struct imsm_disk *disk;
4522
4523 for (i = 0; i < mpb->num_disks; i++) {
4524 disk = __get_imsm_disk(mpb, i);
4525 dl = serial_to_dl(disk->serial, super);
4526 if (dl)
4527 continue;
4528
4529 dl = xmalloc(sizeof(*dl));
4530 dl->major = 0;
4531 dl->minor = 0;
4532 dl->fd = -1;
4533 dl->devname = xstrdup("missing");
4534 dl->index = i;
4535 serialcpy(dl->serial, disk->serial);
4536 dl->disk = *disk;
4537 dl->e = NULL;
4538 dl->next = super->missing;
4539 super->missing = dl;
4540 }
4541
4542 return 0;
4543 }
4544
4545 static struct intel_disk *disk_list_get(__u8 *serial, struct intel_disk *disk_list)
4546 {
4547 struct intel_disk *idisk = disk_list;
4548
4549 while (idisk) {
4550 if (serialcmp(idisk->disk.serial, serial) == 0)
4551 break;
4552 idisk = idisk->next;
4553 }
4554
4555 return idisk;
4556 }
4557
4558 static int __prep_thunderdome(struct intel_super **table, int tbl_size,
4559 struct intel_super *super,
4560 struct intel_disk **disk_list)
4561 {
4562 struct imsm_disk *d = &super->disks->disk;
4563 struct imsm_super *mpb = super->anchor;
4564 int i, j;
4565
4566 for (i = 0; i < tbl_size; i++) {
4567 struct imsm_super *tbl_mpb = table[i]->anchor;
4568 struct imsm_disk *tbl_d = &table[i]->disks->disk;
4569
4570 if (tbl_mpb->family_num == mpb->family_num) {
4571 if (tbl_mpb->check_sum == mpb->check_sum) {
4572 dprintf("mpb from %d:%d matches %d:%d\n",
4573 super->disks->major,
4574 super->disks->minor,
4575 table[i]->disks->major,
4576 table[i]->disks->minor);
4577 break;
4578 }
4579
4580 if (((is_configured(d) && !is_configured(tbl_d)) ||
4581 is_configured(d) == is_configured(tbl_d)) &&
4582 tbl_mpb->generation_num < mpb->generation_num) {
4583 /* current version of the mpb is a
4584 * better candidate than the one in
4585 * super_table, but copy over "cross
4586 * generational" status
4587 */
4588 struct intel_disk *idisk;
4589
4590 dprintf("mpb from %d:%d replaces %d:%d\n",
4591 super->disks->major,
4592 super->disks->minor,
4593 table[i]->disks->major,
4594 table[i]->disks->minor);
4595
4596 idisk = disk_list_get(tbl_d->serial, *disk_list);
4597 if (idisk && is_failed(&idisk->disk))
4598 tbl_d->status |= FAILED_DISK;
4599 break;
4600 } else {
4601 struct intel_disk *idisk;
4602 struct imsm_disk *disk;
4603
4604 /* tbl_mpb is more up to date, but copy
4605 * over cross generational status before
4606 * returning
4607 */
4608 disk = __serial_to_disk(d->serial, mpb, NULL);
4609 if (disk && is_failed(disk))
4610 d->status |= FAILED_DISK;
4611
4612 idisk = disk_list_get(d->serial, *disk_list);
4613 if (idisk) {
4614 idisk->owner = i;
4615 if (disk && is_configured(disk))
4616 idisk->disk.status |= CONFIGURED_DISK;
4617 }
4618
4619 dprintf("mpb from %d:%d prefer %d:%d\n",
4620 super->disks->major,
4621 super->disks->minor,
4622 table[i]->disks->major,
4623 table[i]->disks->minor);
4624
4625 return tbl_size;
4626 }
4627 }
4628 }
4629
4630 if (i >= tbl_size)
4631 table[tbl_size++] = super;
4632 else
4633 table[i] = super;
4634
4635 /* update/extend the merged list of imsm_disk records */
4636 for (j = 0; j < mpb->num_disks; j++) {
4637 struct imsm_disk *disk = __get_imsm_disk(mpb, j);
4638 struct intel_disk *idisk;
4639
4640 idisk = disk_list_get(disk->serial, *disk_list);
4641 if (idisk) {
4642 idisk->disk.status |= disk->status;
4643 if (is_configured(&idisk->disk) ||
4644 is_failed(&idisk->disk))
4645 idisk->disk.status &= ~(SPARE_DISK);
4646 } else {
4647 idisk = xcalloc(1, sizeof(*idisk));
4648 idisk->owner = IMSM_UNKNOWN_OWNER;
4649 idisk->disk = *disk;
4650 idisk->next = *disk_list;
4651 *disk_list = idisk;
4652 }
4653
4654 if (serialcmp(idisk->disk.serial, d->serial) == 0)
4655 idisk->owner = i;
4656 }
4657
4658 return tbl_size;
4659 }
4660
4661 static struct intel_super *
4662 validate_members(struct intel_super *super, struct intel_disk *disk_list,
4663 const int owner)
4664 {
4665 struct imsm_super *mpb = super->anchor;
4666 int ok_count = 0;
4667 int i;
4668
4669 for (i = 0; i < mpb->num_disks; i++) {
4670 struct imsm_disk *disk = __get_imsm_disk(mpb, i);
4671 struct intel_disk *idisk;
4672
4673 idisk = disk_list_get(disk->serial, disk_list);
4674 if (idisk) {
4675 if (idisk->owner == owner ||
4676 idisk->owner == IMSM_UNKNOWN_OWNER)
4677 ok_count++;
4678 else
4679 dprintf("'%.16s' owner %d != %d\n",
4680 disk->serial, idisk->owner,
4681 owner);
4682 } else {
4683 dprintf("unknown disk %x [%d]: %.16s\n",
4684 __le32_to_cpu(mpb->family_num), i,
4685 disk->serial);
4686 break;
4687 }
4688 }
4689
4690 if (ok_count == mpb->num_disks)
4691 return super;
4692 return NULL;
4693 }
4694
4695 static void show_conflicts(__u32 family_num, struct intel_super *super_list)
4696 {
4697 struct intel_super *s;
4698
4699 for (s = super_list; s; s = s->next) {
4700 if (family_num != s->anchor->family_num)
4701 continue;
4702 pr_err("Conflict, offlining family %#x on '%s'\n",
4703 __le32_to_cpu(family_num), s->disks->devname);
4704 }
4705 }
4706
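/* Rough outline of imsm_thunderdome() below (explanatory note): every
 * candidate mpb is first reduced to at most one entry per family by
 * __prep_thunderdome(), each surviving entry is cross-checked against the
 * merged disk list by validate_members(), a single non-spare "champion"
 * (or, failing that, a spare) is chosen, and the remaining dl structures
 * are re-homed onto it.
 */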
4707 static struct intel_super *
4708 imsm_thunderdome(struct intel_super **super_list, int len)
4709 {
4710 struct intel_super *super_table[len];
4711 struct intel_disk *disk_list = NULL;
4712 struct intel_super *champion, *spare;
4713 struct intel_super *s, **del;
4714 int tbl_size = 0;
4715 int conflict;
4716 int i;
4717
4718 memset(super_table, 0, sizeof(super_table));
4719 for (s = *super_list; s; s = s->next)
4720 tbl_size = __prep_thunderdome(super_table, tbl_size, s, &disk_list);
4721
4722 for (i = 0; i < tbl_size; i++) {
4723 struct imsm_disk *d;
4724 struct intel_disk *idisk;
4725 struct imsm_super *mpb = super_table[i]->anchor;
4726
4727 s = super_table[i];
4728 d = &s->disks->disk;
4729
4730 /* 'd' must appear in merged disk list for its
4731 * configuration to be valid
4732 */
4733 idisk = disk_list_get(d->serial, disk_list);
4734 if (idisk && idisk->owner == i)
4735 s = validate_members(s, disk_list, i);
4736 else
4737 s = NULL;
4738
4739 if (!s)
4740 dprintf("marking family: %#x from %d:%d offline\n",
4741 mpb->family_num,
4742 super_table[i]->disks->major,
4743 super_table[i]->disks->minor);
4744 super_table[i] = s;
4745 }
4746
4747 /* This is where the mdadm implementation differs from the Windows
4748 * driver which has no strict concept of a container. We can only
4749 * assemble one family from a container, so when returning a prodigal
4750 * array member to this system the code will not be able to disambiguate
4751 * the container contents that should be assembled ("foreign" versus
4752 * "local"). It requires user intervention to set the orig_family_num
4753 * to a new value to establish a new container. The Windows driver in
4754 * this situation fixes up the volume name in place and manages the
4755 * foreign array as an independent entity.
4756 */
4757 s = NULL;
4758 spare = NULL;
4759 conflict = 0;
4760 for (i = 0; i < tbl_size; i++) {
4761 struct intel_super *tbl_ent = super_table[i];
4762 int is_spare = 0;
4763
4764 if (!tbl_ent)
4765 continue;
4766
4767 if (tbl_ent->anchor->num_raid_devs == 0) {
4768 spare = tbl_ent;
4769 is_spare = 1;
4770 }
4771
4772 if (s && !is_spare) {
4773 show_conflicts(tbl_ent->anchor->family_num, *super_list);
4774 conflict++;
4775 } else if (!s && !is_spare)
4776 s = tbl_ent;
4777 }
4778
4779 if (!s)
4780 s = spare;
4781 if (!s) {
4782 champion = NULL;
4783 goto out;
4784 }
4785 champion = s;
4786
4787 if (conflict)
4788 pr_err("Chose family %#x on '%s', assemble conflicts to new container with '--update=uuid'\n",
4789 __le32_to_cpu(s->anchor->family_num), s->disks->devname);
4790
4791 /* collect all dl's onto 'champion', and update them to
4792 * champion's version of the status
4793 */
4794 for (s = *super_list; s; s = s->next) {
4795 struct imsm_super *mpb = champion->anchor;
4796 struct dl *dl = s->disks;
4797
4798 if (s == champion)
4799 continue;
4800
4801 mpb->attributes |= s->anchor->attributes & MPB_ATTRIB_2TB_DISK;
4802
4803 for (i = 0; i < mpb->num_disks; i++) {
4804 struct imsm_disk *disk;
4805
4806 disk = __serial_to_disk(dl->serial, mpb, &dl->index);
4807 if (disk) {
4808 dl->disk = *disk;
4809 /* only set index on disks that are a member of
4810 * a populated container, i.e. one with
4811 * raid_devs
4812 */
4813 if (is_failed(&dl->disk))
4814 dl->index = -2;
4815 else if (is_spare(&dl->disk))
4816 dl->index = -1;
4817 break;
4818 }
4819 }
4820
4821 if (i >= mpb->num_disks) {
4822 struct intel_disk *idisk;
4823
4824 idisk = disk_list_get(dl->serial, disk_list);
4825 if (idisk && is_spare(&idisk->disk) &&
4826 !is_failed(&idisk->disk) && !is_configured(&idisk->disk))
4827 dl->index = -1;
4828 else {
4829 dl->index = -2;
4830 continue;
4831 }
4832 }
4833
4834 dl->next = champion->disks;
4835 champion->disks = dl;
4836 s->disks = NULL;
4837 }
4838
4839 /* delete 'champion' from super_list */
4840 for (del = super_list; *del; ) {
4841 if (*del == champion) {
4842 *del = (*del)->next;
4843 break;
4844 } else
4845 del = &(*del)->next;
4846 }
4847 champion->next = NULL;
4848
4849 out:
4850 while (disk_list) {
4851 struct intel_disk *idisk = disk_list;
4852
4853 disk_list = disk_list->next;
4854 free(idisk);
4855 }
4856
4857 return champion;
4858 }
4859
4860 static int
4861 get_sra_super_block(int fd, struct intel_super **super_list, char *devname, int *max, int keep_fd);
4862 static int get_super_block(struct intel_super **super_list, char *devnm, char *devname,
4863 int major, int minor, int keep_fd);
4864 static int
4865 get_devlist_super_block(struct md_list *devlist, struct intel_super **super_list,
4866 int *max, int keep_fd);
4867
4868 static int load_super_imsm_all(struct supertype *st, int fd, void **sbp,
4869 char *devname, struct md_list *devlist,
4870 int keep_fd)
4871 {
4872 struct intel_super *super_list = NULL;
4873 struct intel_super *super = NULL;
4874 int err = 0;
4875 int i = 0;
4876
4877 if (fd >= 0)
4878 /* 'fd' is an opened container */
4879 err = get_sra_super_block(fd, &super_list, devname, &i, keep_fd);
4880 else
4881 /* get super block from devlist devices */
4882 err = get_devlist_super_block(devlist, &super_list, &i, keep_fd);
4883 if (err)
4884 goto error;
4885 /* all mpbs enter, maybe one leaves */
4886 super = imsm_thunderdome(&super_list, i);
4887 if (!super) {
4888 err = 1;
4889 goto error;
4890 }
4891
4892 if (find_missing(super) != 0) {
4893 free_imsm(super);
4894 err = 2;
4895 goto error;
4896 }
4897
4898 /* load migration record */
4899 err = load_imsm_migr_rec(super, NULL);
4900 if (err == -1) {
4901 /* migration is in progress,
4902 * but migr_rec cannot be loaded,
4903 */
4904 err = 4;
4905 goto error;
4906 }
4907
4908 /* Check migration compatibility */
4909 if (err == 0 && check_mpb_migr_compatibility(super) != 0) {
4910 pr_err("Unsupported migration detected");
4911 if (devname)
4912 fprintf(stderr, " on %s\n", devname);
4913 else
4914 fprintf(stderr, " (IMSM).\n");
4915
4916 err = 5;
4917 goto error;
4918 }
4919
4920 err = 0;
4921
4922 error:
4923 while (super_list) {
4924 struct intel_super *s = super_list;
4925
4926 super_list = super_list->next;
4927 free_imsm(s);
4928 }
4929
4930 if (err)
4931 return err;
4932
4933 *sbp = super;
4934 if (fd >= 0)
4935 strcpy(st->container_devnm, fd2devnm(fd));
4936 else
4937 st->container_devnm[0] = 0;
4938 if (err == 0 && st->ss == NULL) {
4939 st->ss = &super_imsm;
4940 st->minor_version = 0;
4941 st->max_devs = IMSM_MAX_DEVICES;
4942 }
4943 return 0;
4944 }
4945
4946 static int
4947 get_devlist_super_block(struct md_list *devlist, struct intel_super **super_list,
4948 int *max, int keep_fd)
4949 {
4950 struct md_list *tmpdev;
4951 int err = 0;
4952 int i = 0;
4953
4954 for (i = 0, tmpdev = devlist; tmpdev; tmpdev = tmpdev->next) {
4955 if (tmpdev->used != 1)
4956 continue;
4957 if (tmpdev->container == 1) {
4958 int lmax = 0;
4959 int fd = dev_open(tmpdev->devname, O_RDONLY|O_EXCL);
4960 if (fd < 0) {
4961 pr_err("cannot open device %s: %s\n",
4962 tmpdev->devname, strerror(errno));
4963 err = 8;
4964 goto error;
4965 }
4966 err = get_sra_super_block(fd, super_list,
4967 tmpdev->devname, &lmax,
4968 keep_fd);
4969 i += lmax;
4970 close(fd);
4971 if (err) {
4972 err = 7;
4973 goto error;
4974 }
4975 } else {
4976 int major = major(tmpdev->st_rdev);
4977 int minor = minor(tmpdev->st_rdev);
4978 err = get_super_block(super_list,
4979 NULL,
4980 tmpdev->devname,
4981 major, minor,
4982 keep_fd);
4983 i++;
4984 if (err) {
4985 err = 6;
4986 goto error;
4987 }
4988 }
4989 }
4990 error:
4991 *max = i;
4992 return err;
4993 }
4994
4995 static int get_super_block(struct intel_super **super_list, char *devnm, char *devname,
4996 int major, int minor, int keep_fd)
4997 {
4998 struct intel_super *s;
4999 char nm[32];
5000 int dfd = -1;
5001 int err = 0;
5002 int retry;
5003
5004 s = alloc_super();
5005 if (!s) {
5006 err = 1;
5007 goto error;
5008 }
5009
5010 sprintf(nm, "%d:%d", major, minor);
5011 dfd = dev_open(nm, O_RDWR);
5012 if (dfd < 0) {
5013 err = 2;
5014 goto error;
5015 }
5016
5017 get_dev_sector_size(dfd, NULL, &s->sector_size);
5018 find_intel_hba_capability(dfd, s, devname);
5019 err = load_and_parse_mpb(dfd, s, NULL, keep_fd);
5020
5021 /* retry the load if we might have raced against mdmon */
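/* (the retried error, 3, corresponds to the checksum-mismatch path in
 * load_imsm_mpb(), which is what a metadata write in flight would look
 * like) */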
5022 if (err == 3 && devnm && mdmon_running(devnm))
5023 for (retry = 0; retry < 3; retry++) {
5024 usleep(3000);
5025 err = load_and_parse_mpb(dfd, s, NULL, keep_fd);
5026 if (err != 3)
5027 break;
5028 }
5029 error:
5030 if (!err) {
5031 s->next = *super_list;
5032 *super_list = s;
5033 } else {
5034 if (s)
5035 free_imsm(s);
5036 if (dfd >= 0)
5037 close(dfd);
5038 }
5039 if (dfd >= 0 && !keep_fd)
5040 close(dfd);
5041 return err;
5042
5043 }
5044
5045 static int
5046 get_sra_super_block(int fd, struct intel_super **super_list, char *devname, int *max, int keep_fd)
5047 {
5048 struct mdinfo *sra;
5049 char *devnm;
5050 struct mdinfo *sd;
5051 int err = 0;
5052 int i = 0;
5053 sra = sysfs_read(fd, NULL, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
5054 if (!sra)
5055 return 1;
5056
5057 if (sra->array.major_version != -1 ||
5058 sra->array.minor_version != -2 ||
5059 strcmp(sra->text_version, "imsm") != 0) {
5060 err = 1;
5061 goto error;
5062 }
5063 /* load all mpbs */
5064 devnm = fd2devnm(fd);
5065 for (sd = sra->devs, i = 0; sd; sd = sd->next, i++) {
5066 if (get_super_block(super_list, devnm, devname,
5067 sd->disk.major, sd->disk.minor, keep_fd) != 0) {
5068 err = 7;
5069 goto error;
5070 }
5071 }
5072 error:
5073 sysfs_free(sra);
5074 *max = i;
5075 return err;
5076 }
5077
5078 static int load_container_imsm(struct supertype *st, int fd, char *devname)
5079 {
5080 return load_super_imsm_all(st, fd, &st->sb, devname, NULL, 1);
5081 }
5082
5083 static int load_super_imsm(struct supertype *st, int fd, char *devname)
5084 {
5085 struct intel_super *super;
5086 int rv;
5087 int retry;
5088
5089 if (test_partition(fd))
5090 /* IMSM not allowed on partitions */
5091 return 1;
5092
5093 free_super_imsm(st);
5094
5095 super = alloc_super();
5096 if (!super)
5097 return 1;
5098 get_dev_sector_size(fd, NULL, &super->sector_size);
5099 /* Load hba and capabilities if they exist.
5100 * But do not preclude loading metadata in case capabilities or hba are
5101 * non-compliant and ignore_hw_compat is set.
5102 */
5103 rv = find_intel_hba_capability(fd, super, devname);
5104 /* no orom/efi or non-intel hba of the disk */
5105 if (rv != 0 && st->ignore_hw_compat == 0) {
5106 if (devname)
5107 pr_err("No OROM/EFI properties for %s\n", devname);
5108 free_imsm(super);
5109 return 2;
5110 }
5111 rv = load_and_parse_mpb(fd, super, devname, 0);
5112
5113 /* retry the load if we might have raced against mdmon */
5114 if (rv == 3) {
5115 struct mdstat_ent *mdstat = NULL;
5116 char *name = fd2kname(fd);
5117
5118 if (name)
5119 mdstat = mdstat_by_component(name);
5120
5121 if (mdstat && mdmon_running(mdstat->devnm) && getpid() != mdmon_pid(mdstat->devnm)) {
5122 for (retry = 0; retry < 3; retry++) {
5123 usleep(3000);
5124 rv = load_and_parse_mpb(fd, super, devname, 0);
5125 if (rv != 3)
5126 break;
5127 }
5128 }
5129
5130 free_mdstat(mdstat);
5131 }
5132
5133 if (rv) {
5134 if (devname)
5135 pr_err("Failed to load all information sections on %s\n", devname);
5136 free_imsm(super);
5137 return rv;
5138 }
5139
5140 st->sb = super;
5141 if (st->ss == NULL) {
5142 st->ss = &super_imsm;
5143 st->minor_version = 0;
5144 st->max_devs = IMSM_MAX_DEVICES;
5145 }
5146
5147 /* load migration record */
5148 if (load_imsm_migr_rec(super, NULL) == 0) {
5149 /* Check for unsupported migration features */
5150 if (check_mpb_migr_compatibility(super) != 0) {
5151 pr_err("Unsupported migration detected");
5152 if (devname)
5153 fprintf(stderr, " on %s\n", devname);
5154 else
5155 fprintf(stderr, " (IMSM).\n");
5156 return 3;
5157 }
5158 }
5159
5160 return 0;
5161 }
5162
5163 static __u16 info_to_blocks_per_strip(mdu_array_info_t *info)
5164 {
5165 if (info->level == 1)
5166 return 128;
5167 return info->chunk_size >> 9;
5168 }
5169
5170 static unsigned long long info_to_blocks_per_member(mdu_array_info_t *info,
5171 unsigned long long size)
5172 {
5173 if (info->level == 1)
5174 return size * 2;
5175 else
5176 return (size * 2) & ~(info_to_blocks_per_strip(info) - 1);
5177 }
5178
5179 static void imsm_update_version_info(struct intel_super *super)
5180 {
5181 /* update the version and attributes */
5182 struct imsm_super *mpb = super->anchor;
5183 char *version;
5184 struct imsm_dev *dev;
5185 struct imsm_map *map;
5186 int i;
5187
5188 for (i = 0; i < mpb->num_raid_devs; i++) {
5189 dev = get_imsm_dev(super, i);
5190 map = get_imsm_map(dev, MAP_0);
5191 if (__le32_to_cpu(dev->size_high) > 0)
5192 mpb->attributes |= MPB_ATTRIB_2TB;
5193
5194 /* FIXME detect when an array spans a port multiplier */
5195 #if 0
5196 mpb->attributes |= MPB_ATTRIB_PM;
5197 #endif
5198
5199 if (mpb->num_raid_devs > 1 ||
5200 mpb->attributes != MPB_ATTRIB_CHECKSUM_VERIFY) {
5201 version = MPB_VERSION_ATTRIBS;
5202 switch (get_imsm_raid_level(map)) {
5203 case 0: mpb->attributes |= MPB_ATTRIB_RAID0; break;
5204 case 1: mpb->attributes |= MPB_ATTRIB_RAID1; break;
5205 case 10: mpb->attributes |= MPB_ATTRIB_RAID10; break;
5206 case 5: mpb->attributes |= MPB_ATTRIB_RAID5; break;
5207 }
5208 } else {
5209 if (map->num_members >= 5)
5210 version = MPB_VERSION_5OR6_DISK_ARRAY;
5211 else if (dev->status == DEV_CLONE_N_GO)
5212 version = MPB_VERSION_CNG;
5213 else if (get_imsm_raid_level(map) == 5)
5214 version = MPB_VERSION_RAID5;
5215 else if (map->num_members >= 3)
5216 version = MPB_VERSION_3OR4_DISK_ARRAY;
5217 else if (get_imsm_raid_level(map) == 1)
5218 version = MPB_VERSION_RAID1;
5219 else
5220 version = MPB_VERSION_RAID0;
5221 }
5222 strcpy(((char *) mpb->sig) + strlen(MPB_SIGNATURE), version);
5223 }
5224 }
5225
5226 static int check_name(struct intel_super *super, char *name, int quiet)
5227 {
5228 struct imsm_super *mpb = super->anchor;
5229 char *reason = NULL;
5230 int i;
5231
5232 if (strlen(name) > MAX_RAID_SERIAL_LEN)
5233 reason = "must be 16 characters or less";
5234
5235 for (i = 0; i < mpb->num_raid_devs; i++) {
5236 struct imsm_dev *dev = get_imsm_dev(super, i);
5237
5238 if (strncmp((char *) dev->volume, name, MAX_RAID_SERIAL_LEN) == 0) {
5239 reason = "already exists";
5240 break;
5241 }
5242 }
5243
5244 if (reason && !quiet)
5245 pr_err("imsm volume name %s\n", reason);
5246
5247 return !reason;
5248 }
5249
5250 static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
5251 struct shape *s, char *name,
5252 char *homehost, int *uuid,
5253 long long data_offset)
5254 {
5255 /* We are creating a volume inside a pre-existing container.
5256 * so st->sb is already set.
5257 */
5258 struct intel_super *super = st->sb;
5259 unsigned int sector_size = super->sector_size;
5260 struct imsm_super *mpb = super->anchor;
5261 struct intel_dev *dv;
5262 struct imsm_dev *dev;
5263 struct imsm_vol *vol;
5264 struct imsm_map *map;
5265 int idx = mpb->num_raid_devs;
5266 int i;
5267 unsigned long long array_blocks;
5268 size_t size_old, size_new;
5269 unsigned long long num_data_stripes;
5270 unsigned int data_disks;
5271 unsigned long long size_per_member;
5272
5273 if (super->orom && mpb->num_raid_devs >= super->orom->vpa) {
5274 pr_err("This imsm-container already has the maximum of %d volumes\n", super->orom->vpa);
5275 return 0;
5276 }
5277
5278 /* ensure the mpb is large enough for the new data */
5279 size_old = __le32_to_cpu(mpb->mpb_size);
5280 size_new = disks_to_mpb_size(info->nr_disks);
5281 if (size_new > size_old) {
5282 void *mpb_new;
5283 size_t size_round = ROUND_UP(size_new, sector_size);
5284
5285 if (posix_memalign(&mpb_new, sector_size, size_round) != 0) {
5286 pr_err("could not allocate new mpb\n");
5287 return 0;
5288 }
5289 if (posix_memalign(&super->migr_rec_buf, MAX_SECTOR_SIZE,
5290 MIGR_REC_BUF_SECTORS*
5291 MAX_SECTOR_SIZE) != 0) {
5292 pr_err("could not allocate migr_rec buffer\n");
5293 free(super->buf);
5294 free(super);
5295 free(mpb_new);
5296 return 0;
5297 }
5298 memcpy(mpb_new, mpb, size_old);
5299 free(mpb);
5300 mpb = mpb_new;
5301 super->anchor = mpb_new;
5302 mpb->mpb_size = __cpu_to_le32(size_new);
5303 memset(mpb_new + size_old, 0, size_round - size_old);
5304 super->len = size_round;
5305 }
5306 super->current_vol = idx;
5307
5308 /* handle 'failed_disks' by either:
5309 * a) create dummy disk entries in the table if this is the first
5310 * volume in the array. We add them here as this is the only
5311 * opportunity to add them. add_to_super_imsm_volume()
5312 * handles the non-failed disks and continues incrementing
5313 * mpb->num_disks.
5314 * b) validate that 'failed_disks' matches the current number
5315 * of missing disks if the container is populated
5316 */
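/* (illustration, hypothetical scenario: a first-volume create request with
 * one absent member arrives here with info->failed_disks == 1 and gets a
 * single dummy "missing:0" entry added to the disk table below)
 */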
5317 if (super->current_vol == 0) {
5318 mpb->num_disks = 0;
5319 for (i = 0; i < info->failed_disks; i++) {
5320 struct imsm_disk *disk;
5321
5322 mpb->num_disks++;
5323 disk = __get_imsm_disk(mpb, i);
5324 disk->status = CONFIGURED_DISK | FAILED_DISK;
5325 disk->scsi_id = __cpu_to_le32(~(__u32)0);
5326 snprintf((char *) disk->serial, MAX_RAID_SERIAL_LEN,
5327 "missing:%d", (__u8)i);
5328 }
5329 find_missing(super);
5330 } else {
5331 int missing = 0;
5332 struct dl *d;
5333
5334 for (d = super->missing; d; d = d->next)
5335 missing++;
5336 if (info->failed_disks > missing) {
5337 pr_err("unable to add 'missing' disk to container\n");
5338 return 0;
5339 }
5340 }
5341
5342 if (!check_name(super, name, 0))
5343 return 0;
5344 dv = xmalloc(sizeof(*dv));
5345 dev = xcalloc(1, sizeof(*dev) + sizeof(__u32) * (info->raid_disks - 1));
5346 strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
5347 array_blocks = calc_array_size(info->level, info->raid_disks,
5348 info->layout, info->chunk_size,
5349 s->size * BLOCKS_PER_KB);
5350 data_disks = get_data_disks(info->level, info->layout,
5351 info->raid_disks);
5352 array_blocks = round_size_to_mb(array_blocks, data_disks);
5353 size_per_member = array_blocks / data_disks;
5354
5355 dev->size_low = __cpu_to_le32((__u32) array_blocks);
5356 dev->size_high = __cpu_to_le32((__u32) (array_blocks >> 32));
5357 dev->status = (DEV_READ_COALESCING | DEV_WRITE_COALESCING);
5358 vol = &dev->vol;
5359 vol->migr_state = 0;
5360 set_migr_type(dev, MIGR_INIT);
5361 vol->dirty = !info->state;
5362 vol->curr_migr_unit = 0;
5363 map = get_imsm_map(dev, MAP_0);
5364 set_pba_of_lba0(map, super->create_offset);
5365 set_blocks_per_member(map, info_to_blocks_per_member(info,
5366 size_per_member /
5367 BLOCKS_PER_KB));
5368 map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info));
5369 map->failed_disk_num = ~0;
5370 if (info->level > 0)
5371 map->map_state = (info->state ? IMSM_T_STATE_NORMAL
5372 : IMSM_T_STATE_UNINITIALIZED);
5373 else
5374 map->map_state = info->failed_disks ? IMSM_T_STATE_FAILED :
5375 IMSM_T_STATE_NORMAL;
5376 map->ddf = 1;
5377
5378 if (info->level == 1 && info->raid_disks > 2) {
5379 free(dev);
5380 free(dv);
5381 pr_err("imsm does not support more than 2 disksin a raid1 volume\n");
5382 return 0;
5383 }
5384
5385 map->raid_level = info->level;
5386 if (info->level == 10) {
5387 map->raid_level = 1;
5388 map->num_domains = info->raid_disks / 2;
5389 } else if (info->level == 1)
5390 map->num_domains = info->raid_disks;
5391 else
5392 map->num_domains = 1;
5393
5394 /* info->size is only int so use the 'size' parameter instead */
5395 num_data_stripes = size_per_member / info_to_blocks_per_strip(info);
5396 num_data_stripes /= map->num_domains;
5397 set_num_data_stripes(map, num_data_stripes);
5398
5399 map->num_members = info->raid_disks;
5400 for (i = 0; i < map->num_members; i++) {
5401 /* initialized in add_to_super */
5402 set_imsm_ord_tbl_ent(map, i, IMSM_ORD_REBUILD);
5403 }
5404 mpb->num_raid_devs++;
5405 mpb->num_raid_devs_created++;
5406 dev->my_vol_raid_dev_num = mpb->num_raid_devs_created;
5407
5408 if (s->consistency_policy <= CONSISTENCY_POLICY_RESYNC) {
5409 dev->rwh_policy = RWH_MULTIPLE_OFF;
5410 } else if (s->consistency_policy == CONSISTENCY_POLICY_PPL) {
5411 dev->rwh_policy = RWH_MULTIPLE_DISTRIBUTED;
5412 } else {
5413 free(dev);
5414 free(dv);
5415 pr_err("imsm does not support consistency policy %s\n",
5416 map_num(consistency_policies, s->consistency_policy));
5417 return 0;
5418 }
5419
5420 dv->dev = dev;
5421 dv->index = super->current_vol;
5422 dv->next = super->devlist;
5423 super->devlist = dv;
5424
5425 imsm_update_version_info(super);
5426
5427 return 1;
5428 }
5429
5430 static int init_super_imsm(struct supertype *st, mdu_array_info_t *info,
5431 struct shape *s, char *name,
5432 char *homehost, int *uuid,
5433 unsigned long long data_offset)
5434 {
5435 /* This is primarily called by Create when creating a new array.
5436 * We will then get add_to_super called for each component, and then
5437 * write_init_super called to write it out to each device.
5438 * For IMSM, Create can create on fresh devices or on a pre-existing
5439 * array.
5440 * To create on a pre-existing array a different method will be called.
5441 * This one is just for fresh drives.
5442 */
5443 struct intel_super *super;
5444 struct imsm_super *mpb;
5445 size_t mpb_size;
5446 char *version;
5447
5448 if (data_offset != INVALID_SECTORS) {
5449 pr_err("data-offset not supported by imsm\n");
5450 return 0;
5451 }
5452
5453 if (st->sb)
5454 return init_super_imsm_volume(st, info, s, name, homehost, uuid,
5455 data_offset);
5456
5457 if (info)
5458 mpb_size = disks_to_mpb_size(info->nr_disks);
5459 else
5460 mpb_size = MAX_SECTOR_SIZE;
5461
5462 super = alloc_super();
5463 if (super &&
5464 posix_memalign(&super->buf, MAX_SECTOR_SIZE, mpb_size) != 0) {
5465 free_imsm(super);
5466 super = NULL;
5467 }
5468 if (!super) {
5469 pr_err("could not allocate superblock\n");
5470 return 0;
5471 }
5472 if (posix_memalign(&super->migr_rec_buf, MAX_SECTOR_SIZE,
5473 MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE) != 0) {
5474 pr_err("could not allocate migr_rec buffer\n");
5475 free(super->buf);
5476 free_imsm(super);
5477 return 0;
5478 }
5479 memset(super->buf, 0, mpb_size);
5480 mpb = super->buf;
5481 mpb->mpb_size = __cpu_to_le32(mpb_size);
5482 st->sb = super;
5483
5484 if (info == NULL) {
5485 /* zeroing superblock */
5486 return 0;
5487 }
5488
5489 mpb->attributes = MPB_ATTRIB_CHECKSUM_VERIFY;
5490
5491 version = (char *) mpb->sig;
5492 strcpy(version, MPB_SIGNATURE);
5493 version += strlen(MPB_SIGNATURE);
5494 strcpy(version, MPB_VERSION_RAID0);
5495
5496 return 1;
5497 }
5498
5499 static int drive_validate_sector_size(struct intel_super *super, struct dl *dl)
5500 {
5501 unsigned int member_sector_size;
5502
5503 if (dl->fd < 0) {
5504 pr_err("Invalid file descriptor for %s\n", dl->devname);
5505 return 0;
5506 }
5507
5508 if (!get_dev_sector_size(dl->fd, dl->devname, &member_sector_size))
5509 return 0;
5510 if (member_sector_size != super->sector_size)
5511 return 0;
5512 return 1;
5513 }
5514
5515 static int add_to_super_imsm_volume(struct supertype *st, mdu_disk_info_t *dk,
5516 int fd, char *devname)
5517 {
5518 struct intel_super *super = st->sb;
5519 struct imsm_super *mpb = super->anchor;
5520 struct imsm_disk *_disk;
5521 struct imsm_dev *dev;
5522 struct imsm_map *map;
5523 struct dl *dl, *df;
5524 int slot;
5525
5526 dev = get_imsm_dev(super, super->current_vol);
5527 map = get_imsm_map(dev, MAP_0);
5528
5529 if (! (dk->state & (1<<MD_DISK_SYNC))) {
5530 pr_err("%s: Cannot add spare devices to IMSM volume\n",
5531 devname);
5532 return 1;
5533 }
5534
5535 if (fd == -1) {
5536 /* we're doing autolayout so grab the pre-marked (in
5537 * validate_geometry) raid_disk
5538 */
5539 for (dl = super->disks; dl; dl = dl->next)
5540 if (dl->raiddisk == dk->raid_disk)
5541 break;
5542 } else {
5543 for (dl = super->disks; dl ; dl = dl->next)
5544 if (dl->major == dk->major &&
5545 dl->minor == dk->minor)
5546 break;
5547 }
5548
5549 if (!dl) {
5550 pr_err("%s is not a member of the same container\n", devname);
5551 return 1;
5552 }
5553
5554 if (!drive_validate_sector_size(super, dl)) {
5555 pr_err("Combining drives of different sector size in one volume is not allowed\n");
5556 return 1;
5557 }
5558
5559 /* add a pristine spare to the metadata */
5560 if (dl->index < 0) {
5561 dl->index = super->anchor->num_disks;
5562 super->anchor->num_disks++;
5563 }
5564 /* Check the device has not already been added */
5565 slot = get_imsm_disk_slot(map, dl->index);
5566 if (slot >= 0 &&
5567 (get_imsm_ord_tbl_ent(dev, slot, MAP_X) & IMSM_ORD_REBUILD) == 0) {
5568 pr_err("%s has been included in this array twice\n",
5569 devname);
5570 return 1;
5571 }
5572 set_imsm_ord_tbl_ent(map, dk->raid_disk, dl->index);
5573 dl->disk.status = CONFIGURED_DISK;
5574
5575 /* update size of 'missing' disks to be at least as large as the
5576 * largest active member (we only have dummy missing disks when
5577 * creating the first volume)
5578 */
5579 if (super->current_vol == 0) {
5580 for (df = super->missing; df; df = df->next) {
5581 if (total_blocks(&dl->disk) > total_blocks(&df->disk))
5582 set_total_blocks(&df->disk, total_blocks(&dl->disk));
5583 _disk = __get_imsm_disk(mpb, df->index);
5584 *_disk = df->disk;
5585 }
5586 }
5587
5588 /* refresh unset/failed slots to point to valid 'missing' entries */
5589 for (df = super->missing; df; df = df->next)
5590 for (slot = 0; slot < mpb->num_disks; slot++) {
5591 __u32 ord = get_imsm_ord_tbl_ent(dev, slot, MAP_X);
5592
5593 if ((ord & IMSM_ORD_REBUILD) == 0)
5594 continue;
5595 set_imsm_ord_tbl_ent(map, slot, df->index | IMSM_ORD_REBUILD);
5596 if (is_gen_migration(dev)) {
5597 struct imsm_map *map2 = get_imsm_map(dev,
5598 MAP_1);
5599 int slot2 = get_imsm_disk_slot(map2, df->index);
5600 if (slot2 < map2->num_members && slot2 >= 0) {
5601 __u32 ord2 = get_imsm_ord_tbl_ent(dev,
5602 slot2,
5603 MAP_1);
5604 if ((unsigned)df->index ==
5605 ord_to_idx(ord2))
5606 set_imsm_ord_tbl_ent(map2,
5607 slot2,
5608 df->index |
5609 IMSM_ORD_REBUILD);
5610 }
5611 }
5612 dprintf("set slot:%d to missing disk:%d\n", slot, df->index);
5613 break;
5614 }
5615
5616 /* if we are creating the first raid device update the family number */
5617 if (super->current_vol == 0) {
5618 __u32 sum;
5619 struct imsm_dev *_dev = __get_imsm_dev(mpb, 0);
5620
5621 _disk = __get_imsm_disk(mpb, dl->index);
5622 if (!_dev || !_disk) {
5623 pr_err("BUG mpb setup error\n");
5624 return 1;
5625 }
5626 *_dev = *dev;
5627 *_disk = dl->disk;
5628 sum = random32();
5629 sum += __gen_imsm_checksum(mpb);
5630 mpb->family_num = __cpu_to_le32(sum);
5631 mpb->orig_family_num = mpb->family_num;
5632 }
5633 super->current_disk = dl;
5634 return 0;
5635 }
5636
5637 /* mark_spare()
5638 * Function marks the disk as a spare and restores the disk serial
5639 * in case it was previously marked as failed by a takeover operation.
5640 * returns:
5641 * -1 : critical error
5642 * 0 : disk is marked as spare but serial is not set
5643 * 1 : success
5644 */
5645 int mark_spare(struct dl *disk)
5646 {
5647 __u8 serial[MAX_RAID_SERIAL_LEN];
5648 int ret_val = -1;
5649
5650 if (!disk)
5651 return ret_val;
5652
5653 ret_val = 0;
5654 if (!imsm_read_serial(disk->fd, NULL, serial)) {
5655 /* Restore disk serial number, because takeover marks disk
5656 * as failed and adds to serial ':0' before it becomes
5657 * a spare disk.
5658 */
5659 serialcpy(disk->serial, serial);
5660 serialcpy(disk->disk.serial, serial);
5661 ret_val = 1;
5662 }
5663 disk->disk.status = SPARE_DISK;
5664 disk->index = -1;
5665
5666 return ret_val;
5667 }
5668
5669 static int add_to_super_imsm(struct supertype *st, mdu_disk_info_t *dk,
5670 int fd, char *devname,
5671 unsigned long long data_offset)
5672 {
5673 struct intel_super *super = st->sb;
5674 struct dl *dd;
5675 unsigned long long size;
5676 unsigned int member_sector_size;
5677 __u32 id;
5678 int rv;
5679 struct stat stb;
5680
5681 /* If we are on a RAID-enabled platform, check that the disk is
5682 * attached to the raid controller.
5683 * We do not need to test disks attachment for container based additions,
5684 * they shall be already tested when container was created/assembled.
5685 */
5686 rv = find_intel_hba_capability(fd, super, devname);
5687 /* no orom/efi or non-intel hba of the disk */
5688 if (rv != 0) {
5689 dprintf("capability: %p fd: %d ret: %d\n",
5690 super->orom, fd, rv);
5691 return 1;
5692 }
5693
5694 if (super->current_vol >= 0)
5695 return add_to_super_imsm_volume(st, dk, fd, devname);
5696
5697 fstat(fd, &stb);
5698 dd = xcalloc(1, sizeof(*dd));
5699 dd->major = major(stb.st_rdev);
5700 dd->minor = minor(stb.st_rdev);
5701 dd->devname = devname ? xstrdup(devname) : NULL;
5702 dd->fd = fd;
5703 dd->e = NULL;
5704 dd->action = DISK_ADD;
5705 rv = imsm_read_serial(fd, devname, dd->serial);
5706 if (rv) {
5707 pr_err("failed to retrieve scsi serial, aborting\n");
5708 if (dd->devname)
5709 free(dd->devname);
5710 free(dd);
5711 abort();
5712 }
5713 if (super->hba && ((super->hba->type == SYS_DEV_NVME) ||
5714 (super->hba->type == SYS_DEV_VMD))) {
5715 int i;
5716 char *devpath = diskfd_to_devpath(fd);
5717 char controller_path[PATH_MAX];
5718
5719 if (!devpath) {
5720 pr_err("failed to get devpath, aborting\n");
5721 if (dd->devname)
5722 free(dd->devname);
5723 free(dd);
5724 return 1;
5725 }
5726
5727 snprintf(controller_path, PATH_MAX-1, "%s/device", devpath);
5728 free(devpath);
5729
5730 if (devpath_to_vendor(controller_path) == 0x8086) {
5731 /*
5732 * If Intel's NVMe drive has serial ended with
5733 * "-A", "-B", "-1" or "-2", it means that this is an "x8"
5734 * device (two drives on a single PCIe card).
5735 * User should be warned about potential data loss.
5736 */
5737 for (i = MAX_RAID_SERIAL_LEN-1; i > 0; i--) {
5738 /* Skip empty character at the end */
5739 if (dd->serial[i] == 0)
5740 continue;
5741
5742 if (((dd->serial[i] == 'A') ||
5743 (dd->serial[i] == 'B') ||
5744 (dd->serial[i] == '1') ||
5745 (dd->serial[i] == '2')) &&
5746 (dd->serial[i-1] == '-'))
5747 pr_err("\tThe action you are about to take may put your data at risk.\n"
5748 "\tPlease note that x8 devices may consist of two separate x4 devices "
5749 "located on a single PCIe port.\n"
5750 "\tRAID 0 is the only supported configuration for this type of x8 device.\n");
5751 break;
5752 }
5753 } else if (super->hba->type == SYS_DEV_VMD && super->orom &&
5754 !imsm_orom_has_tpv_support(super->orom)) {
5755 pr_err("\tPlatform configuration does not support non-Intel NVMe drives.\n"
5756 "\tPlease refer to Intel(R) RSTe user guide.\n");
5757 free(dd->devname);
5758 free(dd);
5759 return 1;
5760 }
5761 }
5762
5763 get_dev_size(fd, NULL, &size);
5764 get_dev_sector_size(fd, NULL, &member_sector_size);
5765
5766 if (super->sector_size == 0) {
5767 /* this is the first device, so sector_size is not set yet */
5768 super->sector_size = member_sector_size;
5769 }
5770
5771 /* clear migr_rec when adding disk to container */
5772 memset(super->migr_rec_buf, 0, MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE);
5773 if (lseek64(fd, size - MIGR_REC_SECTOR_POSITION*member_sector_size,
5774 SEEK_SET) >= 0) {
5775 if ((unsigned int)write(fd, super->migr_rec_buf,
5776 MIGR_REC_BUF_SECTORS*member_sector_size) !=
5777 MIGR_REC_BUF_SECTORS*member_sector_size)
5778 perror("Write migr_rec failed");
5779 }
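/* Note: the block above drops the (zeroed) migration record into the last
 * sectors of the member device - it seeks to
 * size - MIGR_REC_SECTOR_POSITION * member_sector_size and writes
 * MIGR_REC_BUF_SECTORS sectors - so any stale reshape checkpoint is
 * discarded when the disk joins the container.
 */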
5780
5781 size /= 512;
5782 serialcpy(dd->disk.serial, dd->serial);
5783 set_total_blocks(&dd->disk, size);
5784 if (__le32_to_cpu(dd->disk.total_blocks_hi) > 0) {
5785 struct imsm_super *mpb = super->anchor;
5786 mpb->attributes |= MPB_ATTRIB_2TB_DISK;
5787 }
5788 mark_spare(dd);
5789 if (sysfs_disk_to_scsi_id(fd, &id) == 0)
5790 dd->disk.scsi_id = __cpu_to_le32(id);
5791 else
5792 dd->disk.scsi_id = __cpu_to_le32(0);
5793
5794 if (st->update_tail) {
5795 dd->next = super->disk_mgmt_list;
5796 super->disk_mgmt_list = dd;
5797 } else {
5798 dd->next = super->disks;
5799 super->disks = dd;
5800 super->updates_pending++;
5801 }
5802
5803 return 0;
5804 }
5805
5806 static int remove_from_super_imsm(struct supertype *st, mdu_disk_info_t *dk)
5807 {
5808 struct intel_super *super = st->sb;
5809 struct dl *dd;
5810
5811 /* remove_from_super works only in mdmon context - it uses the
5812 * manager-to-monitor communication channel. Check that the
5813 * communication memory buffer is prepared.
5814 */
5815 if (!st->update_tail) {
5816 pr_err("shall be used in mdmon context only\n");
5817 return 1;
5818 }
5819 dd = xcalloc(1, sizeof(*dd));
5820 dd->major = dk->major;
5821 dd->minor = dk->minor;
5822 dd->fd = -1;
5823 mark_spare(dd);
5824 dd->action = DISK_REMOVE;
5825
5826 dd->next = super->disk_mgmt_list;
5827 super->disk_mgmt_list = dd;
5828
5829 return 0;
5830 }
5831
5832 static int store_imsm_mpb(int fd, struct imsm_super *mpb);
5833
5834 static union {
5835 char buf[MAX_SECTOR_SIZE];
5836 struct imsm_super anchor;
5837 } spare_record __attribute__ ((aligned(MAX_SECTOR_SIZE)));
5838
5839 /* spare records have their own family number and do not have any defined raid
5840 * devices
5841 */
5842 static int write_super_imsm_spares(struct intel_super *super, int doclose)
5843 {
5844 struct imsm_super *mpb = super->anchor;
5845 struct imsm_super *spare = &spare_record.anchor;
5846 __u32 sum;
5847 struct dl *d;
5848
5849 spare->mpb_size = __cpu_to_le32(sizeof(struct imsm_super));
5850 spare->generation_num = __cpu_to_le32(1UL);
5851 spare->attributes = MPB_ATTRIB_CHECKSUM_VERIFY;
5852 spare->num_disks = 1;
5853 spare->num_raid_devs = 0;
5854 spare->cache_size = mpb->cache_size;
5855 spare->pwr_cycle_count = __cpu_to_le32(1);
5856
5857 snprintf((char *) spare->sig, MAX_SIGNATURE_LENGTH,
5858 MPB_SIGNATURE MPB_VERSION_RAID0);
5859
5860 for (d = super->disks; d; d = d->next) {
5861 if (d->index != -1)
5862 continue;
5863
5864 spare->disk[0] = d->disk;
5865 if (__le32_to_cpu(d->disk.total_blocks_hi) > 0)
5866 spare->attributes |= MPB_ATTRIB_2TB_DISK;
5867
5868 if (super->sector_size == 4096)
5869 convert_to_4k_imsm_disk(&spare->disk[0]);
5870
5871 sum = __gen_imsm_checksum(spare);
5872 spare->family_num = __cpu_to_le32(sum);
5873 spare->orig_family_num = 0;
5874 sum = __gen_imsm_checksum(spare);
5875 spare->check_sum = __cpu_to_le32(sum);
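/* Note: the checksum is generated twice on purpose. The first pass, over
 * the record as it stands, only seeds a per-spare family number; the
 * second pass covers the final contents (including that family number)
 * and becomes the real check_sum.
 */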
5876
5877 if (store_imsm_mpb(d->fd, spare)) {
5878 pr_err("failed for device %d:%d %s\n",
5879 d->major, d->minor, strerror(errno));
5880 return 1;
5881 }
5882 if (doclose) {
5883 close(d->fd);
5884 d->fd = -1;
5885 }
5886 }
5887
5888 return 0;
5889 }
5890
5891 static int write_super_imsm(struct supertype *st, int doclose)
5892 {
5893 struct intel_super *super = st->sb;
5894 unsigned int sector_size = super->sector_size;
5895 struct imsm_super *mpb = super->anchor;
5896 struct dl *d;
5897 __u32 generation;
5898 __u32 sum;
5899 int spares = 0;
5900 int i;
5901 __u32 mpb_size = sizeof(struct imsm_super) - sizeof(struct imsm_disk);
5902 int num_disks = 0;
5903 int clear_migration_record = 1;
5904 __u32 bbm_log_size;
5905
5906 /* 'generation' is incremented every time the metadata is written */
5907 generation = __le32_to_cpu(mpb->generation_num);
5908 generation++;
5909 mpb->generation_num = __cpu_to_le32(generation);
5910
5911 /* fix up cases where previous mdadm releases failed to set
5912 * orig_family_num
5913 */
5914 if (mpb->orig_family_num == 0)
5915 mpb->orig_family_num = mpb->family_num;
5916
5917 for (d = super->disks; d; d = d->next) {
5918 if (d->index == -1)
5919 spares++;
5920 else {
5921 mpb->disk[d->index] = d->disk;
5922 num_disks++;
5923 }
5924 }
5925 for (d = super->missing; d; d = d->next) {
5926 mpb->disk[d->index] = d->disk;
5927 num_disks++;
5928 }
5929 mpb->num_disks = num_disks;
5930 mpb_size += sizeof(struct imsm_disk) * mpb->num_disks;
5931
5932 for (i = 0; i < mpb->num_raid_devs; i++) {
5933 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
5934 struct imsm_dev *dev2 = get_imsm_dev(super, i);
5935 if (dev && dev2) {
5936 imsm_copy_dev(dev, dev2);
5937 mpb_size += sizeof_imsm_dev(dev, 0);
5938 }
5939 if (is_gen_migration(dev2))
5940 clear_migration_record = 0;
5941 }
5942
5943 bbm_log_size = get_imsm_bbm_log_size(super->bbm_log);
5944
5945 if (bbm_log_size) {
5946 memcpy((void *)mpb + mpb_size, super->bbm_log, bbm_log_size);
5947 mpb->attributes |= MPB_ATTRIB_BBM;
5948 } else
5949 mpb->attributes &= ~MPB_ATTRIB_BBM;
5950
5951 super->anchor->bbm_log_size = __cpu_to_le32(bbm_log_size);
5952 mpb_size += bbm_log_size;
5953 mpb->mpb_size = __cpu_to_le32(mpb_size);
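/* At this point mpb_size covers the whole variable-length anchor:
 * sizeof(struct imsm_super) (which already embeds one struct imsm_disk),
 * plus one struct imsm_disk per additional disk, plus each variable-sized
 * struct imsm_dev, plus the BBM log appended right after the raid devices.
 */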
5954
5955 #ifdef DEBUG
5956 assert(super->len == 0 || mpb_size <= super->len);
5957 #endif
5958
5959 /* recalculate checksum */
5960 sum = __gen_imsm_checksum(mpb);
5961 mpb->check_sum = __cpu_to_le32(sum);
5962
5963 if (super->clean_migration_record_by_mdmon) {
5964 clear_migration_record = 1;
5965 super->clean_migration_record_by_mdmon = 0;
5966 }
5967 if (clear_migration_record)
5968 memset(super->migr_rec_buf, 0,
5969 MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE);
5970
5971 if (sector_size == 4096)
5972 convert_to_4k(super);
5973
5974 /* write the mpb for disks that compose raid devices */
5975 for (d = super->disks; d ; d = d->next) {
5976 if (d->index < 0 || is_failed(&d->disk))
5977 continue;
5978
5979 if (clear_migration_record) {
5980 unsigned long long dsize;
5981
5982 get_dev_size(d->fd, NULL, &dsize);
5983 if (lseek64(d->fd, dsize - sector_size,
5984 SEEK_SET) >= 0) {
5985 if ((unsigned int)write(d->fd,
5986 super->migr_rec_buf,
5987 MIGR_REC_BUF_SECTORS*sector_size) !=
5988 MIGR_REC_BUF_SECTORS*sector_size)
5989 perror("Write migr_rec failed");
5990 }
5991 }
5992
5993 if (store_imsm_mpb(d->fd, mpb))
5994 fprintf(stderr,
5995 "failed for device %d:%d (fd: %d)%s\n",
5996 d->major, d->minor,
5997 d->fd, strerror(errno));
5998
5999 if (doclose) {
6000 close(d->fd);
6001 d->fd = -1;
6002 }
6003 }
6004
6005 if (spares)
6006 return write_super_imsm_spares(super, doclose);
6007
6008 return 0;
6009 }
6010
6011 static int create_array(struct supertype *st, int dev_idx)
6012 {
6013 size_t len;
6014 struct imsm_update_create_array *u;
6015 struct intel_super *super = st->sb;
6016 struct imsm_dev *dev = get_imsm_dev(super, dev_idx);
6017 struct imsm_map *map = get_imsm_map(dev, MAP_0);
6018 struct disk_info *inf;
6019 struct imsm_disk *disk;
6020 int i;
6021
6022 len = sizeof(*u) - sizeof(*dev) + sizeof_imsm_dev(dev, 0) +
6023 sizeof(*inf) * map->num_members;
6024 u = xmalloc(len);
6025 u->type = update_create_array;
6026 u->dev_idx = dev_idx;
6027 imsm_copy_dev(&u->dev, dev);
6028 inf = get_disk_info(u);
6029 for (i = 0; i < map->num_members; i++) {
6030 int idx = get_imsm_disk_idx(dev, i, MAP_X);
6031
6032 disk = get_imsm_disk(super, idx);
6033 if (!disk)
6034 disk = get_imsm_missing(super, idx);
6035 serialcpy(inf[i].serial, disk->serial);
6036 }
6037 append_metadata_update(st, u, len);
6038
6039 return 0;
6040 }
6041
6042 static int mgmt_disk(struct supertype *st)
6043 {
6044 struct intel_super *super = st->sb;
6045 size_t len;
6046 struct imsm_update_add_remove_disk *u;
6047
6048 if (!super->disk_mgmt_list)
6049 return 0;
6050
6051 len = sizeof(*u);
6052 u = xmalloc(len);
6053 u->type = update_add_remove_disk;
6054 append_metadata_update(st, u, len);
6055
6056 return 0;
6057 }
6058
6059 __u32 crc32c_le(__u32 crc, unsigned char const *p, size_t len);
6060
6061 static int write_init_ppl_imsm(struct supertype *st, struct mdinfo *info, int fd)
6062 {
6063 struct intel_super *super = st->sb;
6064 void *buf;
6065 struct ppl_header *ppl_hdr;
6066 int ret;
6067
6068 /* first clear entire ppl space */
6069 ret = zero_disk_range(fd, info->ppl_sector, info->ppl_size);
6070 if (ret)
6071 return ret;
6072
6073 ret = posix_memalign(&buf, MAX_SECTOR_SIZE, PPL_HEADER_SIZE);
6074 if (ret) {
6075 pr_err("Failed to allocate PPL header buffer\n");
6076 return ret;
6077 }
6078
6079 memset(buf, 0, PPL_HEADER_SIZE);
6080 ppl_hdr = buf;
6081 memset(ppl_hdr->reserved, 0xff, PPL_HDR_RESERVED);
6082 ppl_hdr->signature = __cpu_to_le32(super->anchor->orig_family_num);
6083 ppl_hdr->checksum = __cpu_to_le32(~crc32c_le(~0, buf, PPL_HEADER_SIZE));
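/* buf now holds an "empty" PPL header: the reserved area is filled with
 * 0xff, the signature ties it to this container via orig_family_num, and
 * the checksum field carries the bit-inverted CRC32C of the whole
 * PPL_HEADER_SIZE buffer, computed while the checksum field was still zero.
 */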
6084
6085 if (lseek64(fd, info->ppl_sector * 512, SEEK_SET) < 0) {
6086 ret = errno;
6087 perror("Failed to seek to PPL header location");
6088 }
6089
6090 if (!ret && write(fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
6091 ret = errno;
6092 perror("Write PPL header failed");
6093 }
6094
6095 if (!ret)
6096 fsync(fd);
6097
6098 free(buf);
6099 return ret;
6100 }
6101
6102 static int validate_ppl_imsm(struct supertype *st, struct mdinfo *info,
6103 struct mdinfo *disk)
6104 {
6105 struct intel_super *super = st->sb;
6106 struct dl *d;
6107 void *buf;
6108 int ret = 0;
6109 struct ppl_header *ppl_hdr;
6110 __u32 crc;
6111 struct imsm_dev *dev;
6112 struct imsm_map *map;
6113 __u32 idx;
6114 unsigned int i;
6115 unsigned long long ppl_offset = 0;
6116 unsigned long long prev_gen_num = 0;
6117
6118 if (disk->disk.raid_disk < 0)
6119 return 0;
6120
6121 if (posix_memalign(&buf, MAX_SECTOR_SIZE, PPL_HEADER_SIZE)) {
6122 pr_err("Failed to allocate PPL header buffer\n");
6123 return -1;
6124 }
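/* The scan below walks candidate PPL headers within
 * MULTIPLE_PPL_AREA_SIZE_IMSM, advancing past each header and its
 * partial-parity entries. ret stays 1 while no valid header has been
 * found (the caller then re-initializes the PPL if the map is still
 * uninitialized), drops to 0 once a header with a correct CRC32C and a
 * matching container signature is seen, and becomes -1 on I/O errors.
 * A header with a lower generation than the previous one ends the scan,
 * since the newest copy has already been validated.
 */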
6125
6126 dev = get_imsm_dev(super, info->container_member);
6127 map = get_imsm_map(dev, MAP_X);
6128 idx = get_imsm_disk_idx(dev, disk->disk.raid_disk, MAP_X);
6129 d = get_imsm_dl_disk(super, idx);
6130
6131 if (!d || d->index < 0 || is_failed(&d->disk))
6132 goto out;
6133
6134 ret = 1;
6135 while (ppl_offset < MULTIPLE_PPL_AREA_SIZE_IMSM) {
6136 dprintf("Checking potential PPL at offset: %llu\n", ppl_offset);
6137
6138 if (lseek64(d->fd, info->ppl_sector * 512 + ppl_offset,
6139 SEEK_SET) < 0) {
6140 perror("Failed to seek to PPL header location");
6141 ret = -1;
6142 goto out;
6143 }
6144
6145 if (read(d->fd, buf, PPL_HEADER_SIZE) != PPL_HEADER_SIZE) {
6146 perror("Read PPL header failed");
6147 ret = -1;
6148 goto out;
6149 }
6150
6151 ppl_hdr = buf;
6152
6153 crc = __le32_to_cpu(ppl_hdr->checksum);
6154 ppl_hdr->checksum = 0;
6155
6156 if (crc != ~crc32c_le(~0, buf, PPL_HEADER_SIZE)) {
6157 dprintf("Wrong PPL header checksum on %s\n",
6158 d->devname);
6159 goto out;
6160 }
6161
6162 if (prev_gen_num > __le64_to_cpu(ppl_hdr->generation)) {
6163 /* previous was newest, it was already checked */
6164 goto out;
6165 }
6166
6167 if ((__le32_to_cpu(ppl_hdr->signature) !=
6168 super->anchor->orig_family_num)) {
6169 dprintf("Wrong PPL header signature on %s\n",
6170 d->devname);
6171 ret = 1;
6172 goto out;
6173 }
6174
6175 ret = 0;
6176 prev_gen_num = __le64_to_cpu(ppl_hdr->generation);
6177
6178 ppl_offset += PPL_HEADER_SIZE;
6179 for (i = 0; i < __le32_to_cpu(ppl_hdr->entries_count); i++)
6180 ppl_offset +=
6181 __le32_to_cpu(ppl_hdr->entries[i].pp_size);
6182 }
6183
6184 out:
6185 free(buf);
6186
6187 if (ret == 1 && map->map_state == IMSM_T_STATE_UNINITIALIZED)
6188 return st->ss->write_init_ppl(st, info, d->fd);
6189
6190 return ret;
6191 }
6192
6193 static int write_init_ppl_imsm_all(struct supertype *st, struct mdinfo *info)
6194 {
6195 struct intel_super *super = st->sb;
6196 struct dl *d;
6197 int ret = 0;
6198
6199 if (info->consistency_policy != CONSISTENCY_POLICY_PPL ||
6200 info->array.level != 5)
6201 return 0;
6202
6203 for (d = super->disks; d ; d = d->next) {
6204 if (d->index < 0 || is_failed(&d->disk))
6205 continue;
6206
6207 ret = st->ss->write_init_ppl(st, info, d->fd);
6208 if (ret)
6209 break;
6210 }
6211
6212 return ret;
6213 }
6214
6215 static int write_init_super_imsm(struct supertype *st)
6216 {
6217 struct intel_super *super = st->sb;
6218 int current_vol = super->current_vol;
6219 int rv = 0;
6220 struct mdinfo info;
6221
6222 getinfo_super_imsm(st, &info, NULL);
6223
6224 /* we are done with current_vol reset it to point st at the container */
6225 super->current_vol = -1;
6226
6227 if (st->update_tail) {
6228 /* queue the recently created array / added disk
6229 * as a metadata update */
6230
6231 /* determine if we are creating a volume or adding a disk */
6232 if (current_vol < 0) {
6233 /* in the mgmt (add/remove) disk case we are running
6234 * in mdmon context, so don't close fd's
6235 */
6236 rv = mgmt_disk(st);
6237 } else {
6238 rv = write_init_ppl_imsm_all(st, &info);
6239 if (!rv)
6240 rv = create_array(st, current_vol);
6241 }
6242 } else {
6243 struct dl *d;
6244 for (d = super->disks; d; d = d->next)
6245 Kill(d->devname, NULL, 0, -1, 1);
6246 if (current_vol >= 0)
6247 rv = write_init_ppl_imsm_all(st, &info);
6248 if (!rv)
6249 rv = write_super_imsm(st, 1);
6250 }
6251
6252 return rv;
6253 }
6254
6255 static int store_super_imsm(struct supertype *st, int fd)
6256 {
6257 struct intel_super *super = st->sb;
6258 struct imsm_super *mpb = super ? super->anchor : NULL;
6259
6260 if (!mpb)
6261 return 1;
6262
6263 if (super->sector_size == 4096)
6264 convert_to_4k(super);
6265 return store_imsm_mpb(fd, mpb);
6266 }
6267
6268 static int validate_geometry_imsm_container(struct supertype *st, int level,
6269 int layout, int raiddisks, int chunk,
6270 unsigned long long size,
6271 unsigned long long data_offset,
6272 char *dev,
6273 unsigned long long *freesize,
6274 int verbose)
6275 {
6276 int fd;
6277 unsigned long long ldsize;
6278 struct intel_super *super;
6279 int rv = 0;
6280
6281 if (level != LEVEL_CONTAINER)
6282 return 0;
6283 if (!dev)
6284 return 1;
6285
6286 fd = open(dev, O_RDONLY|O_EXCL, 0);
6287 if (fd < 0) {
6288 if (verbose > 0)
6289 pr_err("imsm: Cannot open %s: %s\n",
6290 dev, strerror(errno));
6291 return 0;
6292 }
6293 if (!get_dev_size(fd, dev, &ldsize)) {
6294 close(fd);
6295 return 0;
6296 }
6297
6298 /* retrieving the capabilities is possible here;
6299 * note that there is no fd for the disks in the array.
6300 */
6301 super = alloc_super();
6302 if (!super) {
6303 close(fd);
6304 return 0;
6305 }
6306 if (!get_dev_sector_size(fd, NULL, &super->sector_size)) {
6307 close(fd);
6308 free_imsm(super);
6309 return 0;
6310 }
6311
6312 rv = find_intel_hba_capability(fd, super, verbose > 0 ? dev : NULL);
6313 if (rv != 0) {
6314 #if DEBUG
6315 char str[256];
6316 fd2devname(fd, str);
6317 dprintf("fd: %d %s orom: %p rv: %d raiddisk: %d\n",
6318 fd, str, super->orom, rv, raiddisks);
6319 #endif
6320 /* no OROM/EFI, or the disk is attached to a non-Intel HBA */
6321 close(fd);
6322 free_imsm(super);
6323 return 0;
6324 }
6325 close(fd);
6326 if (super->orom) {
6327 if (raiddisks > super->orom->tds) {
6328 if (verbose)
6329 pr_err("%d exceeds maximum number of platform supported disks: %d\n",
6330 raiddisks, super->orom->tds);
6331 free_imsm(super);
6332 return 0;
6333 }
6334 if ((super->orom->attr & IMSM_OROM_ATTR_2TB_DISK) == 0 &&
6335 (ldsize >> 9) >> 32 > 0) {
6336 if (verbose)
6337 pr_err("%s exceeds maximum platform supported size\n", dev);
6338 free_imsm(super);
6339 return 0;
6340 }
6341 }
6342
6343 *freesize = avail_size_imsm(st, ldsize >> 9, data_offset);
6344 free_imsm(super);
6345
6346 return 1;
6347 }
6348
6349 static unsigned long long find_size(struct extent *e, int *idx, int num_extents)
6350 {
6351 const unsigned long long base_start = e[*idx].start;
6352 unsigned long long end = base_start + e[*idx].size;
6353 int i;
6354
6355 if (base_start == end)
6356 return 0;
6357
6358 *idx = *idx + 1;
6359 for (i = *idx; i < num_extents; i++) {
6360 /* extend overlapping extents */
6361 if (e[i].start >= base_start &&
6362 e[i].start <= end) {
6363 if (e[i].size == 0)
6364 return 0;
6365 if (e[i].start + e[i].size > end)
6366 end = e[i].start + e[i].size;
6367 } else if (e[i].start > end) {
6368 *idx = i;
6369 break;
6370 }
6371 }
6372
6373 return end - base_start;
6374 }
6375
6376 static unsigned long long merge_extents(struct intel_super *super, int sum_extents)
6377 {
6378 /* build a composite disk with all known extents and generate a new
6379 * 'maxsize' given the "all disks in an array must share a common start
6380 * offset" constraint
6381 */
6382 struct extent *e = xcalloc(sum_extents, sizeof(*e));
6383 struct dl *dl;
6384 int i, j;
6385 int start_extent;
6386 unsigned long long pos;
6387 unsigned long long start = 0;
6388 unsigned long long maxsize;
6389 unsigned long reserve;
6390
6391 /* coalesce and sort all extents. also, check to see if we need to
6392 * reserve space between member arrays
6393 */
6394 j = 0;
6395 for (dl = super->disks; dl; dl = dl->next) {
6396 if (!dl->e)
6397 continue;
6398 for (i = 0; i < dl->extent_cnt; i++)
6399 e[j++] = dl->e[i];
6400 }
6401 qsort(e, sum_extents, sizeof(*e), cmp_extent);
6402
6403 /* merge extents */
6404 i = 0;
6405 j = 0;
6406 while (i < sum_extents) {
6407 e[j].start = e[i].start;
6408 e[j].size = find_size(e, &i, sum_extents);
6409 j++;
6410 if (e[j-1].size == 0)
6411 break;
6412 }
6413
6414 pos = 0;
6415 maxsize = 0;
6416 start_extent = 0;
6417 i = 0;
6418 do {
6419 unsigned long long esize;
6420
6421 esize = e[i].start - pos;
6422 if (esize >= maxsize) {
6423 maxsize = esize;
6424 start = pos;
6425 start_extent = i;
6426 }
6427 pos = e[i].start + e[i].size;
6428 i++;
6429 } while (e[i-1].size);
6430 free(e);
6431
6432 if (maxsize == 0)
6433 return 0;
6434
6435 /* FIXME assumes volume at offset 0 is the first volume in a
6436 * container
6437 */
6438 if (start_extent > 0)
6439 reserve = IMSM_RESERVED_SECTORS; /* gap between raid regions */
6440 else
6441 reserve = 0;
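/* Illustrative example (hypothetical numbers): if every member disk has a
 * used extent at [0, 1000) for an existing volume plus its metadata extent
 * at the end of the disk, the largest common gap starts at sector 1000.
 * start_extent is then > 0, so IMSM_RESERVED_SECTORS are kept between the
 * existing volume and the new one: create_offset becomes
 * 1000 + IMSM_RESERVED_SECTORS and the returned size is the gap minus the
 * reserve.
 */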
6442
6443 if (maxsize < reserve)
6444 return 0;
6445
6446 super->create_offset = ~((unsigned long long) 0);
6447 if (start + reserve > super->create_offset)
6448 return 0; /* start overflows create_offset */
6449 super->create_offset = start + reserve;
6450
6451 return maxsize - reserve;
6452 }
6453
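/* Summary of the checks below: levels 4 and 6 are never supported by IMSM.
 * With an OROM present, RAID0/1/5/10 are gated on the corresponding
 * capability bits; RAID1 with more than two disks is treated as RAID1E,
 * RAID10 requires exactly four disks and RAID5 more than two. Without any
 * platform constraints (no OROM) every remaining level is allowed.
 */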
6454 static int is_raid_level_supported(const struct imsm_orom *orom, int level, int raiddisks)
6455 {
6456 if (level < 0 || level == 6 || level == 4)
6457 return 0;
6458
6459 /* if we have an orom prevent invalid raid levels */
6460 if (orom)
6461 switch (level) {
6462 case 0: return imsm_orom_has_raid0(orom);
6463 case 1:
6464 if (raiddisks > 2)
6465 return imsm_orom_has_raid1e(orom);
6466 return imsm_orom_has_raid1(orom) && raiddisks == 2;
6467 case 10: return imsm_orom_has_raid10(orom) && raiddisks == 4;
6468 case 5: return imsm_orom_has_raid5(orom) && raiddisks > 2;
6469 }
6470 else
6471 return 1; /* not on an Intel RAID platform so anything goes */
6472
6473 return 0;
6474 }
6475
6476 static int
6477 active_arrays_by_format(char *name, char* hba, struct md_list **devlist,
6478 int dpa, int verbose)
6479 {
6480 struct mdstat_ent *mdstat = mdstat_read(0, 0);
6481 struct mdstat_ent *memb;
6482 int count = 0;
6483 int num = 0;
6484 struct md_list *dv;
6485 int found;
6486
6487 for (memb = mdstat ; memb ; memb = memb->next) {
6488 if (memb->metadata_version &&
6489 (strncmp(memb->metadata_version, "external:", 9) == 0) &&
6490 (strcmp(&memb->metadata_version[9], name) == 0) &&
6491 !is_subarray(memb->metadata_version+9) &&
6492 memb->members) {
6493 struct dev_member *dev = memb->members;
6494 int fd = -1;
6495 while(dev && (fd < 0)) {
6496 char *path = xmalloc(strlen(dev->name) + strlen("/dev/") + 1);
6497 num = sprintf(path, "%s%s", "/dev/", dev->name);
6498 if (num > 0)
6499 fd = open(path, O_RDONLY, 0);
6500 if (num <= 0 || fd < 0) {
6501 pr_vrb("Cannot open %s: %s\n",
6502 dev->name, strerror(errno));
6503 }
6504 free(path);
6505 dev = dev->next;
6506 }
6507 found = 0;
6508 if (fd >= 0 && disk_attached_to_hba(fd, hba)) {
6509 struct mdstat_ent *vol;
6510 for (vol = mdstat ; vol ; vol = vol->next) {
6511 if (vol->active > 0 &&
6512 vol->metadata_version &&
6513 is_container_member(vol, memb->devnm)) {
6514 found++;
6515 count++;
6516 }
6517 }
6518 if (*devlist && (found < dpa)) {
6519 dv = xcalloc(1, sizeof(*dv));
6520 dv->devname = xmalloc(strlen(memb->devnm) + strlen("/dev/") + 1);
6521 sprintf(dv->devname, "%s%s", "/dev/", memb->devnm);
6522 dv->found = found;
6523 dv->used = 0;
6524 dv->next = *devlist;
6525 *devlist = dv;
6526 }
6527 }
6528 if (fd >= 0)
6529 close(fd);
6530 }
6531 }
6532 free_mdstat(mdstat);
6533 return count;
6534 }
6535
6536 #ifdef DEBUG_LOOP
6537 static struct md_list*
6538 get_loop_devices(void)
6539 {
6540 int i;
6541 struct md_list *devlist = NULL;
6542 struct md_list *dv;
6543
6544 for(i = 0; i < 12; i++) {
6545 dv = xcalloc(1, sizeof(*dv));
6546 dv->devname = xmalloc(40);
6547 sprintf(dv->devname, "/dev/loop%d", i);
6548 dv->next = devlist;
6549 devlist = dv;
6550 }
6551 return devlist;
6552 }
6553 #endif
6554
6555 static struct md_list*
6556 get_devices(const char *hba_path)
6557 {
6558 struct md_list *devlist = NULL;
6559 struct md_list *dv;
6560 struct dirent *ent;
6561 DIR *dir;
6562 int err = 0;
6563
6564 #if DEBUG_LOOP
6565 devlist = get_loop_devices();
6566 return devlist;
6567 #endif
6568 /* scan through /sys/dev/block looking for devices attached to
6569 * this hba
6570 */
6571 dir = opendir("/sys/dev/block");
6572 for (ent = dir ? readdir(dir) : NULL; ent; ent = readdir(dir)) {
6573 int fd;
6574 char buf[1024];
6575 int major, minor;
6576 char *path = NULL;
6577 if (sscanf(ent->d_name, "%d:%d", &major, &minor) != 2)
6578 continue;
6579 path = devt_to_devpath(makedev(major, minor));
6580 if (!path)
6581 continue;
6582 if (!path_attached_to_hba(path, hba_path)) {
6583 free(path);
6584 path = NULL;
6585 continue;
6586 }
6587 free(path);
6588 path = NULL;
6589 fd = dev_open(ent->d_name, O_RDONLY);
6590 if (fd >= 0) {
6591 fd2devname(fd, buf);
6592 close(fd);
6593 } else {
6594 pr_err("cannot open device: %s\n",
6595 ent->d_name);
6596 continue;
6597 }
6598
6599 dv = xcalloc(1, sizeof(*dv));
6600 dv->devname = xstrdup(buf);
6601 dv->next = devlist;
6602 devlist = dv;
6603 }
6604 if (err) {
6605 while(devlist) {
6606 dv = devlist;
6607 devlist = devlist->next;
6608 free(dv->devname);
6609 free(dv);
6610 }
6611 }
6612 closedir(dir);
6613 return devlist;
6614 }
6615
6616 static int
6617 count_volumes_list(struct md_list *devlist, char *homehost,
6618 int verbose, int *found)
6619 {
6620 struct md_list *tmpdev;
6621 int count = 0;
6622 struct supertype *st;
6623
6624 /* first walk the list of devices to find a consistent set
6625 * that matches the criteria, if that is possible.
6626 * We flag the ones we like with 'used'.
6627 */
6628 *found = 0;
6629 st = match_metadata_desc_imsm("imsm");
6630 if (st == NULL) {
6631 pr_vrb("cannot allocate memory for imsm supertype\n");
6632 return 0;
6633 }
6634
6635 for (tmpdev = devlist; tmpdev; tmpdev = tmpdev->next) {
6636 char *devname = tmpdev->devname;
6637 dev_t rdev;
6638 struct supertype *tst;
6639 int dfd;
6640 if (tmpdev->used > 1)
6641 continue;
6642 tst = dup_super(st);
6643 if (tst == NULL) {
6644 pr_vrb("cannot allocate memory for imsm supertype\n");
6645 goto err_1;
6646 }
6647 tmpdev->container = 0;
6648 dfd = dev_open(devname, O_RDONLY|O_EXCL);
6649 if (dfd < 0) {
6650 dprintf("cannot open device %s: %s\n",
6651 devname, strerror(errno));
6652 tmpdev->used = 2;
6653 } else if (!fstat_is_blkdev(dfd, devname, &rdev)) {
6654 tmpdev->used = 2;
6655 } else if (must_be_container(dfd)) {
6656 struct supertype *cst;
6657 cst = super_by_fd(dfd, NULL);
6658 if (cst == NULL) {
6659 dprintf("cannot recognize container type %s\n",
6660 devname);
6661 tmpdev->used = 2;
6662 } else if (tst->ss != st->ss) {
6663 dprintf("non-imsm container - ignore it: %s\n",
6664 devname);
6665 tmpdev->used = 2;
6666 } else if (!tst->ss->load_container ||
6667 tst->ss->load_container(tst, dfd, NULL))
6668 tmpdev->used = 2;
6669 else {
6670 tmpdev->container = 1;
6671 }
6672 if (cst)
6673 cst->ss->free_super(cst);
6674 } else {
6675 tmpdev->st_rdev = rdev;
6676 if (tst->ss->load_super(tst,dfd, NULL)) {
6677 dprintf("no RAID superblock on %s\n",
6678 devname);
6679 tmpdev->used = 2;
6680 } else if (tst->ss->compare_super == NULL) {
6681 dprintf("Cannot assemble %s metadata on %s\n",
6682 tst->ss->name, devname);
6683 tmpdev->used = 2;
6684 }
6685 }
6686 if (dfd >= 0)
6687 close(dfd);
6688 if (tmpdev->used == 2 || tmpdev->used == 4) {
6689 /* Ignore unrecognised devices during auto-assembly */
6690 goto loop;
6691 }
6692 else {
6693 struct mdinfo info;
6694 tst->ss->getinfo_super(tst, &info, NULL);
6695
6696 if (st->minor_version == -1)
6697 st->minor_version = tst->minor_version;
6698
6699 if (memcmp(info.uuid, uuid_zero,
6700 sizeof(int[4])) == 0) {
6701 /* this is a floating spare. It cannot define
6702 * an array unless there are no more arrays of
6703 * this type to be found. It can be included
6704 * in an array of this type though.
6705 */
6706 tmpdev->used = 3;
6707 goto loop;
6708 }
6709
6710 if (st->ss != tst->ss ||
6711 st->minor_version != tst->minor_version ||
6712 st->ss->compare_super(st, tst) != 0) {
6713 /* Some mismatch. If exactly one array matches this host,
6714 * we can resolve on that one.
6715 * Or, if we are auto assembling, we just ignore the second
6716 * for now.
6717 */
6718 dprintf("superblock on %s doesn't match others - assembly aborted\n",
6719 devname);
6720 goto loop;
6721 }
6722 tmpdev->used = 1;
6723 *found = 1;
6724 dprintf("found: devname: %s\n", devname);
6725 }
6726 loop:
6727 if (tst)
6728 tst->ss->free_super(tst);
6729 }
6730 if (*found != 0) {
6731 int err;
6732 if ((err = load_super_imsm_all(st, -1, &st->sb, NULL, devlist, 0)) == 0) {
6733 struct mdinfo *iter, *head = st->ss->container_content(st, NULL);
6734 for (iter = head; iter; iter = iter->next) {
6735 dprintf("content->text_version: %s vol\n",
6736 iter->text_version);
6737 if (iter->array.state & (1<<MD_SB_BLOCK_VOLUME)) {
6738 /* do not assemble arrays with unsupported
6739 configurations */
6740 dprintf("Cannot activate member %s.\n",
6741 iter->text_version);
6742 } else
6743 count++;
6744 }
6745 sysfs_free(head);
6746
6747 } else {
6748 dprintf("No valid super block on device list: err: %d %p\n",
6749 err, st->sb);
6750 }
6751 } else {
6752 dprintf("no more devices to examine\n");
6753 }
6754
6755 for (tmpdev = devlist; tmpdev; tmpdev = tmpdev->next) {
6756 if (tmpdev->used == 1 && tmpdev->found) {
6757 if (count) {
6758 if (count < tmpdev->found)
6759 count = 0;
6760 else
6761 count -= tmpdev->found;
6762 }
6763 }
6764 if (tmpdev->used == 1)
6765 tmpdev->used = 4;
6766 }
6767 err_1:
6768 if (st)
6769 st->ss->free_super(st);
6770 return count;
6771 }
6772
6773 static int __count_volumes(char *hba_path, int dpa, int verbose,
6774 int cmp_hba_path)
6775 {
6776 struct sys_dev *idev, *intel_devices = find_intel_devices();
6777 int count = 0;
6778 const struct orom_entry *entry;
6779 struct devid_list *dv, *devid_list;
6780
6781 if (!hba_path)
6782 return 0;
6783
6784 for (idev = intel_devices; idev; idev = idev->next) {
6785 if (strstr(idev->path, hba_path))
6786 break;
6787 }
6788
6789 if (!idev || !idev->dev_id)
6790 return 0;
6791
6792 entry = get_orom_entry_by_device_id(idev->dev_id);
6793
6794 if (!entry || !entry->devid_list)
6795 return 0;
6796
6797 devid_list = entry->devid_list;
6798 for (dv = devid_list; dv; dv = dv->next) {
6799 struct md_list *devlist;
6800 struct sys_dev *device = NULL;
6801 char *hpath;
6802 int found = 0;
6803
6804 if (cmp_hba_path)
6805 device = device_by_id_and_path(dv->devid, hba_path);
6806 else
6807 device = device_by_id(dv->devid);
6808
6809 if (device)
6810 hpath = device->path;
6811 else
6812 return 0;
6813
6814 devlist = get_devices(hpath);
6815 /* if no intel devices return zero volumes */
6816 if (devlist == NULL)
6817 return 0;
6818
6819 count += active_arrays_by_format("imsm", hpath, &devlist, dpa,
6820 verbose);
6821 dprintf("path: %s active arrays: %d\n", hpath, count);
6822 if (devlist == NULL)
6823 return 0;
6824 do {
6825 found = 0;
6826 count += count_volumes_list(devlist,
6827 NULL,
6828 verbose,
6829 &found);
6830 dprintf("found %d count: %d\n", found, count);
6831 } while (found);
6832
6833 dprintf("path: %s total number of volumes: %d\n", hpath, count);
6834
6835 while (devlist) {
6836 struct md_list *dv = devlist;
6837 devlist = devlist->next;
6838 free(dv->devname);
6839 free(dv);
6840 }
6841 }
6842 return count;
6843 }
6844
6845 static int count_volumes(struct intel_hba *hba, int dpa, int verbose)
6846 {
6847 if (!hba)
6848 return 0;
6849 if (hba->type == SYS_DEV_VMD) {
6850 struct sys_dev *dev;
6851 int count = 0;
6852
6853 for (dev = find_intel_devices(); dev; dev = dev->next) {
6854 if (dev->type == SYS_DEV_VMD)
6855 count += __count_volumes(dev->path, dpa,
6856 verbose, 1);
6857 }
6858 return count;
6859 }
6860 return __count_volumes(hba->path, dpa, verbose, 0);
6861 }
6862
6863 static int imsm_default_chunk(const struct imsm_orom *orom)
6864 {
6865 /* up to 512 if the platform supports it, otherwise the platform max.
6866 * 128 if no platform detected
6867 */
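/* Worked example: with no platform information fls() contributes 0, so
 * fs = max(7, 0) = 7 and the default chunk is 1 << 7 = 128 (KiB). When the
 * OROM advertises larger strip sizes the highest supported bit wins, but
 * the result is always capped at 512 by the min() below.
 */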
6868 int fs = max(7, orom ? fls(orom->sss) : 0);
6869
6870 return min(512, (1 << fs));
6871 }
6872
6873 static int
6874 validate_geometry_imsm_orom(struct intel_super *super, int level, int layout,
6875 int raiddisks, int *chunk, unsigned long long size, int verbose)
6876 {
6877 /* check/set platform and metadata limits/defaults */
6878 if (super->orom && raiddisks > super->orom->dpa) {
6879 pr_vrb("platform supports a maximum of %d disks per array\n",
6880 super->orom->dpa);
6881 return 0;
6882 }
6883
6884 /* capabilities of OROM tested - copied from validate_geometry_imsm_volume */
6885 if (!is_raid_level_supported(super->orom, level, raiddisks)) {
6886 pr_vrb("platform does not support raid%d with %d disk%s\n",
6887 level, raiddisks, raiddisks > 1 ? "s" : "");
6888 return 0;
6889 }
6890
6891 if (*chunk == 0 || *chunk == UnSet)
6892 *chunk = imsm_default_chunk(super->orom);
6893
6894 if (super->orom && !imsm_orom_has_chunk(super->orom, *chunk)) {
6895 pr_vrb("platform does not support a chunk size of: %d\n", *chunk);
6896 return 0;
6897 }
6898
6899 if (layout != imsm_level_to_layout(level)) {
6900 if (level == 5)
6901 pr_vrb("imsm raid 5 only supports the left-asymmetric layout\n");
6902 else if (level == 10)
6903 pr_vrb("imsm raid 10 only supports the n2 layout\n");
6904 else
6905 pr_vrb("imsm unknown layout %#x for this raid level %d\n",
6906 layout, level);
6907 return 0;
6908 }
6909
6910 if (super->orom && (super->orom->attr & IMSM_OROM_ATTR_2TB) == 0 &&
6911 (calc_array_size(level, raiddisks, layout, *chunk, size) >> 32) > 0) {
6912 pr_vrb("platform does not support a volume size over 2TB\n");
6913 return 0;
6914 }
6915
6916 return 1;
6917 }
6918
6919 /* validate_geometry_imsm_volume - lifted from validate_geometry_ddf_bvd
6920 * FIX ME add ahci details
6921 */
6922 static int validate_geometry_imsm_volume(struct supertype *st, int level,
6923 int layout, int raiddisks, int *chunk,
6924 unsigned long long size,
6925 unsigned long long data_offset,
6926 char *dev,
6927 unsigned long long *freesize,
6928 int verbose)
6929 {
6930 dev_t rdev;
6931 struct intel_super *super = st->sb;
6932 struct imsm_super *mpb;
6933 struct dl *dl;
6934 unsigned long long pos = 0;
6935 unsigned long long maxsize;
6936 struct extent *e;
6937 int i;
6938
6939 /* We must have the container info already read in. */
6940 if (!super)
6941 return 0;
6942
6943 mpb = super->anchor;
6944
6945 if (!validate_geometry_imsm_orom(super, level, layout, raiddisks, chunk, size, verbose)) {
6946 pr_err("RAID geometry validation failed. Cannot proceed with the action(s).\n");
6947 return 0;
6948 }
6949 if (!dev) {
6950 /* General test: make sure there is space for
6951 * 'raiddisks' device extents of size 'size' at a given
6952 * offset
6953 */
6954 unsigned long long minsize = size;
6955 unsigned long long start_offset = MaxSector;
6956 int dcnt = 0;
6957 if (minsize == 0)
6958 minsize = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
6959 for (dl = super->disks; dl ; dl = dl->next) {
6960 int found = 0;
6961
6962 pos = 0;
6963 i = 0;
6964 e = get_extents(super, dl);
6965 if (!e) continue;
6966 do {
6967 unsigned long long esize;
6968 esize = e[i].start - pos;
6969 if (esize >= minsize)
6970 found = 1;
6971 if (found && start_offset == MaxSector) {
6972 start_offset = pos;
6973 break;
6974 } else if (found && pos != start_offset) {
6975 found = 0;
6976 break;
6977 }
6978 pos = e[i].start + e[i].size;
6979 i++;
6980 } while (e[i-1].size);
6981 if (found)
6982 dcnt++;
6983 free(e);
6984 }
6985 if (dcnt < raiddisks) {
6986 if (verbose)
6987 pr_err("imsm: Not enough devices with space for this array (%d < %d)\n",
6988 dcnt, raiddisks);
6989 return 0;
6990 }
6991 return 1;
6992 }
6993
6994 /* This device must be a member of the set */
6995 if (!stat_is_blkdev(dev, &rdev))
6996 return 0;
6997 for (dl = super->disks ; dl ; dl = dl->next) {
6998 if (dl->major == (int)major(rdev) &&
6999 dl->minor == (int)minor(rdev))
7000 break;
7001 }
7002 if (!dl) {
7003 if (verbose)
7004 pr_err("%s is not in the same imsm set\n", dev);
7005 return 0;
7006 } else if (super->orom && dl->index < 0 && mpb->num_raid_devs) {
7007 /* If a volume is present then the current creation attempt
7008 * cannot incorporate new spares because the orom may not
7009 * understand this configuration (all member disks must be
7010 * members of each array in the container).
7011 */
7012 pr_err("%s is a spare and a volume is already defined for this container\n", dev);
7013 pr_err("The option-rom requires all member disks to be a member of all volumes\n");
7014 return 0;
7015 } else if (super->orom && mpb->num_raid_devs > 0 &&
7016 mpb->num_disks != raiddisks) {
7017 pr_err("The option-rom requires all member disks to be a member of all volumes\n");
7018 return 0;
7019 }
7020
7021 /* retrieve the largest free space block */
7022 e = get_extents(super, dl);
7023 maxsize = 0;
7024 i = 0;
7025 if (e) {
7026 do {
7027 unsigned long long esize;
7028
7029 esize = e[i].start - pos;
7030 if (esize >= maxsize)
7031 maxsize = esize;
7032 pos = e[i].start + e[i].size;
7033 i++;
7034 } while (e[i-1].size);
7035 dl->e = e;
7036 dl->extent_cnt = i;
7037 } else {
7038 if (verbose)
7039 pr_err("unable to determine free space for: %s\n",
7040 dev);
7041 return 0;
7042 }
7043 if (maxsize < size) {
7044 if (verbose)
7045 pr_err("%s not enough space (%llu < %llu)\n",
7046 dev, maxsize, size);
7047 return 0;
7048 }
7049
7050 /* count total number of extents for merge */
7051 i = 0;
7052 for (dl = super->disks; dl; dl = dl->next)
7053 if (dl->e)
7054 i += dl->extent_cnt;
7055
7056 maxsize = merge_extents(super, i);
7057
7058 if (!check_env("IMSM_NO_PLATFORM") &&
7059 mpb->num_raid_devs > 0 && size && size != maxsize) {
7060 pr_err("attempting to create a second volume with size less than the remaining space. Aborting...\n");
7061 return 0;
7062 }
7063
7064 if (maxsize < size || maxsize == 0) {
7065 if (verbose) {
7066 if (maxsize == 0)
7067 pr_err("no free space left on device. Aborting...\n");
7068 else
7069 pr_err("not enough space to create volume of given size (%llu < %llu). Aborting...\n",
7070 maxsize, size);
7071 }
7072 return 0;
7073 }
7074
7075 *freesize = maxsize;
7076
7077 if (super->orom) {
7078 int count = count_volumes(super->hba,
7079 super->orom->dpa, verbose);
7080 if (super->orom->vphba <= count) {
7081 pr_vrb("platform does not support more than %d raid volumes.\n",
7082 super->orom->vphba);
7083 return 0;
7084 }
7085 }
7086 return 1;
7087 }
7088
7089 static int imsm_get_free_size(struct supertype *st, int raiddisks,
7090 unsigned long long size, int chunk,
7091 unsigned long long *freesize)
7092 {
7093 struct intel_super *super = st->sb;
7094 struct imsm_super *mpb = super->anchor;
7095 struct dl *dl;
7096 int i;
7097 int extent_cnt;
7098 struct extent *e;
7099 unsigned long long maxsize;
7100 unsigned long long minsize;
7101 int cnt;
7102 int used;
7103
7104 /* find the largest common start free region of the possible disks */
7105 used = 0;
7106 extent_cnt = 0;
7107 cnt = 0;
7108 for (dl = super->disks; dl; dl = dl->next) {
7109 dl->raiddisk = -1;
7110
7111 if (dl->index >= 0)
7112 used++;
7113
7114 /* don't activate new spares if we are orom constrained
7115 * and there is already a volume active in the container
7116 */
7117 if (super->orom && dl->index < 0 && mpb->num_raid_devs)
7118 continue;
7119
7120 e = get_extents(super, dl);
7121 if (!e)
7122 continue;
7123 for (i = 1; e[i-1].size; i++)
7124 ;
7125 dl->e = e;
7126 dl->extent_cnt = i;
7127 extent_cnt += i;
7128 cnt++;
7129 }
7130
7131 maxsize = merge_extents(super, extent_cnt);
7132 minsize = size;
7133 if (size == 0)
7134 /* chunk is in K */
7135 minsize = chunk * 2;
7136
7137 if (cnt < raiddisks ||
7138 (super->orom && used && used != raiddisks) ||
7139 maxsize < minsize ||
7140 maxsize == 0) {
7141 pr_err("not enough devices with space to create array.\n");
7142 return 0; /* not enough suitably large free space */
7143 }
7144
7145 if (size == 0) {
7146 size = maxsize;
7147 if (chunk) {
7148 size /= 2 * chunk;
7149 size *= 2 * chunk;
7150 }
7151 maxsize = size;
7152 }
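/* Example of the rounding above (hypothetical numbers): with a 128K chunk,
 * 2 * chunk = 256 sectors per strip, so a merged free size of 1000100
 * sectors is trimmed to 1000100 / 256 * 256 = 999936 sectors and the
 * auto-sized volume always covers whole strips.
 */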
7153 if (!check_env("IMSM_NO_PLATFORM") &&
7154 mpb->num_raid_devs > 0 && size && size != maxsize) {
7155 pr_err("attempting to create a second volume with size less than the remaining space. Aborting...\n");
7156 return 0;
7157 }
7158 cnt = 0;
7159 for (dl = super->disks; dl; dl = dl->next)
7160 if (dl->e)
7161 dl->raiddisk = cnt++;
7162
7163 *freesize = size;
7164
7165 dprintf("imsm: imsm_get_free_size() returns : %llu\n", size);
7166
7167 return 1;
7168 }
7169
7170 static int reserve_space(struct supertype *st, int raiddisks,
7171 unsigned long long size, int chunk,
7172 unsigned long long *freesize)
7173 {
7174 struct intel_super *super = st->sb;
7175 struct dl *dl;
7176 int cnt;
7177 int rv = 0;
7178
7179 rv = imsm_get_free_size(st, raiddisks, size, chunk, freesize);
7180 if (rv) {
7181 cnt = 0;
7182 for (dl = super->disks; dl; dl = dl->next)
7183 if (dl->e)
7184 dl->raiddisk = cnt++;
7185 rv = 1;
7186 }
7187
7188 return rv;
7189 }
7190
7191 static int validate_geometry_imsm(struct supertype *st, int level, int layout,
7192 int raiddisks, int *chunk, unsigned long long size,
7193 unsigned long long data_offset,
7194 char *dev, unsigned long long *freesize,
7195 int consistency_policy, int verbose)
7196 {
7197 int fd, cfd;
7198 struct mdinfo *sra;
7199 int is_member = 0;
7200
7201 /* load capability
7202 * if given unused devices, create a container
7203 * if given devices that are in a container, create a member volume
7204 */
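/* Illustrative call paths (typical mdadm usage, given here only as an
 * editorial aid): creating the container itself, e.g.
 *   mdadm -C /dev/md/imsm0 -e imsm -n 4 /dev/sd[b-e]
 * takes the LEVEL_CONTAINER branch below, while creating a volume inside
 * an existing container, e.g.
 *   mdadm -C /dev/md/vol0 -l 5 -n 4 /dev/md/imsm0
 * ends up in validate_geometry_imsm_volume(), either directly when st->sb
 * is already loaded or after the container-membership checks at the bottom
 * of this function.
 */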
7205 if (level == LEVEL_CONTAINER) {
7206 /* Must be a fresh device to add to a container */
7207 return validate_geometry_imsm_container(st, level, layout,
7208 raiddisks,
7209 *chunk,
7210 size, data_offset,
7211 dev, freesize,
7212 verbose);
7213 }
7214
7215 if (!dev) {
7216 if (st->sb) {
7217 struct intel_super *super = st->sb;
7218 if (!validate_geometry_imsm_orom(st->sb, level, layout,
7219 raiddisks, chunk, size,
7220 verbose))
7221 return 0;
7222 /* we are being asked to automatically lay out a
7223 * new volume based on the current contents of
7224 * the container. If the parameters can be
7225 * satisfied, reserve_space will record the disks,
7226 * start offset, and size of the volume to be
7227 * created. add_to_super and getinfo_super
7228 * detect when autolayout is in progress.
7229 */
7230 /* assuming that freesize is always given when array is
7231 created */
7232 if (super->orom && freesize) {
7233 int count;
7234 count = count_volumes(super->hba,
7235 super->orom->dpa, verbose);
7236 if (super->orom->vphba <= count) {
7237 pr_vrb("platform does not support more than %d raid volumes.\n",
7238 super->orom->vphba);
7239 return 0;
7240 }
7241 }
7242 if (freesize)
7243 return reserve_space(st, raiddisks, size,
7244 *chunk, freesize);
7245 }
7246 return 1;
7247 }
7248 if (st->sb) {
7249 /* creating in a given container */
7250 return validate_geometry_imsm_volume(st, level, layout,
7251 raiddisks, chunk, size,
7252 data_offset,
7253 dev, freesize, verbose);
7254 }
7255
7256 /* This device needs to be a device in an 'imsm' container */
7257 fd = open(dev, O_RDONLY|O_EXCL, 0);
7258 if (fd >= 0) {
7259 if (verbose)
7260 pr_err("Cannot create this array on device %s\n",
7261 dev);
7262 close(fd);
7263 return 0;
7264 }
7265 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
7266 if (verbose)
7267 pr_err("Cannot open %s: %s\n",
7268 dev, strerror(errno));
7269 return 0;
7270 }
7271 /* Well, it is in use by someone, maybe an 'imsm' container. */
7272 cfd = open_container(fd);
7273 close(fd);
7274 if (cfd < 0) {
7275 if (verbose)
7276 pr_err("Cannot use %s: It is busy\n",
7277 dev);
7278 return 0;
7279 }
7280 sra = sysfs_read(cfd, NULL, GET_VERSION);
7281 if (sra && sra->array.major_version == -1 &&
7282 strcmp(sra->text_version, "imsm") == 0)
7283 is_member = 1;
7284 sysfs_free(sra);
7285 if (is_member) {
7286 /* This is a member of an imsm container. Load the container
7287 * and try to create a volume
7288 */
7289 struct intel_super *super;
7290
7291 if (load_super_imsm_all(st, cfd, (void **) &super, NULL, NULL, 1) == 0) {
7292 st->sb = super;
7293 strcpy(st->container_devnm, fd2devnm(cfd));
7294 close(cfd);
7295 return validate_geometry_imsm_volume(st, level, layout,
7296 raiddisks, chunk,
7297 size, data_offset, dev,
7298 freesize, 1)
7299 ? 1 : -1;
7300 }
7301 }
7302
7303 if (verbose)
7304 pr_err("failed container membership check\n");
7305
7306 close(cfd);
7307 return 0;
7308 }
7309
7310 static void default_geometry_imsm(struct supertype *st, int *level, int *layout, int *chunk)
7311 {
7312 struct intel_super *super = st->sb;
7313
7314 if (level && *level == UnSet)
7315 *level = LEVEL_CONTAINER;
7316
7317 if (level && layout && *layout == UnSet)
7318 *layout = imsm_level_to_layout(*level);
7319
7320 if (chunk && (*chunk == UnSet || *chunk == 0))
7321 *chunk = imsm_default_chunk(super->orom);
7322 }
7323
7324 static void handle_missing(struct intel_super *super, struct imsm_dev *dev);
7325
7326 static int kill_subarray_imsm(struct supertype *st)
7327 {
7328 /* remove the subarray currently referenced by ->current_vol */
7329 __u8 i;
7330 struct intel_dev **dp;
7331 struct intel_super *super = st->sb;
7332 __u8 current_vol = super->current_vol;
7333 struct imsm_super *mpb = super->anchor;
7334
7335 if (super->current_vol < 0)
7336 return 2;
7337 super->current_vol = -1; /* invalidate subarray cursor */
7338
7339 /* block deletions that would change the uuid of active subarrays
7340 *
7341 * FIXME when immutable ids are available, but note that we'll
7342 * also need to fixup the invalidated/active subarray indexes in
7343 * mdstat
7344 */
7345 for (i = 0; i < mpb->num_raid_devs; i++) {
7346 char subarray[4];
7347
7348 if (i < current_vol)
7349 continue;
7350 sprintf(subarray, "%u", i);
7351 if (is_subarray_active(subarray, st->devnm)) {
7352 pr_err("deleting subarray-%d would change the UUID of active subarray-%d, aborting\n",
7353 current_vol, i);
7354
7355 return 2;
7356 }
7357 }
7358
7359 if (st->update_tail) {
7360 struct imsm_update_kill_array *u = xmalloc(sizeof(*u));
7361
7362 u->type = update_kill_array;
7363 u->dev_idx = current_vol;
7364 append_metadata_update(st, u, sizeof(*u));
7365
7366 return 0;
7367 }
7368
7369 for (dp = &super->devlist; *dp;)
7370 if ((*dp)->index == current_vol) {
7371 *dp = (*dp)->next;
7372 } else {
7373 handle_missing(super, (*dp)->dev);
7374 if ((*dp)->index > current_vol)
7375 (*dp)->index--;
7376 dp = &(*dp)->next;
7377 }
7378
7379 /* no more raid devices, all active components are now spares,
7380 * but of course failed are still failed
7381 */
7382 if (--mpb->num_raid_devs == 0) {
7383 struct dl *d;
7384
7385 for (d = super->disks; d; d = d->next)
7386 if (d->index > -2)
7387 mark_spare(d);
7388 }
7389
7390 super->updates_pending++;
7391
7392 return 0;
7393 }
7394
7395 static int update_subarray_imsm(struct supertype *st, char *subarray,
7396 char *update, struct mddev_ident *ident)
7397 {
7398 /* update the subarray currently referenced by ->current_vol */
7399 struct intel_super *super = st->sb;
7400 struct imsm_super *mpb = super->anchor;
7401
7402 if (strcmp(update, "name") == 0) {
7403 char *name = ident->name;
7404 char *ep;
7405 int vol;
7406
7407 if (is_subarray_active(subarray, st->devnm)) {
7408 pr_err("Unable to update name of active subarray\n");
7409 return 2;
7410 }
7411
7412 if (!check_name(super, name, 0))
7413 return 2;
7414
7415 vol = strtoul(subarray, &ep, 10);
7416 if (*ep != '\0' || vol >= super->anchor->num_raid_devs)
7417 return 2;
7418
7419 if (st->update_tail) {
7420 struct imsm_update_rename_array *u = xmalloc(sizeof(*u));
7421
7422 u->type = update_rename_array;
7423 u->dev_idx = vol;
7424 strncpy((char *) u->name, name, MAX_RAID_SERIAL_LEN);
7425 u->name[MAX_RAID_SERIAL_LEN-1] = '\0';
7426 append_metadata_update(st, u, sizeof(*u));
7427 } else {
7428 struct imsm_dev *dev;
7429 int i;
7430
7431 dev = get_imsm_dev(super, vol);
7432 strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
7433 dev->volume[MAX_RAID_SERIAL_LEN-1] = '\0';
7434 for (i = 0; i < mpb->num_raid_devs; i++) {
7435 dev = get_imsm_dev(super, i);
7436 handle_missing(super, dev);
7437 }
7438 super->updates_pending++;
7439 }
7440 } else if (strcmp(update, "ppl") == 0 ||
7441 strcmp(update, "no-ppl") == 0) {
7442 int new_policy;
7443 char *ep;
7444 int vol = strtoul(subarray, &ep, 10);
7445
7446 if (*ep != '\0' || vol >= super->anchor->num_raid_devs)
7447 return 2;
7448
7449 if (strcmp(update, "ppl") == 0)
7450 new_policy = RWH_MULTIPLE_DISTRIBUTED;
7451 else
7452 new_policy = RWH_MULTIPLE_OFF;
7453
7454 if (st->update_tail) {
7455 struct imsm_update_rwh_policy *u = xmalloc(sizeof(*u));
7456
7457 u->type = update_rwh_policy;
7458 u->dev_idx = vol;
7459 u->new_policy = new_policy;
7460 append_metadata_update(st, u, sizeof(*u));
7461 } else {
7462 struct imsm_dev *dev;
7463
7464 dev = get_imsm_dev(super, vol);
7465 dev->rwh_policy = new_policy;
7466 super->updates_pending++;
7467 }
7468 } else
7469 return 2;
7470
7471 return 0;
7472 }
7473
7474 static int is_gen_migration(struct imsm_dev *dev)
7475 {
7476 if (dev == NULL)
7477 return 0;
7478
7479 if (!dev->vol.migr_state)
7480 return 0;
7481
7482 if (migr_type(dev) == MIGR_GEN_MIGR)
7483 return 1;
7484
7485 return 0;
7486 }
7487
7488 static int is_rebuilding(struct imsm_dev *dev)
7489 {
7490 struct imsm_map *migr_map;
7491
7492 if (!dev->vol.migr_state)
7493 return 0;
7494
7495 if (migr_type(dev) != MIGR_REBUILD)
7496 return 0;
7497
7498 migr_map = get_imsm_map(dev, MAP_1);
7499
7500 if (migr_map->map_state == IMSM_T_STATE_DEGRADED)
7501 return 1;
7502 else
7503 return 0;
7504 }
7505
7506 static int is_initializing(struct imsm_dev *dev)
7507 {
7508 struct imsm_map *migr_map;
7509
7510 if (!dev->vol.migr_state)
7511 return 0;
7512
7513 if (migr_type(dev) != MIGR_INIT)
7514 return 0;
7515
7516 migr_map = get_imsm_map(dev, MAP_1);
7517
7518 if (migr_map->map_state == IMSM_T_STATE_UNINITIALIZED)
7519 return 1;
7520
7521 return 0;
7522 }
7523
7524 static void update_recovery_start(struct intel_super *super,
7525 struct imsm_dev *dev,
7526 struct mdinfo *array)
7527 {
7528 struct mdinfo *rebuild = NULL;
7529 struct mdinfo *d;
7530 __u32 units;
7531
7532 if (!is_rebuilding(dev))
7533 return;
7534
7535 /* Find the rebuild target, but punt on the dual rebuild case */
7536 for (d = array->devs; d; d = d->next)
7537 if (d->recovery_start == 0) {
7538 if (rebuild)
7539 return;
7540 rebuild = d;
7541 }
7542
7543 if (!rebuild) {
7544 /* (?) none of the disks are marked with
7545 * IMSM_ORD_REBUILD, so assume they are missing and the
7546 * disk_ord_tbl was not correctly updated
7547 */
7548 dprintf("failed to locate out-of-sync disk\n");
7549 return;
7550 }
7551
7552 units = __le32_to_cpu(dev->vol.curr_migr_unit);
7553 rebuild->recovery_start = units * blocks_per_migr_unit(super, dev);
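/* Example (hypothetical values): if curr_migr_unit is 100 and
 * blocks_per_migr_unit() evaluates to 2048 sectors, the rebuild target
 * resumes from sector 204800 instead of restarting from zero.
 */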
7554 }
7555
7556 static int recover_backup_imsm(struct supertype *st, struct mdinfo *info);
7557
7558 static struct mdinfo *container_content_imsm(struct supertype *st, char *subarray)
7559 {
7560 /* Given a container loaded by load_super_imsm_all,
7561 * extract information about all the arrays into
7562 * an mdinfo tree.
7563 * If 'subarray' is given, just extract info about that array.
7564 *
7565 * For each imsm_dev create an mdinfo, fill it in,
7566 * then look for matching devices in super->disks
7567 * and create appropriate device mdinfo.
7568 */
7569 struct intel_super *super = st->sb;
7570 struct imsm_super *mpb = super->anchor;
7571 struct mdinfo *rest = NULL;
7572 unsigned int i;
7573 int sb_errors = 0;
7574 struct dl *d;
7575 int spare_disks = 0;
7576
7577 /* do not assemble arrays when not all attributes are supported */
7578 if (imsm_check_attributes(mpb->attributes) == 0) {
7579 sb_errors = 1;
7580 pr_err("Unsupported attributes in IMSM metadata. Arrays activation is blocked.\n");
7581 }
7582
7583 /* count spare devices, not used in maps
7584 */
7585 for (d = super->disks; d; d = d->next)
7586 if (d->index == -1)
7587 spare_disks++;
7588
7589 for (i = 0; i < mpb->num_raid_devs; i++) {
7590 struct imsm_dev *dev;
7591 struct imsm_map *map;
7592 struct imsm_map *map2;
7593 struct mdinfo *this;
7594 int slot;
7595 int chunk;
7596 char *ep;
7597
7598 if (subarray &&
7599 (i != strtoul(subarray, &ep, 10) || *ep != '\0'))
7600 continue;
7601
7602 dev = get_imsm_dev(super, i);
7603 map = get_imsm_map(dev, MAP_0);
7604 map2 = get_imsm_map(dev, MAP_1);
7605
7606 /* do not publish arrays that are in the middle of an
7607 * unsupported migration
7608 */
7609 if (dev->vol.migr_state &&
7610 (migr_type(dev) == MIGR_STATE_CHANGE)) {
7611 pr_err("cannot assemble volume '%.16s': unsupported migration in progress\n",
7612 dev->volume);
7613 continue;
7614 }
7615 /* do not publish arrays that are not supported by the controller's
7616 * OROM/EFI
7617 */
7618
7619 this = xmalloc(sizeof(*this));
7620
7621 super->current_vol = i;
7622 getinfo_super_imsm_volume(st, this, NULL);
7623 this->next = rest;
7624 chunk = __le16_to_cpu(map->blocks_per_strip) >> 1;
7625 /* mdadm does not support all metadata features - set the bit in all arrays state */
7626 if (!validate_geometry_imsm_orom(super,
7627 get_imsm_raid_level(map), /* RAID level */
7628 imsm_level_to_layout(get_imsm_raid_level(map)),
7629 map->num_members, /* raid disks */
7630 &chunk, join_u32(dev->size_low, dev->size_high),
7631 1 /* verbose */)) {
7632 pr_err("IMSM RAID geometry validation failed. Array %s activation is blocked.\n",
7633 dev->volume);
7634 this->array.state |=
7635 (1<<MD_SB_BLOCK_CONTAINER_RESHAPE) |
7636 (1<<MD_SB_BLOCK_VOLUME);
7637 }
7638
7639 /* if the superblock has errors, set the blocking bits in all arrays state */
7640 if (sb_errors)
7641 this->array.state |=
7642 (1<<MD_SB_BLOCK_CONTAINER_RESHAPE) |
7643 (1<<MD_SB_BLOCK_VOLUME);
7644
7645 for (slot = 0 ; slot < map->num_members; slot++) {
7646 unsigned long long recovery_start;
7647 struct mdinfo *info_d;
7648 struct dl *d;
7649 int idx;
7650 int skip;
7651 __u32 ord;
7652
7653 skip = 0;
7654 idx = get_imsm_disk_idx(dev, slot, MAP_0);
7655 ord = get_imsm_ord_tbl_ent(dev, slot, MAP_X);
7656 for (d = super->disks; d ; d = d->next)
7657 if (d->index == idx)
7658 break;
7659
7660 recovery_start = MaxSector;
7661 if (d == NULL)
7662 skip = 1;
7663 if (d && is_failed(&d->disk))
7664 skip = 1;
7665 if (ord & IMSM_ORD_REBUILD)
7666 recovery_start = 0;
7667
7668 /*
7669 * if we skip some disks the array will be assembled degraded;
7670 * reset resync start to avoid a dirty-degraded
7671 * situation when performing the initial sync
7672 *
7673 * FIXME handle dirty degraded
7674 */
7675 if ((skip || recovery_start == 0) &&
7676 !(dev->vol.dirty & RAIDVOL_DIRTY))
7677 this->resync_start = MaxSector;
7678 if (skip)
7679 continue;
7680
7681 info_d = xcalloc(1, sizeof(*info_d));
7682 info_d->next = this->devs;
7683 this->devs = info_d;
7684
7685 info_d->disk.number = d->index;
7686 info_d->disk.major = d->major;
7687 info_d->disk.minor = d->minor;
7688 info_d->disk.raid_disk = slot;
7689 info_d->recovery_start = recovery_start;
7690 if (map2) {
7691 if (slot < map2->num_members)
7692 info_d->disk.state = (1 << MD_DISK_ACTIVE);
7693 else
7694 this->array.spare_disks++;
7695 } else {
7696 if (slot < map->num_members)
7697 info_d->disk.state = (1 << MD_DISK_ACTIVE);
7698 else
7699 this->array.spare_disks++;
7700 }
7701 if (info_d->recovery_start == MaxSector)
7702 this->array.working_disks++;
7703
7704 info_d->events = __le32_to_cpu(mpb->generation_num);
7705 info_d->data_offset = pba_of_lba0(map);
7706
7707 if (map->raid_level == 5) {
7708 info_d->component_size =
7709 num_data_stripes(map) *
7710 map->blocks_per_strip;
7711 info_d->ppl_sector = this->ppl_sector;
7712 info_d->ppl_size = this->ppl_size;
7713 } else {
7714 info_d->component_size = blocks_per_member(map);
7715 }
7716
7717 info_d->bb.supported = 1;
7718 get_volume_badblocks(super->bbm_log, ord_to_idx(ord),
7719 info_d->data_offset,
7720 info_d->component_size,
7721 &info_d->bb);
7722 }
7723 /* now that the disk list is up-to-date fixup recovery_start */
7724 update_recovery_start(super, dev, this);
7725 this->array.spare_disks += spare_disks;
7726
7727 /* check for reshape */
7728 if (this->reshape_active == 1)
7729 recover_backup_imsm(st, this);
7730 rest = this;
7731 }
7732
7733 return rest;
7734 }
7735
7736 static __u8 imsm_check_degraded(struct intel_super *super, struct imsm_dev *dev,
7737 int failed, int look_in_map)
7738 {
7739 struct imsm_map *map;
7740
7741 map = get_imsm_map(dev, look_in_map);
7742
7743 if (!failed)
7744 return map->map_state == IMSM_T_STATE_UNINITIALIZED ?
7745 IMSM_T_STATE_UNINITIALIZED : IMSM_T_STATE_NORMAL;
7746
7747 switch (get_imsm_raid_level(map)) {
7748 case 0:
7749 return IMSM_T_STATE_FAILED;
7750 break;
7751 case 1:
7752 if (failed < map->num_members)
7753 return IMSM_T_STATE_DEGRADED;
7754 else
7755 return IMSM_T_STATE_FAILED;
7756 break;
7757 case 10:
7758 {
7759 /**
7760 * check to see if any mirror pair has completely failed,
7761 * otherwise we are only degraded. Even-numbered slots are
7762 * mirrored on slot+1
7763 */
7764 int i;
7765 /* self-initialize: gcc -Os complains that insync may be used uninitialized */
7766 int insync = insync;
7767
7768 for (i = 0; i < map->num_members; i++) {
7769 __u32 ord = get_imsm_ord_tbl_ent(dev, i, MAP_X);
7770 int idx = ord_to_idx(ord);
7771 struct imsm_disk *disk;
7772
7773 /* reset the potential in-sync count on even-numbered
7774 * slots. num_copies is always 2 for imsm raid10
7775 */
7776 if ((i & 1) == 0)
7777 insync = 2;
7778
7779 disk = get_imsm_disk(super, idx);
7780 if (!disk || is_failed(disk) || ord & IMSM_ORD_REBUILD)
7781 insync--;
7782
7783 /* no in-sync disks left in this mirror, the
7784 * array has failed
7785 */
7786 if (insync == 0)
7787 return IMSM_T_STATE_FAILED;
7788 }
7789
7790 return IMSM_T_STATE_DEGRADED;
7791 }
7792 case 5:
7793 if (failed < 2)
7794 return IMSM_T_STATE_DEGRADED;
7795 else
7796 return IMSM_T_STATE_FAILED;
7797 break;
7798 default:
7799 break;
7800 }
7801
7802 return map->map_state;
7803 }
7804
7805 static int imsm_count_failed(struct intel_super *super, struct imsm_dev *dev,
7806 int look_in_map)
7807 {
7808 int i;
7809 int failed = 0;
7810 struct imsm_disk *disk;
7811 struct imsm_map *map = get_imsm_map(dev, MAP_0);
7812 struct imsm_map *prev = get_imsm_map(dev, MAP_1);
7813 struct imsm_map *map_for_loop;
7814 __u32 ord;
7815 int idx;
7816 int idx_1;
7817
7818 /* at the beginning of migration we set IMSM_ORD_REBUILD on
7819 * disks that are being rebuilt. New failures are recorded to
7820 * map[0]. So we look through all the disks we started with and
7821 * see if any failures are still present, or if any new ones
7822 * have arrived
7823 */
7824 map_for_loop = map;
7825 if (prev && (map->num_members < prev->num_members))
7826 map_for_loop = prev;
7827
7828 for (i = 0; i < map_for_loop->num_members; i++) {
7829 idx_1 = -255;
7830 /* when MAP_X is passed both maps failures are counted
7831 */
7832 if (prev &&
7833 (look_in_map == MAP_1 || look_in_map == MAP_X) &&
7834 i < prev->num_members) {
7835 ord = __le32_to_cpu(prev->disk_ord_tbl[i]);
7836 idx_1 = ord_to_idx(ord);
7837
7838 disk = get_imsm_disk(super, idx_1);
7839 if (!disk || is_failed(disk) || ord & IMSM_ORD_REBUILD)
7840 failed++;
7841 }
7842 if ((look_in_map == MAP_0 || look_in_map == MAP_X) &&
7843 i < map->num_members) {
7844 ord = __le32_to_cpu(map->disk_ord_tbl[i]);
7845 idx = ord_to_idx(ord);
7846
7847 if (idx != idx_1) {
7848 disk = get_imsm_disk(super, idx);
7849 if (!disk || is_failed(disk) ||
7850 ord & IMSM_ORD_REBUILD)
7851 failed++;
7852 }
7853 }
7854 }
7855
7856 return failed;
7857 }
7858
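/* mdmon callback invoked when a member array is opened: validate the
 * subarray index against the anchor and pre-allocate bad block list
 * memory via an update_prealloc_badblocks_mem metadata update.
 */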
7859 static int imsm_open_new(struct supertype *c, struct active_array *a,
7860 char *inst)
7861 {
7862 struct intel_super *super = c->sb;
7863 struct imsm_super *mpb = super->anchor;
7864 struct imsm_update_prealloc_bb_mem u;
7865
7866 if (atoi(inst) >= mpb->num_raid_devs) {
7867 pr_err("subarry index %d, out of range\n", atoi(inst));
7868 return -ENODEV;
7869 }
7870
7871 dprintf("imsm: open_new %s\n", inst);
7872 a->info.container_member = atoi(inst);
7873
7874 u.type = update_prealloc_badblocks_mem;
7875 imsm_update_metadata_locally(c, &u, sizeof(u));
7876
7877 return 0;
7878 }
7879
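/* Return true if the volume is undergoing initialization or repair
 * (resync), as opposed to a general migration (reshape).
 */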
7880 static int is_resyncing(struct imsm_dev *dev)
7881 {
7882 struct imsm_map *migr_map;
7883
7884 if (!dev->vol.migr_state)
7885 return 0;
7886
7887 if (migr_type(dev) == MIGR_INIT ||
7888 migr_type(dev) == MIGR_REPAIR)
7889 return 1;
7890
7891 if (migr_type(dev) == MIGR_GEN_MIGR)
7892 return 0;
7893
7894 migr_map = get_imsm_map(dev, MAP_1);
7895
7896 if (migr_map->map_state == IMSM_T_STATE_NORMAL &&
7897 dev->vol.migr_type != MIGR_GEN_MIGR)
7898 return 1;
7899 else
7900 return 0;
7901 }
7902
7903 /* return true if we recorded new information */
7904 static int mark_failure(struct intel_super *super,
7905 struct imsm_dev *dev, struct imsm_disk *disk, int idx)
7906 {
7907 __u32 ord;
7908 int slot;
7909 struct imsm_map *map;
7910 char buf[MAX_RAID_SERIAL_LEN+3];
7911 unsigned int len, shift = 0;
7912
7913 /* new failures are always set in map[0] */
7914 map = get_imsm_map(dev, MAP_0);
7915
7916 slot = get_imsm_disk_slot(map, idx);
7917 if (slot < 0)
7918 return 0;
7919
7920 ord = __le32_to_cpu(map->disk_ord_tbl[slot]);
7921 if (is_failed(disk) && (ord & IMSM_ORD_REBUILD))
7922 return 0;
7923
7924 memcpy(buf, disk->serial, MAX_RAID_SERIAL_LEN);
7925 buf[MAX_RAID_SERIAL_LEN] = '\000';
7926 strcat(buf, ":0");
7927 if ((len = strlen(buf)) >= MAX_RAID_SERIAL_LEN)
7928 shift = len - MAX_RAID_SERIAL_LEN + 1;
7929 strncpy((char *)disk->serial, &buf[shift], MAX_RAID_SERIAL_LEN);
7930
7931 disk->status |= FAILED_DISK;
7932 set_imsm_ord_tbl_ent(map, slot, idx | IMSM_ORD_REBUILD);
7933 /* mark the failure in the second map too, if a second map exists and
7934 * this disk is present in that slot.
7935 * This is valid for migration, initialization and rebuild
7936 */
7937 if (dev->vol.migr_state) {
7938 struct imsm_map *map2 = get_imsm_map(dev, MAP_1);
7939 int slot2 = get_imsm_disk_slot(map2, idx);
7940
7941 if (slot2 < map2->num_members && slot2 >= 0)
7942 set_imsm_ord_tbl_ent(map2, slot2,
7943 idx | IMSM_ORD_REBUILD);
7944 }
7945 if (map->failed_disk_num == 0xff)
7946 map->failed_disk_num = slot;
7947
7948 clear_disk_badblocks(super->bbm_log, ord_to_idx(ord));
7949
7950 return 1;
7951 }
7952
7953 static void mark_missing(struct intel_super *super,
7954 struct imsm_dev *dev, struct imsm_disk *disk, int idx)
7955 {
7956 mark_failure(super, dev, disk, idx);
7957
7958 if (disk->scsi_id == __cpu_to_le32(~(__u32)0))
7959 return;
7960
7961 disk->scsi_id = __cpu_to_le32(~(__u32)0);
7962 memmove(&disk->serial[0], &disk->serial[1], MAX_RAID_SERIAL_LEN - 1);
7963 }
7964
7965 static void handle_missing(struct intel_super *super, struct imsm_dev *dev)
7966 {
7967 struct dl *dl;
7968
7969 if (!super->missing)
7970 return;
7971
7972 /* When the OROM adds a replacement for a missing disk it does
7973 * not remove the missing disk's entry, it just updates the map with
7974 * the newly added disk. So it is not enough to test whether there is
7975 * any missing disk; we also have to check whether there are any
7976 * failed disks in the map in order to stop the migration */
7977
7978 dprintf("imsm: mark missing\n");
7979 /* end process for initialization and rebuild only
7980 */
7981 if (is_gen_migration(dev) == 0) {
7982 int failed = imsm_count_failed(super, dev, MAP_0);
7983
7984 if (failed) {
7985 __u8 map_state;
7986 struct imsm_map *map = get_imsm_map(dev, MAP_0);
7987 struct imsm_map *map1;
7988 int i, ord, ord_map1;
7989 int rebuilt = 1;
7990
7991 for (i = 0; i < map->num_members; i++) {
7992 ord = get_imsm_ord_tbl_ent(dev, i, MAP_0);
7993 if (!(ord & IMSM_ORD_REBUILD))
7994 continue;
7995
7996 map1 = get_imsm_map(dev, MAP_1);
7997 if (!map1)
7998 continue;
7999
8000 ord_map1 = __le32_to_cpu(map1->disk_ord_tbl[i]);
8001 if (ord_map1 & IMSM_ORD_REBUILD)
8002 rebuilt = 0;
8003 }
8004
8005 if (rebuilt) {
8006 map_state = imsm_check_degraded(super, dev,
8007 failed, MAP_0);
8008 end_migration(dev, super, map_state);
8009 }
8010 }
8011 }
8012 for (dl = super->missing; dl; dl = dl->next)
8013 mark_missing(super, dev, &dl->disk, dl->index);
8014 super->updates_pending++;
8015 }
8016
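/* Store the array size (in blocks) in the metadata as a size_low/size_high
 * pair. A new_size <= 0 recalculates the size from the member size and the
 * number of data disks (OLCE); the result is rounded to a megabyte boundary
 * per disk before being written back.
 */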
8017 static unsigned long long imsm_set_array_size(struct imsm_dev *dev,
8018 long long new_size)
8019 {
8020 int used_disks = imsm_num_data_members(dev, MAP_0);
8021 unsigned long long array_blocks;
8022 struct imsm_map *map;
8023
8024 if (used_disks == 0) {
8025 /* when a problem occurs,
8026 * return the current array_blocks value
8027 */
8028 array_blocks = __le32_to_cpu(dev->size_high);
8029 array_blocks = array_blocks << 32;
8030 array_blocks += __le32_to_cpu(dev->size_low);
8031
8032 return array_blocks;
8033 }
8034
8035 /* set array size in metadata
8036 */
8037 if (new_size <= 0) {
8038 /* OLCE size change is caused by added disks
8039 */
8040 map = get_imsm_map(dev, MAP_0);
8041 array_blocks = blocks_per_member(map) * used_disks;
8042 } else {
8043 /* Online Volume Size Change
8044 * Using available free space
8045 */
8046 array_blocks = new_size;
8047 }
8048
8049 array_blocks = round_size_to_mb(array_blocks, used_disks);
8050 dev->size_low = __cpu_to_le32((__u32)array_blocks);
8051 dev->size_high = __cpu_to_le32((__u32)(array_blocks >> 32));
8052
8053 return array_blocks;
8054 }
8055
8056 static void imsm_set_disk(struct active_array *a, int n, int state);
8057
8058 static void imsm_progress_container_reshape(struct intel_super *super)
8059 {
8060 /* if no device has a migr_state, but some device has a
8061 * different number of members than the previous device, start
8062 * changing the number of members in this device to match the
8063 * previous one.
8064 */
8065 struct imsm_super *mpb = super->anchor;
8066 int prev_disks = -1;
8067 int i;
8068 int copy_map_size;
8069
8070 for (i = 0; i < mpb->num_raid_devs; i++) {
8071 struct imsm_dev *dev = get_imsm_dev(super, i);
8072 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8073 struct imsm_map *map2;
8074 int prev_num_members;
8075
8076 if (dev->vol.migr_state)
8077 return;
8078
8079 if (prev_disks == -1)
8080 prev_disks = map->num_members;
8081 if (prev_disks == map->num_members)
8082 continue;
8083
8084 /* OK, this array needs to enter reshape mode.
8085 * i.e. it needs a migr_state
8086 */
8087
8088 copy_map_size = sizeof_imsm_map(map);
8089 prev_num_members = map->num_members;
8090 map->num_members = prev_disks;
8091 dev->vol.migr_state = 1;
8092 dev->vol.curr_migr_unit = 0;
8093 set_migr_type(dev, MIGR_GEN_MIGR);
8094 for (i = prev_num_members;
8095 i < map->num_members; i++)
8096 set_imsm_ord_tbl_ent(map, i, i);
8097 map2 = get_imsm_map(dev, MAP_1);
8098 /* Copy the current map */
8099 memcpy(map2, map, copy_map_size);
8100 map2->num_members = prev_num_members;
8101
8102 imsm_set_array_size(dev, -1);
8103 super->clean_migration_record_by_mdmon = 1;
8104 super->updates_pending++;
8105 }
8106 }
8107
8108 /* Handle dirty -> clean transitions, resync and reshape. Degraded and rebuild
8109 * states are handled in imsm_set_disk() with one exception: when a
8110 * resync is stopped due to a new failure this routine will set the
8111 * 'degraded' state for the array.
8112 */
8113 static int imsm_set_array_state(struct active_array *a, int consistent)
8114 {
8115 int inst = a->info.container_member;
8116 struct intel_super *super = a->container->sb;
8117 struct imsm_dev *dev = get_imsm_dev(super, inst);
8118 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8119 int failed = imsm_count_failed(super, dev, MAP_0);
8120 __u8 map_state = imsm_check_degraded(super, dev, failed, MAP_0);
8121 __u32 blocks_per_unit;
8122
8123 if (dev->vol.migr_state &&
8124 dev->vol.migr_type == MIGR_GEN_MIGR) {
8125 /* array state change is blocked due to reshape action
8126 * We might need to
8127 * - abort the reshape (if last_checkpoint is 0 and action != reshape)
8128 * - finish the reshape (if last_checkpoint is big and action != reshape)
8129 * - update curr_migr_unit
8130 */
8131 if (a->curr_action == reshape) {
8132 /* still reshaping, maybe update curr_migr_unit */
8133 goto mark_checkpoint;
8134 } else {
8135 if (a->last_checkpoint == 0 && a->prev_action == reshape) {
8136 /* for some reason we aborted the reshape.
8137 *
8138 * disable automatic metadata rollback
8139 * user action is required to recover the process
8140 */
8141 if (0) {
8142 struct imsm_map *map2 =
8143 get_imsm_map(dev, MAP_1);
8144 dev->vol.migr_state = 0;
8145 set_migr_type(dev, 0);
8146 dev->vol.curr_migr_unit = 0;
8147 memcpy(map, map2,
8148 sizeof_imsm_map(map2));
8149 super->updates_pending++;
8150 }
8151 }
8152 if (a->last_checkpoint >= a->info.component_size) {
8153 unsigned long long array_blocks;
8154 int used_disks;
8155 struct mdinfo *mdi;
8156
8157 used_disks = imsm_num_data_members(dev, MAP_0);
8158 if (used_disks > 0) {
8159 array_blocks =
8160 blocks_per_member(map) *
8161 used_disks;
8162 array_blocks =
8163 round_size_to_mb(array_blocks,
8164 used_disks);
8165 a->info.custom_array_size = array_blocks;
8166 /* encourage manager to update array
8167 * size
8168 */
8169
8170 a->check_reshape = 1;
8171 }
8172 /* finalize online capacity expansion/reshape */
8173 for (mdi = a->info.devs; mdi; mdi = mdi->next)
8174 imsm_set_disk(a,
8175 mdi->disk.raid_disk,
8176 mdi->curr_state);
8177
8178 imsm_progress_container_reshape(super);
8179 }
8180 }
8181 }
8182
8183 /* before we activate this array handle any missing disks */
8184 if (consistent == 2)
8185 handle_missing(super, dev);
8186
8187 if (consistent == 2 &&
8188 (!is_resync_complete(&a->info) ||
8189 map_state != IMSM_T_STATE_NORMAL ||
8190 dev->vol.migr_state))
8191 consistent = 0;
8192
8193 if (is_resync_complete(&a->info)) {
8194 /* complete initialization / resync,
8195 * recovery and interrupted recovery are completed in
8196 * ->set_disk
8197 */
8198 if (is_resyncing(dev)) {
8199 dprintf("imsm: mark resync done\n");
8200 end_migration(dev, super, map_state);
8201 super->updates_pending++;
8202 a->last_checkpoint = 0;
8203 }
8204 } else if ((!is_resyncing(dev) && !failed) &&
8205 (imsm_reshape_blocks_arrays_changes(super) == 0)) {
8206 /* mark the start of the init process if nothing is failed */
8207 dprintf("imsm: mark resync start\n");
8208 if (map->map_state == IMSM_T_STATE_UNINITIALIZED)
8209 migrate(dev, super, IMSM_T_STATE_NORMAL, MIGR_INIT);
8210 else
8211 migrate(dev, super, IMSM_T_STATE_NORMAL, MIGR_REPAIR);
8212 super->updates_pending++;
8213 }
8214
8215 mark_checkpoint:
8216 /* skip checkpointing for general migration,
8217 * it is controlled in mdadm
8218 */
8219 if (is_gen_migration(dev))
8220 goto skip_mark_checkpoint;
8221
8222 /* check if we can update curr_migr_unit from resync_start, recovery_start */
8223 blocks_per_unit = blocks_per_migr_unit(super, dev);
8224 if (blocks_per_unit) {
8225 __u32 units32;
8226 __u64 units;
8227
8228 units = a->last_checkpoint / blocks_per_unit;
8229 units32 = units;
8230
8231 /* check that we did not overflow 32-bits, and that
8232 * curr_migr_unit needs updating
8233 */
8234 if (units32 == units &&
8235 units32 != 0 &&
8236 __le32_to_cpu(dev->vol.curr_migr_unit) != units32) {
8237 dprintf("imsm: mark checkpoint (%u)\n", units32);
8238 dev->vol.curr_migr_unit = __cpu_to_le32(units32);
8239 super->updates_pending++;
8240 }
8241 }
8242
8243 skip_mark_checkpoint:
8244 /* mark dirty / clean */
8245 if (((dev->vol.dirty & RAIDVOL_DIRTY) && consistent) ||
8246 (!(dev->vol.dirty & RAIDVOL_DIRTY) && !consistent)) {
8247 dprintf("imsm: mark '%s'\n", consistent ? "clean" : "dirty");
8248 if (consistent) {
8249 dev->vol.dirty = RAIDVOL_CLEAN;
8250 } else {
8251 dev->vol.dirty = RAIDVOL_DIRTY;
8252 if (dev->rwh_policy == RWH_DISTRIBUTED ||
8253 dev->rwh_policy == RWH_MULTIPLE_DISTRIBUTED)
8254 dev->vol.dirty |= RAIDVOL_DSRECORD_VALID;
8255 }
8256 super->updates_pending++;
8257 }
8258
8259 return consistent;
8260 }
8261
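/* Translate a raid_disk slot into its disk_ord_tbl entry (disk index plus
 * any IMSM_ORD_* flags) in map[0], or return -1 if the slot is out of range.
 */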
8262 static int imsm_disk_slot_to_ord(struct active_array *a, int slot)
8263 {
8264 int inst = a->info.container_member;
8265 struct intel_super *super = a->container->sb;
8266 struct imsm_dev *dev = get_imsm_dev(super, inst);
8267 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8268
8269 if (slot > map->num_members) {
8270 pr_err("imsm: imsm_disk_slot_to_ord %d out of range 0..%d\n",
8271 slot, map->num_members - 1);
8272 return -1;
8273 }
8274
8275 if (slot < 0)
8276 return -1;
8277
8278 return get_imsm_ord_tbl_ent(dev, slot, MAP_0);
8279 }
8280
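/* Per-disk state callback from mdmon: record new failures, note rebuild
 * completion, and move the volume between the normal, degraded and failed
 * map states accordingly.
 */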
8281 static void imsm_set_disk(struct active_array *a, int n, int state)
8282 {
8283 int inst = a->info.container_member;
8284 struct intel_super *super = a->container->sb;
8285 struct imsm_dev *dev = get_imsm_dev(super, inst);
8286 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8287 struct imsm_disk *disk;
8288 struct mdinfo *mdi;
8289 int recovery_not_finished = 0;
8290 int failed;
8291 int ord;
8292 __u8 map_state;
8293 int rebuild_done = 0;
8294 int i;
8295
8296 ord = get_imsm_ord_tbl_ent(dev, n, MAP_X);
8297 if (ord < 0)
8298 return;
8299
8300 dprintf("imsm: set_disk %d:%x\n", n, state);
8301 disk = get_imsm_disk(super, ord_to_idx(ord));
8302
8303 /* check for new failures */
8304 if (state & DS_FAULTY) {
8305 if (mark_failure(super, dev, disk, ord_to_idx(ord)))
8306 super->updates_pending++;
8307 }
8308
8309 /* check if in_sync */
8310 if (state & DS_INSYNC && ord & IMSM_ORD_REBUILD && is_rebuilding(dev)) {
8311 struct imsm_map *migr_map = get_imsm_map(dev, MAP_1);
8312
8313 set_imsm_ord_tbl_ent(migr_map, n, ord_to_idx(ord));
8314 rebuild_done = 1;
8315 super->updates_pending++;
8316 }
8317
8318 failed = imsm_count_failed(super, dev, MAP_0);
8319 map_state = imsm_check_degraded(super, dev, failed, MAP_0);
8320
8321 /* check if recovery complete, newly degraded, or failed */
8322 dprintf("imsm: Detected transition to state ");
8323 switch (map_state) {
8324 case IMSM_T_STATE_NORMAL: /* transition to normal state */
8325 dprintf("normal: ");
8326 if (is_rebuilding(dev)) {
8327 dprintf_cont("while rebuilding");
8328 /* check if recovery is really finished */
8329 for (mdi = a->info.devs; mdi ; mdi = mdi->next)
8330 if (mdi->recovery_start != MaxSector) {
8331 recovery_not_finished = 1;
8332 break;
8333 }
8334 if (recovery_not_finished) {
8335 dprintf_cont("\n");
8336 dprintf("Rebuild has not finished yet, state not changed");
8337 if (a->last_checkpoint < mdi->recovery_start) {
8338 a->last_checkpoint = mdi->recovery_start;
8339 super->updates_pending++;
8340 }
8341 break;
8342 }
8343 end_migration(dev, super, map_state);
8344 map = get_imsm_map(dev, MAP_0);
8345 map->failed_disk_num = ~0;
8346 super->updates_pending++;
8347 a->last_checkpoint = 0;
8348 break;
8349 }
8350 if (is_gen_migration(dev)) {
8351 dprintf_cont("while general migration");
8352 if (a->last_checkpoint >= a->info.component_size)
8353 end_migration(dev, super, map_state);
8354 else
8355 map->map_state = map_state;
8356 map = get_imsm_map(dev, MAP_0);
8357 map->failed_disk_num = ~0;
8358 super->updates_pending++;
8359 break;
8360 }
8361 break;
8362 case IMSM_T_STATE_DEGRADED: /* transition to degraded state */
8363 dprintf_cont("degraded: ");
8364 if (map->map_state != map_state && !dev->vol.migr_state) {
8365 dprintf_cont("mark degraded");
8366 map->map_state = map_state;
8367 super->updates_pending++;
8368 a->last_checkpoint = 0;
8369 break;
8370 }
8371 if (is_rebuilding(dev)) {
8372 dprintf_cont("while rebuilding.");
8373 if (map->map_state != map_state) {
8374 dprintf_cont(" Map state change");
8375 end_migration(dev, super, map_state);
8376 super->updates_pending++;
8377 } else if (!rebuild_done) {
8378 break;
8379 }
8380
8381 /* check if recovery is really finished */
8382 for (mdi = a->info.devs; mdi ; mdi = mdi->next)
8383 if (mdi->recovery_start != MaxSector) {
8384 recovery_not_finished = 1;
8385 break;
8386 }
8387 if (recovery_not_finished) {
8388 dprintf_cont("\n");
8389 dprintf("Rebuild has not finished yet, state not changed");
8390 if (a->last_checkpoint < mdi->recovery_start) {
8391 a->last_checkpoint =
8392 mdi->recovery_start;
8393 super->updates_pending++;
8394 }
8395 break;
8396 }
8397
8398 dprintf_cont(" Rebuild done, still degraded");
8399 dev->vol.migr_state = 0;
8400 set_migr_type(dev, 0);
8401 dev->vol.curr_migr_unit = 0;
8402
8403 for (i = 0; i < map->num_members; i++) {
8404 int idx = get_imsm_ord_tbl_ent(dev, i, MAP_0);
8405
8406 if (idx & IMSM_ORD_REBUILD)
8407 map->failed_disk_num = i;
8408 }
8409 super->updates_pending++;
8410 break;
8411 }
8412 if (is_gen_migration(dev)) {
8413 dprintf_cont("while general migration");
8414 if (a->last_checkpoint >= a->info.component_size)
8415 end_migration(dev, super, map_state);
8416 else {
8417 map->map_state = map_state;
8418 manage_second_map(super, dev);
8419 }
8420 super->updates_pending++;
8421 break;
8422 }
8423 if (is_initializing(dev)) {
8424 dprintf_cont("while initialization.");
8425 map->map_state = map_state;
8426 super->updates_pending++;
8427 break;
8428 }
8429 break;
8430 case IMSM_T_STATE_FAILED: /* transition to failed state */
8431 dprintf_cont("failed: ");
8432 if (is_gen_migration(dev)) {
8433 dprintf_cont("while general migration");
8434 map->map_state = map_state;
8435 super->updates_pending++;
8436 break;
8437 }
8438 if (map->map_state != map_state) {
8439 dprintf_cont("mark failed");
8440 end_migration(dev, super, map_state);
8441 super->updates_pending++;
8442 a->last_checkpoint = 0;
8443 break;
8444 }
8445 break;
8446 default:
8447 dprintf_cont("state %i\n", map_state);
8448 }
8449 dprintf_cont("\n");
8450 }
8451
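/* Write the MPB anchor to the second-to-last sector of the disk; if the
 * MPB is larger than one sector, the remainder is written to the sectors
 * immediately preceding the anchor.
 */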
8452 static int store_imsm_mpb(int fd, struct imsm_super *mpb)
8453 {
8454 void *buf = mpb;
8455 __u32 mpb_size = __le32_to_cpu(mpb->mpb_size);
8456 unsigned long long dsize;
8457 unsigned long long sectors;
8458 unsigned int sector_size;
8459
8460 get_dev_sector_size(fd, NULL, &sector_size);
8461 get_dev_size(fd, NULL, &dsize);
8462
8463 if (mpb_size > sector_size) {
8464 /* -1 to account for anchor */
8465 sectors = mpb_sectors(mpb, sector_size) - 1;
8466
8467 /* write the extended mpb to the sectors preceding the anchor */
8468 if (lseek64(fd, dsize - (sector_size * (2 + sectors)),
8469 SEEK_SET) < 0)
8470 return 1;
8471
8472 if ((unsigned long long)write(fd, buf + sector_size,
8473 sector_size * sectors) != sector_size * sectors)
8474 return 1;
8475 }
8476
8477 /* first block is stored on second to last sector of the disk */
8478 if (lseek64(fd, dsize - (sector_size * 2), SEEK_SET) < 0)
8479 return 1;
8480
8481 if ((unsigned int)write(fd, buf, sector_size) != sector_size)
8482 return 1;
8483
8484 return 0;
8485 }
8486
8487 static void imsm_sync_metadata(struct supertype *container)
8488 {
8489 struct intel_super *super = container->sb;
8490
8491 dprintf("sync metadata: %d\n", super->updates_pending);
8492 if (!super->updates_pending)
8493 return;
8494
8495 write_super_imsm(container, 0);
8496
8497 super->updates_pending = 0;
8498 }
8499
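/* Look up the previous occupant of slot idx in the container disk list and
 * return it unless it has failed, so it can simply be re-added.
 */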
8500 static struct dl *imsm_readd(struct intel_super *super, int idx, struct active_array *a)
8501 {
8502 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
8503 int i = get_imsm_disk_idx(dev, idx, MAP_X);
8504 struct dl *dl;
8505
8506 for (dl = super->disks; dl; dl = dl->next)
8507 if (dl->index == i)
8508 break;
8509
8510 if (dl && is_failed(&dl->disk))
8511 dl = NULL;
8512
8513 if (dl)
8514 dprintf("found %x:%x\n", dl->major, dl->minor);
8515
8516 return dl;
8517 }
8518
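/* Scan the container for a disk that can fill 'slot': it must not already
 * be a member of this array, must pass the sector-size check, and must have
 * enough free space to cover every member volume. Pristine spares are only
 * considered when activate_new is set.
 */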
8519 static struct dl *imsm_add_spare(struct intel_super *super, int slot,
8520 struct active_array *a, int activate_new,
8521 struct mdinfo *additional_test_list)
8522 {
8523 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
8524 int idx = get_imsm_disk_idx(dev, slot, MAP_X);
8525 struct imsm_super *mpb = super->anchor;
8526 struct imsm_map *map;
8527 unsigned long long pos;
8528 struct mdinfo *d;
8529 struct extent *ex;
8530 int i, j;
8531 int found;
8532 __u32 array_start = 0;
8533 __u32 array_end = 0;
8534 struct dl *dl;
8535 struct mdinfo *test_list;
8536
8537 for (dl = super->disks; dl; dl = dl->next) {
8538 /* If in this array, skip */
8539 for (d = a->info.devs ; d ; d = d->next)
8540 if (d->state_fd >= 0 &&
8541 d->disk.major == dl->major &&
8542 d->disk.minor == dl->minor) {
8543 dprintf("%x:%x already in array\n",
8544 dl->major, dl->minor);
8545 break;
8546 }
8547 if (d)
8548 continue;
8549 test_list = additional_test_list;
8550 while (test_list) {
8551 if (test_list->disk.major == dl->major &&
8552 test_list->disk.minor == dl->minor) {
8553 dprintf("%x:%x already in additional test list\n",
8554 dl->major, dl->minor);
8555 break;
8556 }
8557 test_list = test_list->next;
8558 }
8559 if (test_list)
8560 continue;
8561
8562 /* skip in use or failed drives */
8563 if (is_failed(&dl->disk) || idx == dl->index ||
8564 dl->index == -2) {
8565 dprintf("%x:%x status (failed: %d index: %d)\n",
8566 dl->major, dl->minor, is_failed(&dl->disk), idx);
8567 continue;
8568 }
8569
8570 /* skip pure spares when we are looking for partially
8571 * assimilated drives
8572 */
8573 if (dl->index == -1 && !activate_new)
8574 continue;
8575
8576 if (!drive_validate_sector_size(super, dl))
8577 continue;
8578
8579 /* Does this unused device have the requisite free space?
8580 * It needs to be able to cover all member volumes
8581 */
8582 ex = get_extents(super, dl);
8583 if (!ex) {
8584 dprintf("cannot get extents\n");
8585 continue;
8586 }
8587 for (i = 0; i < mpb->num_raid_devs; i++) {
8588 dev = get_imsm_dev(super, i);
8589 map = get_imsm_map(dev, MAP_0);
8590
8591 /* check if this disk is already a member of
8592 * this array
8593 */
8594 if (get_imsm_disk_slot(map, dl->index) >= 0)
8595 continue;
8596
8597 found = 0;
8598 j = 0;
8599 pos = 0;
8600 array_start = pba_of_lba0(map);
8601 array_end = array_start +
8602 blocks_per_member(map) - 1;
8603
8604 do {
8605 /* check that we can start at pba_of_lba0 with
8606 * blocks_per_member of space
8607 */
8608 if (array_start >= pos && array_end < ex[j].start) {
8609 found = 1;
8610 break;
8611 }
8612 pos = ex[j].start + ex[j].size;
8613 j++;
8614 } while (ex[j-1].size);
8615
8616 if (!found)
8617 break;
8618 }
8619
8620 free(ex);
8621 if (i < mpb->num_raid_devs) {
8622 dprintf("%x:%x does not have %u to %u available\n",
8623 dl->major, dl->minor, array_start, array_end);
8624 /* No room */
8625 continue;
8626 }
8627 return dl;
8628 }
8629
8630 return dl;
8631 }
8632
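/* Rebuild of a degraded volume is only allowed once any failed disks that
 * belong to an already-failed sub-array have been removed from the container
 * (or are marked for removal); returns 0 to block the rebuild.
 */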
8633 static int imsm_rebuild_allowed(struct supertype *cont, int dev_idx, int failed)
8634 {
8635 struct imsm_dev *dev2;
8636 struct imsm_map *map;
8637 struct dl *idisk;
8638 int slot;
8639 int idx;
8640 __u8 state;
8641
8642 dev2 = get_imsm_dev(cont->sb, dev_idx);
8643 if (dev2) {
8644 state = imsm_check_degraded(cont->sb, dev2, failed, MAP_0);
8645 if (state == IMSM_T_STATE_FAILED) {
8646 map = get_imsm_map(dev2, MAP_0);
8647 if (!map)
8648 return 1;
8649 for (slot = 0; slot < map->num_members; slot++) {
8650 /*
8651 * Check if failed disks are deleted from intel
8652 * disk list or are marked to be deleted
8653 */
8654 idx = get_imsm_disk_idx(dev2, slot, MAP_X);
8655 idisk = get_imsm_dl_disk(cont->sb, idx);
8656 /*
8657 * Do not rebuild the array if failed disks
8658 * from failed sub-array are not removed from
8659 * container.
8660 */
8661 if (idisk &&
8662 is_failed(&idisk->disk) &&
8663 (idisk->action != DISK_REMOVE))
8664 return 0;
8665 }
8666 }
8667 }
8668 return 1;
8669 }
8670
8671 static struct mdinfo *imsm_activate_spare(struct active_array *a,
8672 struct metadata_update **updates)
8673 {
8674 /**
8675 * Find a device with unused free space and use it to replace a
8676 * failed/vacant region in an array. We replace failed regions one
8677 * array at a time. The result is that a new spare disk will be added
8678 * to the first failed array and after the monitor has finished
8679 * propagating failures the remainder will be consumed.
8680 *
8681 * FIXME add a capability for mdmon to request spares from another
8682 * container.
8683 */
8684
8685 struct intel_super *super = a->container->sb;
8686 int inst = a->info.container_member;
8687 struct imsm_dev *dev = get_imsm_dev(super, inst);
8688 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8689 int failed = a->info.array.raid_disks;
8690 struct mdinfo *rv = NULL;
8691 struct mdinfo *d;
8692 struct mdinfo *di;
8693 struct metadata_update *mu;
8694 struct dl *dl;
8695 struct imsm_update_activate_spare *u;
8696 int num_spares = 0;
8697 int i;
8698 int allowed;
8699
8700 for (d = a->info.devs ; d ; d = d->next) {
8701 if ((d->curr_state & DS_FAULTY) &&
8702 d->state_fd >= 0)
8703 /* wait for Removal to happen */
8704 return NULL;
8705 if (d->state_fd >= 0)
8706 failed--;
8707 }
8708
8709 dprintf("imsm: activate spare: inst=%d failed=%d (%d) level=%d\n",
8710 inst, failed, a->info.array.raid_disks, a->info.array.level);
8711
8712 if (imsm_reshape_blocks_arrays_changes(super))
8713 return NULL;
8714
8715 /* Cannot activate another spare if rebuild is in progress already
8716 */
8717 if (is_rebuilding(dev)) {
8718 dprintf("imsm: No spare activation allowed. Rebuild in progress already.\n");
8719 return NULL;
8720 }
8721
8722 if (a->info.array.level == 4)
8723 /* No repair for a taken-over array,
8724 * imsm doesn't support raid4
8725 */
8726 return NULL;
8727
8728 if (imsm_check_degraded(super, dev, failed, MAP_0) !=
8729 IMSM_T_STATE_DEGRADED)
8730 return NULL;
8731
8732 if (get_imsm_map(dev, MAP_0)->map_state == IMSM_T_STATE_UNINITIALIZED) {
8733 dprintf("imsm: No spare activation allowed. Volume is not initialized.\n");
8734 return NULL;
8735 }
8736
8737 /*
8738 * If there are any failed disks, check the state of the other volume.
8739 * Block the rebuild if the other volume has failed, until the failed
8740 * disks are removed from the container.
8741 */
8742 if (failed) {
8743 dprintf("found failed disks in %.*s, check if there anotherfailed sub-array.\n",
8744 MAX_RAID_SERIAL_LEN, dev->volume);
8745 /* check if states of the other volumes allow for rebuild */
8746 for (i = 0; i < super->anchor->num_raid_devs; i++) {
8747 if (i != inst) {
8748 allowed = imsm_rebuild_allowed(a->container,
8749 i, failed);
8750 if (!allowed)
8751 return NULL;
8752 }
8753 }
8754 }
8755
8756 /* For each slot, if it is not working, find a spare */
8757 for (i = 0; i < a->info.array.raid_disks; i++) {
8758 for (d = a->info.devs ; d ; d = d->next)
8759 if (d->disk.raid_disk == i)
8760 break;
8761 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
8762 if (d && (d->state_fd >= 0))
8763 continue;
8764
8765 /*
8766 * OK, this device needs recovery. Try to re-add the
8767 * previous occupant of this slot, if this fails see if
8768 * we can continue the assimilation of a spare that was
8769 * partially assimilated, finally try to activate a new
8770 * spare.
8771 */
8772 dl = imsm_readd(super, i, a);
8773 if (!dl)
8774 dl = imsm_add_spare(super, i, a, 0, rv);
8775 if (!dl)
8776 dl = imsm_add_spare(super, i, a, 1, rv);
8777 if (!dl)
8778 continue;
8779
8780 /* found a usable disk with enough space */
8781 di = xcalloc(1, sizeof(*di));
8782
8783 /* dl->index will be -1 in the case we are activating a
8784 * pristine spare. imsm_process_update() will create a
8785 * new index in this case. Once a disk is found to be
8786 * failed in all member arrays it is kicked from the
8787 * metadata
8788 */
8789 di->disk.number = dl->index;
8790
8791 /* (ab)use di->devs to store a pointer to the device
8792 * we chose
8793 */
8794 di->devs = (struct mdinfo *) dl;
8795
8796 di->disk.raid_disk = i;
8797 di->disk.major = dl->major;
8798 di->disk.minor = dl->minor;
8799 di->disk.state = 0;
8800 di->recovery_start = 0;
8801 di->data_offset = pba_of_lba0(map);
8802 di->component_size = a->info.component_size;
8803 di->container_member = inst;
8804 di->bb.supported = 1;
8805 if (a->info.consistency_policy == CONSISTENCY_POLICY_PPL) {
8806 di->ppl_sector = get_ppl_sector(super, inst);
8807 di->ppl_size = MULTIPLE_PPL_AREA_SIZE_IMSM >> 9;
8808 }
8809 super->random = random32();
8810 di->next = rv;
8811 rv = di;
8812 num_spares++;
8813 dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
8814 i, di->data_offset);
8815 }
8816
8817 if (!rv)
8818 /* No spares found */
8819 return rv;
8820 /* Now 'rv' has a list of devices to return.
8821 * Create a metadata_update record to update the
8822 * disk_ord_tbl for the array
8823 */
8824 mu = xmalloc(sizeof(*mu));
8825 mu->buf = xcalloc(num_spares,
8826 sizeof(struct imsm_update_activate_spare));
8827 mu->space = NULL;
8828 mu->space_list = NULL;
8829 mu->len = sizeof(struct imsm_update_activate_spare) * num_spares;
8830 mu->next = *updates;
8831 u = (struct imsm_update_activate_spare *) mu->buf;
8832
8833 for (di = rv ; di ; di = di->next) {
8834 u->type = update_activate_spare;
8835 u->dl = (struct dl *) di->devs;
8836 di->devs = NULL;
8837 u->slot = di->disk.raid_disk;
8838 u->array = inst;
8839 u->next = u + 1;
8840 u++;
8841 }
8842 (u-1)->next = NULL;
8843 *updates = mu;
8844
8845 return rv;
8846 }
8847
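/* Return true if any disk serial in the existing array 'idx' also appears
 * in the disk list of the create-array update 'u'.
 */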
8848 static int disks_overlap(struct intel_super *super, int idx, struct imsm_update_create_array *u)
8849 {
8850 struct imsm_dev *dev = get_imsm_dev(super, idx);
8851 struct imsm_map *map = get_imsm_map(dev, MAP_0);
8852 struct imsm_map *new_map = get_imsm_map(&u->dev, MAP_0);
8853 struct disk_info *inf = get_disk_info(u);
8854 struct imsm_disk *disk;
8855 int i;
8856 int j;
8857
8858 for (i = 0; i < map->num_members; i++) {
8859 disk = get_imsm_disk(super, get_imsm_disk_idx(dev, i, MAP_X));
8860 for (j = 0; j < new_map->num_members; j++)
8861 if (serialcmp(disk->serial, inf[j].serial) == 0)
8862 return 1;
8863 }
8864
8865 return 0;
8866 }
8867
8868 static struct dl *get_disk_super(struct intel_super *super, int major, int minor)
8869 {
8870 struct dl *dl;
8871
8872 for (dl = super->disks; dl; dl = dl->next)
8873 if (dl->major == major && dl->minor == minor)
8874 return dl;
8875 return NULL;
8876 }
8877
8878 static int remove_disk_super(struct intel_super *super, int major, int minor)
8879 {
8880 struct dl *prev;
8881 struct dl *dl;
8882
8883 prev = NULL;
8884 for (dl = super->disks; dl; dl = dl->next) {
8885 if (dl->major == major && dl->minor == minor) {
8886 /* remove */
8887 if (prev)
8888 prev->next = dl->next;
8889 else
8890 super->disks = dl->next;
8891 dl->next = NULL;
8892 __free_imsm_disk(dl);
8893 dprintf("removed %x:%x\n", major, minor);
8894 break;
8895 }
8896 prev = dl;
8897 }
8898 return 0;
8899 }
8900
8901 static void imsm_delete(struct intel_super *super, struct dl **dlp, unsigned index);
8902
8903 static int add_remove_disk_update(struct intel_super *super)
8904 {
8905 int check_degraded = 0;
8906 struct dl *disk;
8907
8908 /* add/remove some spares to/from the metadata/container */
8909 while (super->disk_mgmt_list) {
8910 struct dl *disk_cfg;
8911
8912 disk_cfg = super->disk_mgmt_list;
8913 super->disk_mgmt_list = disk_cfg->next;
8914 disk_cfg->next = NULL;
8915
8916 if (disk_cfg->action == DISK_ADD) {
8917 disk_cfg->next = super->disks;
8918 super->disks = disk_cfg;
8919 check_degraded = 1;
8920 dprintf("added %x:%x\n",
8921 disk_cfg->major, disk_cfg->minor);
8922 } else if (disk_cfg->action == DISK_REMOVE) {
8923 dprintf("Disk remove action processed: %x.%x\n",
8924 disk_cfg->major, disk_cfg->minor);
8925 disk = get_disk_super(super,
8926 disk_cfg->major,
8927 disk_cfg->minor);
8928 if (disk) {
8929 /* store action status */
8930 disk->action = DISK_REMOVE;
8931 /* remove spare disks only */
8932 if (disk->index == -1) {
8933 remove_disk_super(super,
8934 disk_cfg->major,
8935 disk_cfg->minor);
8936 }
8937 }
8938 /* release the allocated disk structure */
8939 __free_imsm_disk(disk_cfg);
8940 }
8941 }
8942 return check_degraded;
8943 }
8944
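/* Apply a reshape/migration update to a single sub-array: start a general
 * migration, optionally grow RAID0 to RAID5 using the passed spare, and
 * refresh the chunk size and array size in the metadata copy.
 */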
8945 static int apply_reshape_migration_update(struct imsm_update_reshape_migration *u,
8946 struct intel_super *super,
8947 void ***space_list)
8948 {
8949 struct intel_dev *id;
8950 void **tofree = NULL;
8951 int ret_val = 0;
8952
8953 dprintf("(enter)\n");
8954 if (u->subdev < 0 || u->subdev > 1) {
8955 dprintf("imsm: Error: Wrong subdev: %i\n", u->subdev);
8956 return ret_val;
8957 }
8958 if (space_list == NULL || *space_list == NULL) {
8959 dprintf("imsm: Error: Memory is not allocated\n");
8960 return ret_val;
8961 }
8962
8963 for (id = super->devlist ; id; id = id->next) {
8964 if (id->index == (unsigned)u->subdev) {
8965 struct imsm_dev *dev = get_imsm_dev(super, u->subdev);
8966 struct imsm_map *map;
8967 struct imsm_dev *new_dev =
8968 (struct imsm_dev *)*space_list;
8969 struct imsm_map *migr_map = get_imsm_map(dev, MAP_1);
8970 int to_state;
8971 struct dl *new_disk;
8972
8973 if (new_dev == NULL)
8974 return ret_val;
8975 *space_list = **space_list;
8976 memcpy(new_dev, dev, sizeof_imsm_dev(dev, 0));
8977 map = get_imsm_map(new_dev, MAP_0);
8978 if (migr_map) {
8979 dprintf("imsm: Error: migration in progress");
8980 return ret_val;
8981 }
8982
8983 to_state = map->map_state;
8984 if ((u->new_level == 5) && (map->raid_level == 0)) {
8985 map->num_members++;
8986 /* this should not happen */
8987 if (u->new_disks[0] < 0) {
8988 map->failed_disk_num =
8989 map->num_members - 1;
8990 to_state = IMSM_T_STATE_DEGRADED;
8991 } else
8992 to_state = IMSM_T_STATE_NORMAL;
8993 }
8994 migrate(new_dev, super, to_state, MIGR_GEN_MIGR);
8995 if (u->new_level > -1)
8996 map->raid_level = u->new_level;
8997 migr_map = get_imsm_map(new_dev, MAP_1);
8998 if ((u->new_level == 5) &&
8999 (migr_map->raid_level == 0)) {
9000 int ord = map->num_members - 1;
9001 migr_map->num_members--;
9002 if (u->new_disks[0] < 0)
9003 ord |= IMSM_ORD_REBUILD;
9004 set_imsm_ord_tbl_ent(map,
9005 map->num_members - 1,
9006 ord);
9007 }
9008 id->dev = new_dev;
9009 tofree = (void **)dev;
9010
9011 /* update chunk size
9012 */
9013 if (u->new_chunksize > 0) {
9014 unsigned long long num_data_stripes;
9015 int used_disks =
9016 imsm_num_data_members(dev, MAP_0);
9017
9018 if (used_disks == 0)
9019 return ret_val;
9020
9021 map->blocks_per_strip =
9022 __cpu_to_le16(u->new_chunksize * 2);
9023 num_data_stripes =
9024 (join_u32(dev->size_low, dev->size_high)
9025 / used_disks);
9026 num_data_stripes /= map->blocks_per_strip;
9027 num_data_stripes /= map->num_domains;
9028 set_num_data_stripes(map, num_data_stripes);
9029 }
9030
9031 /* add disk
9032 */
9033 if (u->new_level != 5 || migr_map->raid_level != 0 ||
9034 migr_map->raid_level == map->raid_level)
9035 goto skip_disk_add;
9036
9037 if (u->new_disks[0] >= 0) {
9038 /* use the passed-in spare
9039 */
9040 new_disk = get_disk_super(super,
9041 major(u->new_disks[0]),
9042 minor(u->new_disks[0]));
9043 dprintf("imsm: new disk for reshape is: %i:%i (%p, index = %i)\n",
9044 major(u->new_disks[0]),
9045 minor(u->new_disks[0]),
9046 new_disk, new_disk->index);
9047 if (new_disk == NULL)
9048 goto error_disk_add;
9049
9050 new_disk->index = map->num_members - 1;
9051 /* slot to fill in autolayout
9052 */
9053 new_disk->raiddisk = new_disk->index;
9054 new_disk->disk.status |= CONFIGURED_DISK;
9055 new_disk->disk.status &= ~SPARE_DISK;
9056 } else
9057 goto error_disk_add;
9058
9059 skip_disk_add:
9060 *tofree = *space_list;
9061 /* calculate new size
9062 */
9063 imsm_set_array_size(new_dev, -1);
9064
9065 ret_val = 1;
9066 }
9067 }
9068
9069 if (tofree)
9070 *space_list = tofree;
9071 return ret_val;
9072
9073 error_disk_add:
9074 dprintf("Error: imsm: Cannot find disk.\n");
9075 return ret_val;
9076 }
9077
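/* Apply an online size-change update: recompute blocks per member and the
 * number of data stripes from the requested size, then store the new array
 * size in the metadata.
 */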
9078 static int apply_size_change_update(struct imsm_update_size_change *u,
9079 struct intel_super *super)
9080 {
9081 struct intel_dev *id;
9082 int ret_val = 0;
9083
9084 dprintf("(enter)\n");
9085 if (u->subdev < 0 || u->subdev > 1) {
9086 dprintf("imsm: Error: Wrong subdev: %i\n", u->subdev);
9087 return ret_val;
9088 }
9089
9090 for (id = super->devlist ; id; id = id->next) {
9091 if (id->index == (unsigned)u->subdev) {
9092 struct imsm_dev *dev = get_imsm_dev(super, u->subdev);
9093 struct imsm_map *map = get_imsm_map(dev, MAP_0);
9094 int used_disks = imsm_num_data_members(dev, MAP_0);
9095 unsigned long long blocks_per_member;
9096 unsigned long long num_data_stripes;
9097
9098 /* calculate new size
9099 */
9100 blocks_per_member = u->new_size / used_disks;
9101 num_data_stripes = blocks_per_member /
9102 map->blocks_per_strip;
9103 num_data_stripes /= map->num_domains;
9104 dprintf("(size: %llu, blocks per member: %llu, num_data_stipes: %llu)\n",
9105 u->new_size, blocks_per_member,
9106 num_data_stripes);
9107 set_blocks_per_member(map, blocks_per_member);
9108 set_num_data_stripes(map, num_data_stripes);
9109 imsm_set_array_size(dev, u->new_size);
9110
9111 ret_val = 1;
9112 break;
9113 }
9114 }
9115
9116 return ret_val;
9117 }
9118
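/* Activate spares chosen by imsm_activate_spare(): assign an index to a
 * pristine spare, mark the slot as rebuilding in the order tables, bump the
 * family number, and delete the failed victim from the metadata once no
 * array references it any more.
 */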
9119 static int apply_update_activate_spare(struct imsm_update_activate_spare *u,
9120 struct intel_super *super,
9121 struct active_array *active_array)
9122 {
9123 struct imsm_super *mpb = super->anchor;
9124 struct imsm_dev *dev = get_imsm_dev(super, u->array);
9125 struct imsm_map *map = get_imsm_map(dev, MAP_0);
9126 struct imsm_map *migr_map;
9127 struct active_array *a;
9128 struct imsm_disk *disk;
9129 __u8 to_state;
9130 struct dl *dl;
9131 unsigned int found;
9132 int failed;
9133 int victim;
9134 int i;
9135 int second_map_created = 0;
9136
9137 for (; u; u = u->next) {
9138 victim = get_imsm_disk_idx(dev, u->slot, MAP_X);
9139
9140 if (victim < 0)
9141 return 0;
9142
9143 for (dl = super->disks; dl; dl = dl->next)
9144 if (dl == u->dl)
9145 break;
9146
9147 if (!dl) {
9148 pr_err("error: imsm_activate_spare passed an unknown disk (index: %d)\n",
9149 u->dl->index);
9150 return 0;
9151 }
9152
9153 /* count failures (excluding rebuilds and the victim)
9154 * to determine map[0] state
9155 */
9156 failed = 0;
9157 for (i = 0; i < map->num_members; i++) {
9158 if (i == u->slot)
9159 continue;
9160 disk = get_imsm_disk(super,
9161 get_imsm_disk_idx(dev, i, MAP_X));
9162 if (!disk || is_failed(disk))
9163 failed++;
9164 }
9165
9166 /* adding a pristine spare, assign a new index */
9167 if (dl->index < 0) {
9168 dl->index = super->anchor->num_disks;
9169 super->anchor->num_disks++;
9170 }
9171 disk = &dl->disk;
9172 disk->status |= CONFIGURED_DISK;
9173 disk->status &= ~SPARE_DISK;
9174
9175 /* mark rebuild */
9176 to_state = imsm_check_degraded(super, dev, failed, MAP_0);
9177 if (!second_map_created) {
9178 second_map_created = 1;
9179 map->map_state = IMSM_T_STATE_DEGRADED;
9180 migrate(dev, super, to_state, MIGR_REBUILD);
9181 } else
9182 map->map_state = to_state;
9183 migr_map = get_imsm_map(dev, MAP_1);
9184 set_imsm_ord_tbl_ent(map, u->slot, dl->index);
9185 set_imsm_ord_tbl_ent(migr_map, u->slot,
9186 dl->index | IMSM_ORD_REBUILD);
9187
9188 /* update the family_num to mark a new container
9189 * generation, being careful to record the existing
9190 * family_num in orig_family_num to clean up after
9191 * earlier mdadm versions that neglected to set it.
9192 */
9193 if (mpb->orig_family_num == 0)
9194 mpb->orig_family_num = mpb->family_num;
9195 mpb->family_num += super->random;
9196
9197 /* count arrays using the victim in the metadata */
9198 found = 0;
9199 for (a = active_array; a ; a = a->next) {
9200 dev = get_imsm_dev(super, a->info.container_member);
9201 map = get_imsm_map(dev, MAP_0);
9202
9203 if (get_imsm_disk_slot(map, victim) >= 0)
9204 found++;
9205 }
9206
9207 /* delete the victim if it is no longer being
9208 * utilized anywhere
9209 */
9210 if (!found) {
9211 struct dl **dlp;
9212
9213 /* We know that 'manager' isn't touching anything,
9214 * so it is safe to delete
9215 */
9216 for (dlp = &super->disks; *dlp; dlp = &(*dlp)->next)
9217 if ((*dlp)->index == victim)
9218 break;
9219
9220 /* victim may be on the missing list */
9221 if (!*dlp)
9222 for (dlp = &super->missing; *dlp;
9223 dlp = &(*dlp)->next)
9224 if ((*dlp)->index == victim)
9225 break;
9226 imsm_delete(super, dlp, victim);
9227 }
9228 }
9229
9230 return 1;
9231 }
9232
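/* Apply a container-wide disk-add reshape: turn the selected spares into
 * configured members, then put the first volume to be reshaped into general
 * migration with the new member count, preserving its previous map as map[1].
 */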
9233 static int apply_reshape_container_disks_update(struct imsm_update_reshape *u,
9234 struct intel_super *super,
9235 void ***space_list)
9236 {
9237 struct dl *new_disk;
9238 struct intel_dev *id;
9239 int i;
9240 int delta_disks = u->new_raid_disks - u->old_raid_disks;
9241 int disk_count = u->old_raid_disks;
9242 void **tofree = NULL;
9243 int devices_to_reshape = 1;
9244 struct imsm_super *mpb = super->anchor;
9245 int ret_val = 0;
9246 unsigned int dev_id;
9247
9248 dprintf("(enter)\n");
9249
9250 /* enable spares to use in array */
9251 for (i = 0; i < delta_disks; i++) {
9252 new_disk = get_disk_super(super,
9253 major(u->new_disks[i]),
9254 minor(u->new_disks[i]));
9255 dprintf("imsm: new disk for reshape is: %i:%i (%p, index = %i)\n",
9256 major(u->new_disks[i]), minor(u->new_disks[i]),
9257 new_disk, new_disk->index);
9258 if (new_disk == NULL ||
9259 (new_disk->index >= 0 &&
9260 new_disk->index < u->old_raid_disks))
9261 goto update_reshape_exit;
9262 new_disk->index = disk_count++;
9263 /* slot to fill in autolayout
9264 */
9265 new_disk->raiddisk = new_disk->index;
9266 new_disk->disk.status |=
9267 CONFIGURED_DISK;
9268 new_disk->disk.status &= ~SPARE_DISK;
9269 }
9270
9271 dprintf("imsm: volume set mpb->num_raid_devs = %i\n",
9272 mpb->num_raid_devs);
9273 /* manage changes in volume
9274 */
9275 for (dev_id = 0; dev_id < mpb->num_raid_devs; dev_id++) {
9276 void **sp = *space_list;
9277 struct imsm_dev *newdev;
9278 struct imsm_map *newmap, *oldmap;
9279
9280 for (id = super->devlist ; id; id = id->next) {
9281 if (id->index == dev_id)
9282 break;
9283 }
9284 if (id == NULL)
9285 break;
9286 if (!sp)
9287 continue;
9288 *space_list = *sp;
9289 newdev = (void*)sp;
9290 /* Copy the dev, but not (all of) the map */
9291 memcpy(newdev, id->dev, sizeof(*newdev));
9292 oldmap = get_imsm_map(id->dev, MAP_0);
9293 newmap = get_imsm_map(newdev, MAP_0);
9294 /* Copy the current map */
9295 memcpy(newmap, oldmap, sizeof_imsm_map(oldmap));
9296 /* update one device only
9297 */
9298 if (devices_to_reshape) {
9299 dprintf("imsm: modifying subdev: %i\n",
9300 id->index);
9301 devices_to_reshape--;
9302 newdev->vol.migr_state = 1;
9303 newdev->vol.curr_migr_unit = 0;
9304 set_migr_type(newdev, MIGR_GEN_MIGR);
9305 newmap->num_members = u->new_raid_disks;
9306 for (i = 0; i < delta_disks; i++) {
9307 set_imsm_ord_tbl_ent(newmap,
9308 u->old_raid_disks + i,
9309 u->old_raid_disks + i);
9310 }
9311 /* New map is correct, now need to save old map
9312 */
9313 newmap = get_imsm_map(newdev, MAP_1);
9314 memcpy(newmap, oldmap, sizeof_imsm_map(oldmap));
9315
9316 imsm_set_array_size(newdev, -1);
9317 }
9318
9319 sp = (void **)id->dev;
9320 id->dev = newdev;
9321 *sp = tofree;
9322 tofree = sp;
9323
9324 /* Clear migration record */
9325 memset(super->migr_rec, 0, sizeof(struct migr_record));
9326 }
9327 if (tofree)
9328 *space_list = tofree;
9329 ret_val = 1;
9330
9331 update_reshape_exit:
9332
9333 return ret_val;
9334 }
9335
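/* Apply a level takeover between RAID10 and RAID0: shrink the map and mark
 * removed disks as spares for R10_TO_R0, or double the map and create
 * placeholder "missing" mirrors for R0_TO_R10.
 */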
9336 static int apply_takeover_update(struct imsm_update_takeover *u,
9337 struct intel_super *super,
9338 void ***space_list)
9339 {
9340 struct imsm_dev *dev = NULL;
9341 struct intel_dev *dv;
9342 struct imsm_dev *dev_new;
9343 struct imsm_map *map;
9344 struct dl *dm, *du;
9345 int i;
9346
9347 for (dv = super->devlist; dv; dv = dv->next)
9348 if (dv->index == (unsigned int)u->subarray) {
9349 dev = dv->dev;
9350 break;
9351 }
9352
9353 if (dev == NULL)
9354 return 0;
9355
9356 map = get_imsm_map(dev, MAP_0);
9357
9358 if (u->direction == R10_TO_R0) {
9359 unsigned long long num_data_stripes;
9360
9361 map->num_domains = 1;
9362 num_data_stripes = blocks_per_member(map);
9363 num_data_stripes /= map->blocks_per_strip;
9364 num_data_stripes /= map->num_domains;
9365 set_num_data_stripes(map, num_data_stripes);
9366
9367 /* Number of failed disks must be half of initial disk number */
9368 if (imsm_count_failed(super, dev, MAP_0) !=
9369 (map->num_members / 2))
9370 return 0;
9371
9372 /* iterate through devices to mark removed disks as spare */
9373 for (dm = super->disks; dm; dm = dm->next) {
9374 if (dm->disk.status & FAILED_DISK) {
9375 int idx = dm->index;
9376 /* update indexes on the disk list */
9377 /* FIXME this loop-with-the-loop looks wrong, I'm not convinced
9378 the index values will end up being correct.... NB */
9379 for (du = super->disks; du; du = du->next)
9380 if (du->index > idx)
9381 du->index--;
9382 /* mark as spare disk */
9383 mark_spare(dm);
9384 }
9385 }
9386 /* update map */
9387 map->num_members = map->num_members / 2;
9388 map->map_state = IMSM_T_STATE_NORMAL;
9389 map->num_domains = 1;
9390 map->raid_level = 0;
9391 map->failed_disk_num = -1;
9392 }
9393
9394 if (u->direction == R0_TO_R10) {
9395 void **space;
9396 /* update slots in current disk list */
9397 for (dm = super->disks; dm; dm = dm->next) {
9398 if (dm->index >= 0)
9399 dm->index *= 2;
9400 }
9401 /* create new *missing* disks */
9402 for (i = 0; i < map->num_members; i++) {
9403 space = *space_list;
9404 if (!space)
9405 continue;
9406 *space_list = *space;
9407 du = (void *)space;
9408 memcpy(du, super->disks, sizeof(*du));
9409 du->fd = -1;
9410 du->minor = 0;
9411 du->major = 0;
9412 du->index = (i * 2) + 1;
9413 sprintf((char *)du->disk.serial,
9414 " MISSING_%d", du->index);
9415 sprintf((char *)du->serial,
9416 "MISSING_%d", du->index);
9417 du->next = super->missing;
9418 super->missing = du;
9419 }
9420 /* create new dev and map */
9421 space = *space_list;
9422 if (!space)
9423 return 0;
9424 *space_list = *space;
9425 dev_new = (void *)space;
9426 memcpy(dev_new, dev, sizeof(*dev));
9427 /* update new map */
9428 map = get_imsm_map(dev_new, MAP_0);
9429 map->num_members = map->num_members * 2;
9430 map->map_state = IMSM_T_STATE_DEGRADED;
9431 map->num_domains = 2;
9432 map->raid_level = 1;
9433 /* replace dev<->dev_new */
9434 dv->dev = dev_new;
9435 }
9436 /* update disk order table */
9437 for (du = super->disks; du; du = du->next)
9438 if (du->index >= 0)
9439 set_imsm_ord_tbl_ent(map, du->index, du->index);
9440 for (du = super->missing; du; du = du->next)
9441 if (du->index >= 0) {
9442 set_imsm_ord_tbl_ent(map, du->index, du->index);
9443 mark_missing(super, dv->dev, &du->disk, du->index);
9444 }
9445
9446 return 1;
9447 }
9448
9449 static void imsm_process_update(struct supertype *st,
9450 struct metadata_update *update)
9451 {
9452 /**
9453 * crack open the metadata_update envelope to find the update record
9454 * update can be one of:
9455 * update_reshape_container_disks - all the arrays in the container
9456 * are being reshaped to have more devices. We need to mark
9457 * the arrays for general migration and convert selected spares
9458 * into active devices.
9459 * update_activate_spare - a spare device has replaced a failed
9460 * device in an array, update the disk_ord_tbl. If this disk is
9461 * present in all member arrays then also clear the SPARE_DISK
9462 * flag
9463 * update_create_array
9464 * update_kill_array
9465 * update_rename_array
9466 * update_add_remove_disk
9467 */
9468 struct intel_super *super = st->sb;
9469 struct imsm_super *mpb;
9470 enum imsm_update_type type = *(enum imsm_update_type *) update->buf;
9471
9472 /* update requires a larger buf but the allocation failed */
9473 if (super->next_len && !super->next_buf) {
9474 super->next_len = 0;
9475 return;
9476 }
9477
9478 if (super->next_buf) {
9479 memcpy(super->next_buf, super->buf, super->len);
9480 free(super->buf);
9481 super->len = super->next_len;
9482 super->buf = super->next_buf;
9483
9484 super->next_len = 0;
9485 super->next_buf = NULL;
9486 }
9487
9488 mpb = super->anchor;
9489
9490 switch (type) {
9491 case update_general_migration_checkpoint: {
9492 struct intel_dev *id;
9493 struct imsm_update_general_migration_checkpoint *u =
9494 (void *)update->buf;
9495
9496 dprintf("called for update_general_migration_checkpoint\n");
9497
9498 /* find device under general migration */
9499 for (id = super->devlist ; id; id = id->next) {
9500 if (is_gen_migration(id->dev)) {
9501 id->dev->vol.curr_migr_unit =
9502 __cpu_to_le32(u->curr_migr_unit);
9503 super->updates_pending++;
9504 }
9505 }
9506 break;
9507 }
9508 case update_takeover: {
9509 struct imsm_update_takeover *u = (void *)update->buf;
9510 if (apply_takeover_update(u, super, &update->space_list)) {
9511 imsm_update_version_info(super);
9512 super->updates_pending++;
9513 }
9514 break;
9515 }
9516
9517 case update_reshape_container_disks: {
9518 struct imsm_update_reshape *u = (void *)update->buf;
9519 if (apply_reshape_container_disks_update(
9520 u, super, &update->space_list))
9521 super->updates_pending++;
9522 break;
9523 }
9524 case update_reshape_migration: {
9525 struct imsm_update_reshape_migration *u = (void *)update->buf;
9526 if (apply_reshape_migration_update(
9527 u, super, &update->space_list))
9528 super->updates_pending++;
9529 break;
9530 }
9531 case update_size_change: {
9532 struct imsm_update_size_change *u = (void *)update->buf;
9533 if (apply_size_change_update(u, super))
9534 super->updates_pending++;
9535 break;
9536 }
9537 case update_activate_spare: {
9538 struct imsm_update_activate_spare *u = (void *) update->buf;
9539 if (apply_update_activate_spare(u, super, st->arrays))
9540 super->updates_pending++;
9541 break;
9542 }
9543 case update_create_array: {
9544 /* someone wants to create a new array, we need to be aware of
9545 * a few races/collisions:
9546 * 1/ 'Create' called by two separate instances of mdadm
9547 * 2/ 'Create' versus 'activate_spare': mdadm has chosen
9548 * devices that have since been assimilated via
9549 * activate_spare.
9550 * In the event this update can not be carried out mdadm will
9551 * (FIX ME) notice that its update did not take hold.
9552 */
9553 struct imsm_update_create_array *u = (void *) update->buf;
9554 struct intel_dev *dv;
9555 struct imsm_dev *dev;
9556 struct imsm_map *map, *new_map;
9557 unsigned long long start, end;
9558 unsigned long long new_start, new_end;
9559 int i;
9560 struct disk_info *inf;
9561 struct dl *dl;
9562
9563 /* handle racing creates: first come first serve */
9564 if (u->dev_idx < mpb->num_raid_devs) {
9565 dprintf("subarray %d already defined\n", u->dev_idx);
9566 goto create_error;
9567 }
9568
9569 /* check update is next in sequence */
9570 if (u->dev_idx != mpb->num_raid_devs) {
9571 dprintf("can not create array %d expected index %d\n",
9572 u->dev_idx, mpb->num_raid_devs);
9573 goto create_error;
9574 }
9575
9576 new_map = get_imsm_map(&u->dev, MAP_0);
9577 new_start = pba_of_lba0(new_map);
9578 new_end = new_start + blocks_per_member(new_map);
9579 inf = get_disk_info(u);
9580
9581 /* handle activate_spare versus create race:
9582 * check to make sure that overlapping arrays do not include
9583 * overlapping disks
9584 */
9585 for (i = 0; i < mpb->num_raid_devs; i++) {
9586 dev = get_imsm_dev(super, i);
9587 map = get_imsm_map(dev, MAP_0);
9588 start = pba_of_lba0(map);
9589 end = start + blocks_per_member(map);
9590 if ((new_start >= start && new_start <= end) ||
9591 (start >= new_start && start <= new_end))
9592 /* overlap */;
9593 else
9594 continue;
9595
9596 if (disks_overlap(super, i, u)) {
9597 dprintf("arrays overlap\n");
9598 goto create_error;
9599 }
9600 }
9601
9602 /* check that prepare update was successful */
9603 if (!update->space) {
9604 dprintf("prepare update failed\n");
9605 goto create_error;
9606 }
9607
9608 /* check that all disks are still active before committing
9609 * changes. FIXME: could we instead handle this by creating a
9610 * degraded array? That's probably not what the user expects,
9611 * so better to drop this update on the floor.
9612 */
9613 for (i = 0; i < new_map->num_members; i++) {
9614 dl = serial_to_dl(inf[i].serial, super);
9615 if (!dl) {
9616 dprintf("disk disappeared\n");
9617 goto create_error;
9618 }
9619 }
9620
9621 super->updates_pending++;
9622
9623 /* convert spares to members and fixup ord_tbl */
9624 for (i = 0; i < new_map->num_members; i++) {
9625 dl = serial_to_dl(inf[i].serial, super);
9626 if (dl->index == -1) {
9627 dl->index = mpb->num_disks;
9628 mpb->num_disks++;
9629 dl->disk.status |= CONFIGURED_DISK;
9630 dl->disk.status &= ~SPARE_DISK;
9631 }
9632 set_imsm_ord_tbl_ent(new_map, i, dl->index);
9633 }
9634
9635 dv = update->space;
9636 dev = dv->dev;
9637 update->space = NULL;
9638 imsm_copy_dev(dev, &u->dev);
9639 dv->index = u->dev_idx;
9640 dv->next = super->devlist;
9641 super->devlist = dv;
9642 mpb->num_raid_devs++;
9643
9644 imsm_update_version_info(super);
9645 break;
9646 create_error:
9647 /* mdmon knows how to release update->space, but not
9648 * ((struct intel_dev *) update->space)->dev
9649 */
9650 if (update->space) {
9651 dv = update->space;
9652 free(dv->dev);
9653 }
9654 break;
9655 }
9656 case update_kill_array: {
9657 struct imsm_update_kill_array *u = (void *) update->buf;
9658 int victim = u->dev_idx;
9659 struct active_array *a;
9660 struct intel_dev **dp;
9661 struct imsm_dev *dev;
9662
9663 /* sanity check that we are not affecting the uuid of
9664 * active arrays, or deleting an active array
9665 *
9666 * FIXME when immutable ids are available, but note that
9667 * we'll also need to fixup the invalidated/active
9668 * subarray indexes in mdstat
9669 */
9670 for (a = st->arrays; a; a = a->next)
9671 if (a->info.container_member >= victim)
9672 break;
9673 /* by definition if mdmon is running at least one array
9674 * is active in the container, so checking
9675 * mpb->num_raid_devs is just extra paranoia
9676 */
9677 dev = get_imsm_dev(super, victim);
9678 if (a || !dev || mpb->num_raid_devs == 1) {
9679 dprintf("failed to delete subarray-%d\n", victim);
9680 break;
9681 }
9682
9683 for (dp = &super->devlist; *dp;)
9684 if ((*dp)->index == (unsigned)super->current_vol) {
9685 *dp = (*dp)->next;
9686 } else {
9687 if ((*dp)->index > (unsigned)victim)
9688 (*dp)->index--;
9689 dp = &(*dp)->next;
9690 }
9691 mpb->num_raid_devs--;
9692 super->updates_pending++;
9693 break;
9694 }
9695 case update_rename_array: {
9696 struct imsm_update_rename_array *u = (void *) update->buf;
9697 char name[MAX_RAID_SERIAL_LEN+1];
9698 int target = u->dev_idx;
9699 struct active_array *a;
9700 struct imsm_dev *dev;
9701
9702 /* sanity check that we are not affecting the uuid of
9703 * an active array
9704 */
9705 snprintf(name, MAX_RAID_SERIAL_LEN, "%s", (char *) u->name);
9706 name[MAX_RAID_SERIAL_LEN] = '\0';
9707 for (a = st->arrays; a; a = a->next)
9708 if (a->info.container_member == target)
9709 break;
9710 dev = get_imsm_dev(super, u->dev_idx);
9711 if (a || !dev || !check_name(super, name, 1)) {
9712 dprintf("failed to rename subarray-%d\n", target);
9713 break;
9714 }
9715
9716 snprintf((char *) dev->volume, MAX_RAID_SERIAL_LEN, "%s", name);
9717 super->updates_pending++;
9718 break;
9719 }
9720 case update_add_remove_disk: {
9721 /* we may be able to repair some arrays if disks are
9722 * being added; check the status of add_remove_disk
9723 * to see if disks have been added.
9724 */
9725 if (add_remove_disk_update(super)) {
9726 struct active_array *a;
9727
9728 super->updates_pending++;
9729 for (a = st->arrays; a; a = a->next)
9730 a->check_degraded = 1;
9731 }
9732 break;
9733 }
9734 case update_prealloc_badblocks_mem:
9735 break;
9736 case update_rwh_policy: {
9737 struct imsm_update_rwh_policy *u = (void *)update->buf;
9738 int target = u->dev_idx;
9739 struct imsm_dev *dev = get_imsm_dev(super, target);
9740 if (!dev) {
9741 dprintf("could not find subarray-%d\n", target);
9742 break;
9743 }
9744
9745 if (dev->rwh_policy != u->new_policy) {
9746 dev->rwh_policy = u->new_policy;
9747 super->updates_pending++;
9748 }
9749 break;
9750 }
9751 default:
9752 pr_err("error: unsuported process update type:(type: %d)\n", type);
9753 }
9754 }
9755
9756 static struct mdinfo *get_spares_for_grow(struct supertype *st);
9757
9758 static int imsm_prepare_update(struct supertype *st,
9759 struct metadata_update *update)
9760 {
9761 /**
9762 * Allocate space to hold new disk entries, raid-device entries or a new
9763 * mpb if necessary. The manager synchronously waits for updates to
9764 * complete in the monitor, so new mpb buffers allocated here can be
9765 * integrated by the monitor thread without worrying about live pointers
9766 * in the manager thread.
9767 */
9768 enum imsm_update_type type;
9769 struct intel_super *super = st->sb;
9770 unsigned int sector_size = super->sector_size;
9771 struct imsm_super *mpb = super->anchor;
9772 size_t buf_len;
9773 size_t len = 0;
9774
9775 if (update->len < (int)sizeof(type))
9776 return 0;
9777
9778 type = *(enum imsm_update_type *) update->buf;
9779
9780 switch (type) {
9781 case update_general_migration_checkpoint:
9782 if (update->len < (int)sizeof(struct imsm_update_general_migration_checkpoint))
9783 return 0;
9784 dprintf("called for update_general_migration_checkpoint\n");
9785 break;
9786 case update_takeover: {
9787 struct imsm_update_takeover *u = (void *)update->buf;
9788 if (update->len < (int)sizeof(*u))
9789 return 0;
9790 if (u->direction == R0_TO_R10) {
9791 void **tail = (void **)&update->space_list;
9792 struct imsm_dev *dev = get_imsm_dev(super, u->subarray);
9793 struct imsm_map *map = get_imsm_map(dev, MAP_0);
9794 int num_members = map->num_members;
9795 void *space;
9796 int size, i;
9797 /* allocate memory for added disks */
9798 for (i = 0; i < num_members; i++) {
9799 size = sizeof(struct dl);
9800 space = xmalloc(size);
9801 *tail = space;
9802 tail = space;
9803 *tail = NULL;
9804 }
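/* Editorial note on the idiom above (no functional change implied):
 * "*tail = space; tail = space; *tail = NULL;" threads a singly linked
 * list through the first pointer-sized word of each allocation, so
 * update->space_list points at the first block, whose first word points
 * at the next block, ending in NULL. The pre-allocated blocks can later
 * be peeled off this chain one at a time without extra bookkeeping.
 */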
9805 /* allocate memory for new device */
9806 size = sizeof_imsm_dev(super->devlist->dev, 0) +
9807 (num_members * sizeof(__u32));
9808 space = xmalloc(size);
9809 *tail = space;
9810 tail = space;
9811 *tail = NULL;
9812 len = disks_to_mpb_size(num_members * 2);
9813 }
9814
9815 break;
9816 }
9817 case update_reshape_container_disks: {
9818 /* Every raid device in the container is about to
9819 * gain some more devices, and we will enter a
9820 * reconfiguration.
9821 * So each 'imsm_map' will be bigger, and the imsm_vol
9822 * will now hold 2 of them.
9823 * Thus we need new 'struct imsm_dev' allocations sized
9824 * as sizeof_imsm_dev but with more devices in both maps.
9825 */
9826 struct imsm_update_reshape *u = (void *)update->buf;
9827 struct intel_dev *dl;
9828 void **space_tail = (void**)&update->space_list;
9829
9830 if (update->len < (int)sizeof(*u))
9831 return 0;
9832
9833 dprintf("for update_reshape\n");
9834
9835 for (dl = super->devlist; dl; dl = dl->next) {
9836 int size = sizeof_imsm_dev(dl->dev, 1);
9837 void *s;
9838 if (u->new_raid_disks > u->old_raid_disks)
9839 size += sizeof(__u32)*2*
9840 (u->new_raid_disks - u->old_raid_disks);
9841 s = xmalloc(size);
9842 *space_tail = s;
9843 space_tail = s;
9844 *space_tail = NULL;
9845 }
9846
9847 len = disks_to_mpb_size(u->new_raid_disks);
9848 dprintf("New anchor length is %llu\n", (unsigned long long)len);
9849 break;
9850 }
9851 case update_reshape_migration: {
9852 /* for a level 0->5 migration we need to add disks,
9853 * so, as with the container operation, we copy the
9854 * device to a bigger allocation.
9855 * the resized device and the new disk area are prepared
9856 * in memory for use in process_update
9857 */
9858 struct imsm_update_reshape_migration *u = (void *)update->buf;
9859 struct intel_dev *id;
9860 void **space_tail = (void **)&update->space_list;
9861 int size;
9862 void *s;
9863 int current_level = -1;
9864
9865 if (update->len < (int)sizeof(*u))
9866 return 0;
9867
9868 dprintf("for update_reshape\n");
9869
9870 /* add space for bigger array in update
9871 */
9872 for (id = super->devlist; id; id = id->next) {
9873 if (id->index == (unsigned)u->subdev) {
9874 size = sizeof_imsm_dev(id->dev, 1);
9875 if (u->new_raid_disks > u->old_raid_disks)
9876 size += sizeof(__u32)*2*
9877 (u->new_raid_disks - u->old_raid_disks);
9878 s = xmalloc(size);
9879 *space_tail = s;
9880 space_tail = s;
9881 *space_tail = NULL;
9882 break;
9883 }
9884 }
9885 if (update->space_list == NULL)
9886 break;
9887
9888 /* add space for disk in update
9889 */
9890 size = sizeof(struct dl);
9891 s = xmalloc(size);
9892 *space_tail = s;
9893 space_tail = s;
9894 *space_tail = NULL;
9895
9896 /* add spare device to update
9897 */
9898 for (id = super->devlist ; id; id = id->next)
9899 if (id->index == (unsigned)u->subdev) {
9900 struct imsm_dev *dev;
9901 struct imsm_map *map;
9902
9903 dev = get_imsm_dev(super, u->subdev);
9904 map = get_imsm_map(dev, MAP_0);
9905 current_level = map->raid_level;
9906 break;
9907 }
9908 if (u->new_level == 5 && u->new_level != current_level) {
9909 struct mdinfo *spares;
9910
9911 spares = get_spares_for_grow(st);
9912 if (spares) {
9913 struct dl *dl;
9914 struct mdinfo *dev;
9915
9916 dev = spares->devs;
9917 if (dev) {
9918 u->new_disks[0] =
9919 makedev(dev->disk.major,
9920 dev->disk.minor);
9921 dl = get_disk_super(super,
9922 dev->disk.major,
9923 dev->disk.minor);
9924 dl->index = u->old_raid_disks;
9925 dev = dev->next;
9926 }
9927 sysfs_free(spares);
9928 }
9929 }
9930 len = disks_to_mpb_size(u->new_raid_disks);
9931 dprintf("New anchor length is %llu\n", (unsigned long long)len);
9932 break;
9933 }
9934 case update_size_change: {
9935 if (update->len < (int)sizeof(struct imsm_update_size_change))
9936 return 0;
9937 break;
9938 }
9939 case update_activate_spare: {
9940 if (update->len < (int)sizeof(struct imsm_update_activate_spare))
9941 return 0;
9942 break;
9943 }
9944 case update_create_array: {
9945 struct imsm_update_create_array *u = (void *) update->buf;
9946 struct intel_dev *dv;
9947 struct imsm_dev *dev = &u->dev;
9948 struct imsm_map *map = get_imsm_map(dev, MAP_0);
9949 struct dl *dl;
9950 struct disk_info *inf;
9951 int i;
9952 int activate = 0;
9953
9954 if (update->len < (int)sizeof(*u))
9955 return 0;
9956
9957 inf = get_disk_info(u);
9958 len = sizeof_imsm_dev(dev, 1);
9959 /* allocate a new super->devlist entry */
9960 dv = xmalloc(sizeof(*dv));
9961 dv->dev = xmalloc(len);
9962 update->space = dv;
9963
9964 /* count how many spares will be converted to members */
9965 for (i = 0; i < map->num_members; i++) {
9966 dl = serial_to_dl(inf[i].serial, super);
9967 if (!dl) {
9968 /* hmm, maybe it failed? nothing we can do about
9969 * it here
9970 */
9971 continue;
9972 }
9973 if (count_memberships(dl, super) == 0)
9974 activate++;
9975 }
9976 len += activate * sizeof(struct imsm_disk);
9977 break;
9978 }
9979 case update_kill_array: {
9980 if (update->len < (int)sizeof(struct imsm_update_kill_array))
9981 return 0;
9982 break;
9983 }
9984 case update_rename_array: {
9985 if (update->len < (int)sizeof(struct imsm_update_rename_array))
9986 return 0;
9987 break;
9988 }
9989 case update_add_remove_disk:
9990 /* no update->len needed */
9991 break;
9992 case update_prealloc_badblocks_mem:
9993 super->extra_space += sizeof(struct bbm_log) -
9994 get_imsm_bbm_log_size(super->bbm_log);
9995 break;
9996 case update_rwh_policy: {
9997 if (update->len < (int)sizeof(struct imsm_update_rwh_policy))
9998 return 0;
9999 break;
10000 }
10001 default:
10002 return 0;
10003 }
10004
10005 /* check if we need a larger metadata buffer */
10006 if (super->next_buf)
10007 buf_len = super->next_len;
10008 else
10009 buf_len = super->len;
10010
10011 if (__le32_to_cpu(mpb->mpb_size) + super->extra_space + len > buf_len) {
10012 /* ok we need a larger buf than what is currently allocated
10013 * if this allocation fails process_update will notice that
10014 * ->next_len is set and ->next_buf is NULL
10015 */
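/* Worked example (illustrative numbers only, not taken from a real
 * array): with mpb_size = 3584, extra_space = 0 and len = 1024 the
 * required size is 4608 bytes; on a 512-byte-sector device ROUND_UP()
 * leaves it at 4608, while a 4096-byte-sector device gets an 8192-byte
 * next_buf.
 */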
10016 buf_len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) +
10017 super->extra_space + len, sector_size);
10018 if (super->next_buf)
10019 free(super->next_buf);
10020
10021 super->next_len = buf_len;
10022 if (posix_memalign(&super->next_buf, sector_size, buf_len) == 0)
10023 memset(super->next_buf, 0, buf_len);
10024 else
10025 super->next_buf = NULL;
10026 }
10027 return 1;
10028 }
10029
10030 /* must be called while manager is quiesced */
10031 static void imsm_delete(struct intel_super *super, struct dl **dlp, unsigned index)
10032 {
10033 struct imsm_super *mpb = super->anchor;
10034 struct dl *iter;
10035 struct imsm_dev *dev;
10036 struct imsm_map *map;
10037 unsigned int i, j, num_members;
10038 __u32 ord, ord_map0;
10039 struct bbm_log *log = super->bbm_log;
10040
10041 dprintf("deleting device[%d] from imsm_super\n", index);
10042
10043 /* shift all indexes down one */
10044 for (iter = super->disks; iter; iter = iter->next)
10045 if (iter->index > (int)index)
10046 iter->index--;
10047 for (iter = super->missing; iter; iter = iter->next)
10048 if (iter->index > (int)index)
10049 iter->index--;
10050
10051 for (i = 0; i < mpb->num_raid_devs; i++) {
10052 dev = get_imsm_dev(super, i);
10053 map = get_imsm_map(dev, MAP_0);
10054 num_members = map->num_members;
10055 for (j = 0; j < num_members; j++) {
10056 /* update ord entries being careful not to propagate
10057 * ord-flags to the first map
10058 */
10059 ord = get_imsm_ord_tbl_ent(dev, j, MAP_X);
10060 ord_map0 = get_imsm_ord_tbl_ent(dev, j, MAP_0);
10061
10062 if (ord_to_idx(ord) <= index)
10063 continue;
10064
10065 map = get_imsm_map(dev, MAP_0);
10066 set_imsm_ord_tbl_ent(map, j, ord_map0 - 1);
10067 map = get_imsm_map(dev, MAP_1);
10068 if (map)
10069 set_imsm_ord_tbl_ent(map, j, ord - 1);
10070 }
10071 }
10072
10073 for (i = 0; i < log->entry_count; i++) {
10074 struct bbm_log_entry *entry = &log->marked_block_entries[i];
10075
10076 if (entry->disk_ordinal <= index)
10077 continue;
10078 entry->disk_ordinal--;
10079 }
10080
10081 mpb->num_disks--;
10082 super->updates_pending++;
10083 if (*dlp) {
10084 struct dl *dl = *dlp;
10085
10086 *dlp = (*dlp)->next;
10087 __free_imsm_disk(dl);
10088 }
10089 }
10090
10091 static void close_targets(int *targets, int new_disks)
10092 {
10093 int i;
10094
10095 if (!targets)
10096 return;
10097
10098 for (i = 0; i < new_disks; i++) {
10099 if (targets[i] >= 0) {
10100 close(targets[i]);
10101 targets[i] = -1;
10102 }
10103 }
10104 }
10105
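/* Editorial summary of imsm_get_allowed_degradation() below: RAID1 and
 * RAID10 may lose up to raid_disks/2 members as long as no checked disk
 * pair has both members missing (the check is repeated for the second
 * map when one exists); RAID5 tolerates one failed member, RAID6 two,
 * and any other level zero. Example: a 4-disk RAID10 reports 2 unless a
 * checked pair has lost both disks, in which case it reports 0.
 */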
10106 static int imsm_get_allowed_degradation(int level, int raid_disks,
10107 struct intel_super *super,
10108 struct imsm_dev *dev)
10109 {
10110 switch (level) {
10111 case 1:
10112 case 10:{
10113 int ret_val = 0;
10114 struct imsm_map *map;
10115 int i;
10116
10117 ret_val = raid_disks/2;
10118 /* check that no disk pair has both members
10119 * failed, in either map
10120 */
10121 map = get_imsm_map(dev, MAP_0);
10122 for (i = 0; i < ret_val; i++) {
10123 int degradation = 0;
10124 if (get_imsm_disk(super, i) == NULL)
10125 degradation++;
10126 if (get_imsm_disk(super, i + 1) == NULL)
10127 degradation++;
10128 if (degradation == 2)
10129 return 0;
10130 }
10131 map = get_imsm_map(dev, MAP_1);
10132 /* if there is no second map
10133 * result can be returned
10134 */
10135 if (map == NULL)
10136 return ret_val;
10137 /* check degradation in second map
10138 */
10139 for (i = 0; i < ret_val; i++) {
10140 int degradation = 0;
10141 if (get_imsm_disk(super, i) == NULL)
10142 degradation++;
10143 if (get_imsm_disk(super, i + 1) == NULL)
10144 degradation++;
10145 if (degradation == 2)
10146 return 0;
10147 }
10148 return ret_val;
10149 }
10150 case 5:
10151 return 1;
10152 case 6:
10153 return 2;
10154 default:
10155 return 0;
10156 }
10157 }
10158
10159 /*******************************************************************************
10160 * Function: open_backup_targets
10161 * Description: Function opens file descriptors for all devices given in
10162 * info->devs
10163 * Parameters:
10164 * info : general array info
10165 * raid_disks : number of disks
10166 * raid_fds : table of device's file descriptors
10167 * super : intel super for raid10 degradation check
10168 * dev : intel device for raid10 degradation check
10169 * Returns:
10170 * 0 : success
10171 * -2 : fail (too many component devices could not be opened)
10172 ******************************************************************************/
10173 int open_backup_targets(struct mdinfo *info, int raid_disks, int *raid_fds,
10174 struct intel_super *super, struct imsm_dev *dev)
10175 {
10176 struct mdinfo *sd;
10177 int i;
10178 int opened = 0;
10179
10180 for (i = 0; i < raid_disks; i++)
10181 raid_fds[i] = -1;
10182
10183 for (sd = info->devs ; sd ; sd = sd->next) {
10184 char *dn;
10185
10186 if (sd->disk.state & (1<<MD_DISK_FAULTY)) {
10187 dprintf("disk is faulty!!\n");
10188 continue;
10189 }
10190
10191 if (sd->disk.raid_disk >= raid_disks || sd->disk.raid_disk < 0)
10192 continue;
10193
10194 dn = map_dev(sd->disk.major,
10195 sd->disk.minor, 1);
10196 raid_fds[sd->disk.raid_disk] = dev_open(dn, O_RDWR);
10197 if (raid_fds[sd->disk.raid_disk] < 0) {
10198 pr_err("cannot open component\n");
10199 continue;
10200 }
10201 opened++;
10202 }
10203 /* check if maximum array degradation level is not exceeded
10204 */
10205 if ((raid_disks - opened) >
10206 imsm_get_allowed_degradation(info->new_level, raid_disks,
10207 super, dev)) {
10208 pr_err("Not enough disks can be opened.\n");
10209 close_targets(raid_fds, raid_disks);
10210 return -2;
10211 }
10212 return 0;
10213 }
10214
10215 /*******************************************************************************
10216 * Function: validate_container_imsm
10217 * Description: This routine validates container after assemble,
10218 * e.g. whether the devices in the container are attached to the same controller.
10219 *
10220 * Parameters:
10221 * info : linked list with info about devices used in array
10222 * Returns:
10223 * 1 : HBA mismatch
10224 * 0 : Success
10225 ******************************************************************************/
10226 int validate_container_imsm(struct mdinfo *info)
10227 {
10228 if (check_env("IMSM_NO_PLATFORM"))
10229 return 0;
10230
10231 struct sys_dev *idev;
10232 struct sys_dev *hba = NULL;
10233 struct sys_dev *intel_devices = find_intel_devices();
10234 char *dev_path = devt_to_devpath(makedev(info->disk.major,
10235 info->disk.minor));
10236
10237 for (idev = intel_devices; idev; idev = idev->next) {
10238 if (dev_path && strstr(dev_path, idev->path)) {
10239 hba = idev;
10240 break;
10241 }
10242 }
10243 if (dev_path)
10244 free(dev_path);
10245
10246 if (!hba) {
10247 pr_err("WARNING - Cannot detect HBA for device %s!\n",
10248 devid2kname(makedev(info->disk.major, info->disk.minor)));
10249 return 1;
10250 }
10251
10252 const struct imsm_orom *orom = get_orom_by_device_id(hba->dev_id);
10253 struct mdinfo *dev;
10254
10255 for (dev = info->next; dev; dev = dev->next) {
10256 dev_path = devt_to_devpath(makedev(dev->disk.major, dev->disk.minor));
10257
10258 struct sys_dev *hba2 = NULL;
10259 for (idev = intel_devices; idev; idev = idev->next) {
10260 if (dev_path && strstr(dev_path, idev->path)) {
10261 hba2 = idev;
10262 break;
10263 }
10264 }
10265 if (dev_path)
10266 free(dev_path);
10267
10268 const struct imsm_orom *orom2 = hba2 == NULL ? NULL :
10269 get_orom_by_device_id(hba2->dev_id);
10270
10271 if (hba2 && hba->type != hba2->type) {
10272 pr_err("WARNING - HBAs of devices do not match %s != %s\n",
10273 get_sys_dev_type(hba->type), get_sys_dev_type(hba2->type));
10274 return 1;
10275 }
10276
10277 if (orom != orom2) {
10278 pr_err("WARNING - IMSM container assembled with disks under different HBAs!\n"
10279 " This operation is not supported and can lead to data loss.\n");
10280 return 1;
10281 }
10282
10283 if (!orom) {
10284 pr_err("WARNING - IMSM container assembled with disks under HBAs without IMSM platform support!\n"
10285 " This operation is not supported and can lead to data loss.\n");
10286 return 1;
10287 }
10288 }
10289
10290 return 0;
10291 }
10292
10293 /*******************************************************************************
10294 * Function: imsm_record_badblock
10295 * Description: This routine stores new bad block record in BBM log
10296 *
10297 * Parameters:
10298 * a : array containing a bad block
10299 * slot : disk number containing a bad block
10300 * sector : bad block sector
10301 * length : bad block sectors range
10302 * Returns:
10303 * 1 : Success
10304 * 0 : Error
10305 ******************************************************************************/
10306 static int imsm_record_badblock(struct active_array *a, int slot,
10307 unsigned long long sector, int length)
10308 {
10309 struct intel_super *super = a->container->sb;
10310 int ord;
10311 int ret;
10312
10313 ord = imsm_disk_slot_to_ord(a, slot);
10314 if (ord < 0)
10315 return 0;
10316
10317 ret = record_new_badblock(super->bbm_log, ord_to_idx(ord), sector,
10318 length);
10319 if (ret)
10320 super->updates_pending++;
10321
10322 return ret;
10323 }
10324 /*******************************************************************************
10325 * Function: imsm_clear_badblock
10326 * Description: This routine clears bad block record from BBM log
10327 *
10328 * Parameters:
10329 * a : array containing a bad block
10330 * slot : disk number containing a bad block
10331 * sector : bad block sector
10332 * length : bad block sectors range
10333 * Returns:
10334 * 1 : Success
10335 * 0 : Error
10336 ******************************************************************************/
10337 static int imsm_clear_badblock(struct active_array *a, int slot,
10338 unsigned long long sector, int length)
10339 {
10340 struct intel_super *super = a->container->sb;
10341 int ord;
10342 int ret;
10343
10344 ord = imsm_disk_slot_to_ord(a, slot);
10345 if (ord < 0)
10346 return 0;
10347
10348 ret = clear_badblock(super->bbm_log, ord_to_idx(ord), sector, length);
10349 if (ret)
10350 super->updates_pending++;
10351
10352 return ret;
10353 }
10354 /*******************************************************************************
10355 * Function: imsm_get_badblocks
10356 * Description: This routine gets the list of bad blocks for an array
10357 *
10358 * Parameters:
10359 * a : array
10360 * slot : disk number
10361 * Returns:
10362 * bb : structure containing bad blocks
10363 * NULL : error
10364 ******************************************************************************/
10365 static struct md_bb *imsm_get_badblocks(struct active_array *a, int slot)
10366 {
10367 int inst = a->info.container_member;
10368 struct intel_super *super = a->container->sb;
10369 struct imsm_dev *dev = get_imsm_dev(super, inst);
10370 struct imsm_map *map = get_imsm_map(dev, MAP_0);
10371 int ord;
10372
10373 ord = imsm_disk_slot_to_ord(a, slot);
10374 if (ord < 0)
10375 return NULL;
10376
10377 get_volume_badblocks(super->bbm_log, ord_to_idx(ord), pba_of_lba0(map),
10378 blocks_per_member(map), &super->bb);
10379
10380 return &super->bb;
10381 }
10382 /*******************************************************************************
10383 * Function: examine_badblocks_imsm
10384 * Description: Prints list of bad blocks on a disk to the standard output
10385 *
10386 * Parameters:
10387 * st : metadata handler
10388 * fd : open file descriptor for device
10389 * devname : device name
10390 * Returns:
10391 * 0 : Success
10392 * 1 : Error
10393 ******************************************************************************/
10394 static int examine_badblocks_imsm(struct supertype *st, int fd, char *devname)
10395 {
10396 struct intel_super *super = st->sb;
10397 struct bbm_log *log = super->bbm_log;
10398 struct dl *d = NULL;
10399 int any = 0;
10400
10401 for (d = super->disks; d ; d = d->next) {
10402 if (strcmp(d->devname, devname) == 0)
10403 break;
10404 }
10405
10406 if ((d == NULL) || (d->index < 0)) { /* serial mismatch probably */
10407 pr_err("%s doesn't appear to be part of a raid array\n",
10408 devname);
10409 return 1;
10410 }
10411
10412 if (log != NULL) {
10413 unsigned int i;
10414 struct bbm_log_entry *entry = &log->marked_block_entries[0];
10415
10416 for (i = 0; i < log->entry_count; i++) {
10417 if (entry[i].disk_ordinal == d->index) {
10418 unsigned long long sector = __le48_to_cpu(
10419 &entry[i].defective_block_start);
10420 int cnt = entry[i].marked_count + 1;
10421
10422 if (!any) {
10423 printf("Bad-blocks on %s:\n", devname);
10424 any = 1;
10425 }
10426
10427 printf("%20llu for %d sectors\n", sector, cnt);
10428 }
10429 }
10430 }
10431
10432 if (!any)
10433 printf("No bad-blocks list configured on %s\n", devname);
10434
10435 return 0;
10436 }
10437 /*******************************************************************************
10438 * Function: init_migr_record_imsm
10439 * Description: Function inits imsm migration record
10440 * Parameters:
10441 * super : imsm internal array info
10442 * dev : device under migration
10443 * info : general array info to find the smallest device
10444 * Returns:
10445 * none
10446 ******************************************************************************/
10447 void init_migr_record_imsm(struct supertype *st, struct imsm_dev *dev,
10448 struct mdinfo *info)
10449 {
10450 struct intel_super *super = st->sb;
10451 struct migr_record *migr_rec = super->migr_rec;
10452 int new_data_disks;
10453 unsigned long long dsize, dev_sectors;
10454 long long unsigned min_dev_sectors = -1LLU;
10455 struct mdinfo *sd;
10456 char nm[30];
10457 int fd;
10458 struct imsm_map *map_dest = get_imsm_map(dev, MAP_0);
10459 struct imsm_map *map_src = get_imsm_map(dev, MAP_1);
10460 unsigned long long num_migr_units;
10461 unsigned long long array_blocks;
10462
10463 memset(migr_rec, 0, sizeof(struct migr_record));
10464 migr_rec->family_num = __cpu_to_le32(super->anchor->family_num);
10465
10466 /* only ascending reshape supported now */
10467 migr_rec->ascending_migr = __cpu_to_le32(1);
10468
10469 migr_rec->dest_depth_per_unit = GEN_MIGR_AREA_SIZE /
10470 max(map_dest->blocks_per_strip, map_src->blocks_per_strip);
10471 migr_rec->dest_depth_per_unit *=
10472 max(map_dest->blocks_per_strip, map_src->blocks_per_strip);
10473 new_data_disks = imsm_num_data_members(dev, MAP_0);
10474 migr_rec->blocks_per_unit =
10475 __cpu_to_le32(migr_rec->dest_depth_per_unit * new_data_disks);
10476 migr_rec->dest_depth_per_unit =
10477 __cpu_to_le32(migr_rec->dest_depth_per_unit);
10478 array_blocks = info->component_size * new_data_disks;
10479 num_migr_units =
10480 array_blocks / __le32_to_cpu(migr_rec->blocks_per_unit);
10481
10482 if (array_blocks % __le32_to_cpu(migr_rec->blocks_per_unit))
10483 num_migr_units++;
10484 migr_rec->num_migr_units = __cpu_to_le32(num_migr_units);
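/* Worked example (assumed values, for illustration only): assuming
 * GEN_MIGR_AREA_SIZE = 2048 and a 128-sector strip on both maps,
 * dest_depth_per_unit = (2048 / 128) * 128 = 2048 sectors per disk;
 * with 3 data disks blocks_per_unit = 6144 sectors, and an array of
 * 20000 blocks needs 3 full units plus one partial, i.e. 4 migration
 * units.
 */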
10485
10486 migr_rec->post_migr_vol_cap = dev->size_low;
10487 migr_rec->post_migr_vol_cap_hi = dev->size_high;
10488
10489 /* Find the smallest dev */
10490 for (sd = info->devs ; sd ; sd = sd->next) {
10491 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
10492 fd = dev_open(nm, O_RDONLY);
10493 if (fd < 0)
10494 continue;
10495 get_dev_size(fd, NULL, &dsize);
10496 dev_sectors = dsize / 512;
10497 if (dev_sectors < min_dev_sectors)
10498 min_dev_sectors = dev_sectors;
10499 close(fd);
10500 }
10501 migr_rec->ckpt_area_pba = __cpu_to_le32(min_dev_sectors -
10502 RAID_DISK_RESERVED_BLOCKS_IMSM_HI);
10503
10504 write_imsm_migr_rec(st);
10505
10506 return;
10507 }
10508
10509 /*******************************************************************************
10510 * Function: save_backup_imsm
10511 * Description: Function saves critical data stripes to Migration Copy Area
10512 * and updates the current migration unit status.
10513 * Use restore_stripes() to form a destination stripe,
10514 * and to write it to the Copy Area.
10515 * Parameters:
10516 * st : supertype information
10517 * dev : imsm device that backup is saved for
10518 * info : general array info
10519 * buf : input buffer
10520 * length : length of data to backup (blocks_per_unit)
10521 * Returns:
10522 * 0 : success
10523 * -1 : fail
10524 ******************************************************************************/
10525 int save_backup_imsm(struct supertype *st,
10526 struct imsm_dev *dev,
10527 struct mdinfo *info,
10528 void *buf,
10529 int length)
10530 {
10531 int rv = -1;
10532 struct intel_super *super = st->sb;
10533 unsigned long long *target_offsets;
10534 int *targets;
10535 int i;
10536 struct imsm_map *map_dest = get_imsm_map(dev, MAP_0);
10537 int new_disks = map_dest->num_members;
10538 int dest_layout = 0;
10539 int dest_chunk;
10540 unsigned long long start;
10541 int data_disks = imsm_num_data_members(dev, MAP_0);
10542
10543 targets = xmalloc(new_disks * sizeof(int));
10544
10545 for (i = 0; i < new_disks; i++)
10546 targets[i] = -1;
10547
10548 target_offsets = xcalloc(new_disks, sizeof(unsigned long long));
10549
10550 start = info->reshape_progress * 512;
10551 for (i = 0; i < new_disks; i++) {
10552 target_offsets[i] = (unsigned long long)
10553 __le32_to_cpu(super->migr_rec->ckpt_area_pba) * 512;
10554 /* move the copy area address back; it will be moved forward
10555 * in restore_stripes() using the start input variable
10556 */
10557 target_offsets[i] -= start/data_disks;
10558 }
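/* Illustration (assumed numbers): with ckpt_area_pba = 2048000 sectors,
 * reshape_progress = 8192 sectors and data_disks = 2, start is
 * 4194304 bytes and every target offset becomes
 * 2048000 * 512 - 4194304 / 2 = 1046478848 bytes; restore_stripes()
 * then moves forward again using the start argument.
 */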
10559
10560 if (open_backup_targets(info, new_disks, targets,
10561 super, dev))
10562 goto abort;
10563
10564 dest_layout = imsm_level_to_layout(map_dest->raid_level);
10565 dest_chunk = __le16_to_cpu(map_dest->blocks_per_strip) * 512;
10566
10567 if (restore_stripes(targets, /* list of dest devices */
10568 target_offsets, /* migration record offsets */
10569 new_disks,
10570 dest_chunk,
10571 map_dest->raid_level,
10572 dest_layout,
10573 -1, /* source backup file descriptor */
10574 0, /* input buf offset
10575 * always 0, buf is already offset */
10576 start,
10577 length,
10578 buf) != 0) {
10579 pr_err("Error restoring stripes\n");
10580 goto abort;
10581 }
10582
10583 rv = 0;
10584
10585 abort:
10586 if (targets) {
10587 close_targets(targets, new_disks);
10588 free(targets);
10589 }
10590 free(target_offsets);
10591
10592 return rv;
10593 }
10594
10595 /*******************************************************************************
10596 * Function: save_checkpoint_imsm
10597 * Description: Function called for current unit status update
10598 * in the migration record. It writes it to disk.
10599 * Parameters:
10600 * super : imsm internal array info
10601 * info : general array info
10602 * Returns:
10603 * 0: success
10604 * 1: failure
10605 * 2: failure, means no valid migration record
10606 * / no general migration in progress /
10607 ******************************************************************************/
10608 int save_checkpoint_imsm(struct supertype *st, struct mdinfo *info, int state)
10609 {
10610 struct intel_super *super = st->sb;
10611 unsigned long long blocks_per_unit;
10612 unsigned long long curr_migr_unit;
10613
10614 if (load_imsm_migr_rec(super, info) != 0) {
10615 dprintf("imsm: ERROR: Cannot read migration record for checkpoint save.\n");
10616 return 1;
10617 }
10618
10619 blocks_per_unit = __le32_to_cpu(super->migr_rec->blocks_per_unit);
10620 if (blocks_per_unit == 0) {
10621 dprintf("imsm: no migration in progress.\n");
10622 return 2;
10623 }
10624 curr_migr_unit = info->reshape_progress / blocks_per_unit;
10625 /* check if the array is aligned to the copy area;
10626 * if it is not aligned, add one to the current migration unit value.
10627 * this can happen only when the array reshape is finishing
10628 */
10629 if (info->reshape_progress % blocks_per_unit)
10630 curr_migr_unit++;
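/* Illustration (assumed numbers): with blocks_per_unit = 6144,
 * reshape_progress = 12288 gives curr_migr_unit = 2 exactly, while
 * reshape_progress = 13000 leaves a remainder and is bumped to 3;
 * such a partially filled unit only occurs as the reshape finishes.
 */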
10631
10632 super->migr_rec->curr_migr_unit =
10633 __cpu_to_le32(curr_migr_unit);
10634 super->migr_rec->rec_status = __cpu_to_le32(state);
10635 super->migr_rec->dest_1st_member_lba =
10636 __cpu_to_le32(curr_migr_unit *
10637 __le32_to_cpu(super->migr_rec->dest_depth_per_unit));
10638 if (write_imsm_migr_rec(st) < 0) {
10639 dprintf("imsm: Cannot write migration record outside backup area\n");
10640 return 1;
10641 }
10642
10643 return 0;
10644 }
10645
10646 /*******************************************************************************
10647 * Function: recover_backup_imsm
10648 * Description: Function recovers critical data from the Migration Copy Area
10649 * while assembling an array.
10650 * Parameters:
10651 * super : imsm internal array info
10652 * info : general array info
10653 * Returns:
10654 * 0 : success (or there is no data to recover)
10655 * 1 : fail
10656 ******************************************************************************/
10657 int recover_backup_imsm(struct supertype *st, struct mdinfo *info)
10658 {
10659 struct intel_super *super = st->sb;
10660 struct migr_record *migr_rec = super->migr_rec;
10661 struct imsm_map *map_dest;
10662 struct intel_dev *id = NULL;
10663 unsigned long long read_offset;
10664 unsigned long long write_offset;
10665 unsigned unit_len;
10666 int *targets = NULL;
10667 int new_disks, i, err;
10668 char *buf = NULL;
10669 int retval = 1;
10670 unsigned int sector_size = super->sector_size;
10671 unsigned long curr_migr_unit = __le32_to_cpu(migr_rec->curr_migr_unit);
10672 unsigned long num_migr_units = __le32_to_cpu(migr_rec->num_migr_units);
10673 char buffer[20];
10674 int skipped_disks = 0;
10675
10676 err = sysfs_get_str(info, NULL, "array_state", (char *)buffer, 20);
10677 if (err < 1)
10678 return 1;
10679
10680 /* recover data only during assembly */
10681 if (strncmp(buffer, "inactive", 8) != 0)
10682 return 0;
10683 /* no data to recover */
10684 if (__le32_to_cpu(migr_rec->rec_status) == UNIT_SRC_NORMAL)
10685 return 0;
10686 if (curr_migr_unit >= num_migr_units)
10687 return 1;
10688
10689 /* find device during reshape */
10690 for (id = super->devlist; id; id = id->next)
10691 if (is_gen_migration(id->dev))
10692 break;
10693 if (id == NULL)
10694 return 1;
10695
10696 map_dest = get_imsm_map(id->dev, MAP_0);
10697 new_disks = map_dest->num_members;
10698
10699 read_offset = (unsigned long long)
10700 __le32_to_cpu(migr_rec->ckpt_area_pba) * 512;
10701
10702 write_offset = ((unsigned long long)
10703 __le32_to_cpu(migr_rec->dest_1st_member_lba) +
10704 pba_of_lba0(map_dest)) * 512;
10705
10706 unit_len = __le32_to_cpu(migr_rec->dest_depth_per_unit) * 512;
10707 if (posix_memalign((void **)&buf, sector_size, unit_len) != 0)
10708 goto abort;
10709 targets = xcalloc(new_disks, sizeof(int));
10710
10711 if (open_backup_targets(info, new_disks, targets, super, id->dev)) {
10712 pr_err("Cannot open some devices belonging to array.\n");
10713 goto abort;
10714 }
10715
10716 for (i = 0; i < new_disks; i++) {
10717 if (targets[i] < 0) {
10718 skipped_disks++;
10719 continue;
10720 }
10721 if (lseek64(targets[i], read_offset, SEEK_SET) < 0) {
10722 pr_err("Cannot seek to block: %s\n",
10723 strerror(errno));
10724 skipped_disks++;
10725 continue;
10726 }
10727 if ((unsigned)read(targets[i], buf, unit_len) != unit_len) {
10728 pr_err("Cannot read copy area block: %s\n",
10729 strerror(errno));
10730 skipped_disks++;
10731 continue;
10732 }
10733 if (lseek64(targets[i], write_offset, SEEK_SET) < 0) {
10734 pr_err("Cannot seek to block: %s\n",
10735 strerror(errno));
10736 skipped_disks++;
10737 continue;
10738 }
10739 if ((unsigned)write(targets[i], buf, unit_len) != unit_len) {
10740 pr_err("Cannot restore block: %s\n",
10741 strerror(errno));
10742 skipped_disks++;
10743 continue;
10744 }
10745 }
10746
10747 if (skipped_disks > imsm_get_allowed_degradation(info->new_level,
10748 new_disks,
10749 super,
10750 id->dev)) {
10751 pr_err("Cannot restore data from backup. Too many failed disks\n");
10752 goto abort;
10753 }
10754
10755 if (save_checkpoint_imsm(st, info, UNIT_SRC_NORMAL)) {
10756 /* ignore error == 2, this can mean end of reshape here
10757 */
10758 dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_NORMAL) during restart\n");
10759 } else
10760 retval = 0;
10761
10762 abort:
10763 if (targets) {
10764 for (i = 0; i < new_disks; i++)
10765 if (targets[i])
10766 close(targets[i]);
10767 free(targets);
10768 }
10769 free(buf);
10770 return retval;
10771 }
10772
10773 static char disk_by_path[] = "/dev/disk/by-path/";
10774
10775 static const char *imsm_get_disk_controller_domain(const char *path)
10776 {
10777 char disk_path[PATH_MAX];
10778 char *drv=NULL;
10779 struct stat st;
10780
10781 strcpy(disk_path, disk_by_path);
10782 strncat(disk_path, path, PATH_MAX - strlen(disk_path) - 1);
10783 if (stat(disk_path, &st) == 0) {
10784 struct sys_dev* hba;
10785 char *path;
10786
10787 path = devt_to_devpath(st.st_rdev);
10788 if (path == NULL)
10789 return "unknown";
10790 hba = find_disk_attached_hba(-1, path);
10791 if (hba && hba->type == SYS_DEV_SAS)
10792 drv = "isci";
10793 else if (hba && hba->type == SYS_DEV_SATA)
10794 drv = "ahci";
10795 else if (hba && hba->type == SYS_DEV_VMD)
10796 drv = "vmd";
10797 else if (hba && hba->type == SYS_DEV_NVME)
10798 drv = "nvme";
10799 else
10800 drv = "unknown";
10801 dprintf("path: %s hba: %s attached: %s\n",
10802 path, (hba) ? hba->path : "NULL", drv);
10803 free(path);
10804 }
10805 return drv;
10806 }
10807
10808 static char *imsm_find_array_devnm_by_subdev(int subdev, char *container)
10809 {
10810 static char devnm[32];
10811 char subdev_name[20];
10812 struct mdstat_ent *mdstat;
10813
10814 sprintf(subdev_name, "%d", subdev);
10815 mdstat = mdstat_by_subdev(subdev_name, container);
10816 if (!mdstat)
10817 return NULL;
10818
10819 strcpy(devnm, mdstat->devnm);
10820 free_mdstat(mdstat);
10821 return devnm;
10822 }
10823
10824 static int imsm_reshape_is_allowed_on_container(struct supertype *st,
10825 struct geo_params *geo,
10826 int *old_raid_disks,
10827 int direction)
10828 {
10829 /* currently we only support increasing the number of devices
10830 * for a container. This increases the number of devices for each
10831 * member array. They must all be RAID0 or RAID5.
10832 */
10833 int ret_val = 0;
10834 struct mdinfo *info, *member;
10835 int devices_that_can_grow = 0;
10836
10837 dprintf("imsm: imsm_reshape_is_allowed_on_container(ENTER): st->devnm = (%s)\n", st->devnm);
10838
10839 if (geo->size > 0 ||
10840 geo->level != UnSet ||
10841 geo->layout != UnSet ||
10842 geo->chunksize != 0 ||
10843 geo->raid_disks == UnSet) {
10844 dprintf("imsm: Container operation is allowed for raid disks number change only.\n");
10845 return ret_val;
10846 }
10847
10848 if (direction == ROLLBACK_METADATA_CHANGES) {
10849 dprintf("imsm: Metadata changes rollback is not supported for container operation.\n");
10850 return ret_val;
10851 }
10852
10853 info = container_content_imsm(st, NULL);
10854 for (member = info; member; member = member->next) {
10855 char *result;
10856
10857 dprintf("imsm: checking device_num: %i\n",
10858 member->container_member);
10859
10860 if (geo->raid_disks <= member->array.raid_disks) {
10861 /* we work on container for Online Capacity Expansion
10862 * only so raid_disks has to grow
10863 */
10864 dprintf("imsm: for container operation raid disks increase is required\n");
10865 break;
10866 }
10867
10868 if (info->array.level != 0 && info->array.level != 5) {
10869 /* we cannot use this container with other raid level
10870 */
10871 dprintf("imsm: for container operation wrong raid level (%i) detected\n",
10872 info->array.level);
10873 break;
10874 } else {
10875 /* check for platform support
10876 * for this raid level configuration
10877 */
10878 struct intel_super *super = st->sb;
10879 if (!is_raid_level_supported(super->orom,
10880 member->array.level,
10881 geo->raid_disks)) {
10882 dprintf("platform does not support raid%d with %d disk%s\n",
10883 info->array.level,
10884 geo->raid_disks,
10885 geo->raid_disks > 1 ? "s" : "");
10886 break;
10887 }
10888 /* check if component size is aligned to chunk size
10889 */
10890 if (info->component_size %
10891 (info->array.chunk_size/512)) {
10892 dprintf("Component size is not aligned to chunk size\n");
10893 break;
10894 }
10895 }
10896
10897 if (*old_raid_disks &&
10898 info->array.raid_disks != *old_raid_disks)
10899 break;
10900 *old_raid_disks = info->array.raid_disks;
10901
10902 /* All raid5 and raid0 volumes in container
10903 * have to be ready for Online Capacity Expansion
10904 * so they need to be assembled. We have already
10905 * checked that no recovery etc is happening.
10906 */
10907 result = imsm_find_array_devnm_by_subdev(member->container_member,
10908 st->container_devnm);
10909 if (result == NULL) {
10910 dprintf("imsm: cannot find array\n");
10911 break;
10912 }
10913 devices_that_can_grow++;
10914 }
10915 sysfs_free(info);
10916 if (!member && devices_that_can_grow)
10917 ret_val = 1;
10918
10919 if (ret_val)
10920 dprintf("Container operation allowed\n");
10921 else
10922 dprintf("Error: %i\n", ret_val);
10923
10924 return ret_val;
10925 }
10926
10927 /* Function: get_spares_for_grow
10928 * Description: Allocates memory and creates list of spare devices
10929 * available in the container. Checks if the spare drive size is acceptable.
10930 * Parameters: Pointer to the supertype structure
10931 * Returns: Pointer to the list of spare devices (mdinfo structure) on success,
10932 * NULL if fail
10933 */
10934 static struct mdinfo *get_spares_for_grow(struct supertype *st)
10935 {
10936 struct spare_criteria sc;
10937
10938 get_spare_criteria_imsm(st, &sc);
10939 return container_choose_spares(st, &sc, NULL, NULL, NULL, 0);
10940 }
10941
10942 /******************************************************************************
10943 * function: imsm_create_metadata_update_for_reshape
10944 * Function creates update for whole IMSM container.
10945 *
10946 ******************************************************************************/
10947 static int imsm_create_metadata_update_for_reshape(
10948 struct supertype *st,
10949 struct geo_params *geo,
10950 int old_raid_disks,
10951 struct imsm_update_reshape **updatep)
10952 {
10953 struct intel_super *super = st->sb;
10954 struct imsm_super *mpb = super->anchor;
10955 int update_memory_size;
10956 struct imsm_update_reshape *u;
10957 struct mdinfo *spares;
10958 int i;
10959 int delta_disks;
10960 struct mdinfo *dev;
10961
10962 dprintf("(enter) raid_disks = %i\n", geo->raid_disks);
10963
10964 delta_disks = geo->raid_disks - old_raid_disks;
10965
10966 /* size of all update data without anchor */
10967 update_memory_size = sizeof(struct imsm_update_reshape);
10968
10969 /* now add space for spare disks that we need to add. */
10970 update_memory_size += sizeof(u->new_disks[0]) * (delta_disks - 1);
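/* Editorial note: the "- 1" assumes struct imsm_update_reshape already
 * declares room for one new_disks[] entry, so only the remaining
 * delta_disks - 1 entries need extra bytes; e.g. growing from 4 to 6
 * disks (delta_disks = 2) adds space for just one more entry on top of
 * the base structure.
 */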
10971
10972 u = xcalloc(1, update_memory_size);
10973 u->type = update_reshape_container_disks;
10974 u->old_raid_disks = old_raid_disks;
10975 u->new_raid_disks = geo->raid_disks;
10976
10977 /* now get spare disks list
10978 */
10979 spares = get_spares_for_grow(st);
10980
10981 if (spares == NULL || delta_disks > spares->array.spare_disks) {
10982 pr_err("imsm: ERROR: Cannot get spare devices for %s.\n", geo->dev_name);
10983 i = -1;
10984 goto abort;
10985 }
10986
10987 /* we have got spares
10988 * update disk list in imsm_disk list table in anchor
10989 */
10990 dprintf("imsm: %i spares are available.\n\n",
10991 spares->array.spare_disks);
10992
10993 dev = spares->devs;
10994 for (i = 0; i < delta_disks; i++) {
10995 struct dl *dl;
10996
10997 if (dev == NULL)
10998 break;
10999 u->new_disks[i] = makedev(dev->disk.major,
11000 dev->disk.minor);
11001 dl = get_disk_super(super, dev->disk.major, dev->disk.minor);
11002 dl->index = mpb->num_disks;
11003 mpb->num_disks++;
11004 dev = dev->next;
11005 }
11006
11007 abort:
11008 /* free spares
11009 */
11010 sysfs_free(spares);
11011
11012 dprintf("imsm: reshape update preparation :");
11013 if (i == delta_disks) {
11014 dprintf_cont(" OK\n");
11015 *updatep = u;
11016 return update_memory_size;
11017 }
11018 free(u);
11019 dprintf_cont(" Error\n");
11020
11021 return 0;
11022 }
11023
11024 /******************************************************************************
11025 * function: imsm_create_metadata_update_for_size_change()
11026 * Creates update for IMSM array for array size change.
11027 *
11028 ******************************************************************************/
11029 static int imsm_create_metadata_update_for_size_change(
11030 struct supertype *st,
11031 struct geo_params *geo,
11032 struct imsm_update_size_change **updatep)
11033 {
11034 struct intel_super *super = st->sb;
11035 int update_memory_size;
11036 struct imsm_update_size_change *u;
11037
11038 dprintf("(enter) New size = %llu\n", geo->size);
11039
11040 /* size of all update data without anchor */
11041 update_memory_size = sizeof(struct imsm_update_size_change);
11042
11043 u = xcalloc(1, update_memory_size);
11044 u->type = update_size_change;
11045 u->subdev = super->current_vol;
11046 u->new_size = geo->size;
11047
11048 dprintf("imsm: reshape update preparation : OK\n");
11049 *updatep = u;
11050
11051 return update_memory_size;
11052 }
11053
11054 /******************************************************************************
11055 * function: imsm_create_metadata_update_for_migration()
11056 * Creates update for IMSM array.
11057 *
11058 ******************************************************************************/
11059 static int imsm_create_metadata_update_for_migration(
11060 struct supertype *st,
11061 struct geo_params *geo,
11062 struct imsm_update_reshape_migration **updatep)
11063 {
11064 struct intel_super *super = st->sb;
11065 int update_memory_size;
11066 struct imsm_update_reshape_migration *u;
11067 struct imsm_dev *dev;
11068 int previous_level = -1;
11069
11070 dprintf("(enter) New Level = %i\n", geo->level);
11071
11072 /* size of all update data without anchor */
11073 update_memory_size = sizeof(struct imsm_update_reshape_migration);
11074
11075 u = xcalloc(1, update_memory_size);
11076 u->type = update_reshape_migration;
11077 u->subdev = super->current_vol;
11078 u->new_level = geo->level;
11079 u->new_layout = geo->layout;
11080 u->new_raid_disks = u->old_raid_disks = geo->raid_disks;
11081 u->new_disks[0] = -1;
11082 u->new_chunksize = -1;
11083
11084 dev = get_imsm_dev(super, u->subdev);
11085 if (dev) {
11086 struct imsm_map *map;
11087
11088 map = get_imsm_map(dev, MAP_0);
11089 if (map) {
11090 int current_chunk_size =
11091 __le16_to_cpu(map->blocks_per_strip) / 2;
11092
11093 if (geo->chunksize != current_chunk_size) {
11094 u->new_chunksize = geo->chunksize / 1024;
11095 dprintf("imsm: chunk size change from %i to %i\n",
11096 current_chunk_size, u->new_chunksize);
11097 }
11098 previous_level = map->raid_level;
11099 }
11100 }
11101 if (geo->level == 5 && previous_level == 0) {
11102 struct mdinfo *spares = NULL;
11103
11104 u->new_raid_disks++;
11105 spares = get_spares_for_grow(st);
11106 if (spares == NULL || spares->array.spare_disks < 1) {
11107 free(u);
11108 sysfs_free(spares);
11109 update_memory_size = 0;
11110 pr_err("cannot get spare device for requested migration\n");
11111 return 0;
11112 }
11113 sysfs_free(spares);
11114 }
11115 dprintf("imsm: reshape update preparation : OK\n");
11116 *updatep = u;
11117
11118 return update_memory_size;
11119 }
11120
11121 static void imsm_update_metadata_locally(struct supertype *st,
11122 void *buf, int len)
11123 {
11124 struct metadata_update mu;
11125
11126 mu.buf = buf;
11127 mu.len = len;
11128 mu.space = NULL;
11129 mu.space_list = NULL;
11130 mu.next = NULL;
11131 if (imsm_prepare_update(st, &mu))
11132 imsm_process_update(st, &mu);
11133
11134 while (mu.space_list) {
11135 void **space = mu.space_list;
11136 mu.space_list = *space;
11137 free(space);
11138 }
11139 }
11140
11141 /***************************************************************************
11142 * Function: imsm_analyze_change
11143 * Description: Function analyzes the change for a single volume
11144 * and validates whether the transition is supported
11145 * Parameters: Geometry parameters, supertype structure,
11146 * metadata change direction (apply/rollback)
11147 * Returns: Operation type code on success, -1 if fail
11148 ****************************************************************************/
11149 enum imsm_reshape_type imsm_analyze_change(struct supertype *st,
11150 struct geo_params *geo,
11151 int direction)
11152 {
11153 struct mdinfo info;
11154 int change = -1;
11155 int check_devs = 0;
11156 int chunk;
11157 /* number of added/removed disks in operation result */
11158 int devNumChange = 0;
11159 /* imsm compatible layout value for array geometry verification */
11160 int imsm_layout = -1;
11161 int data_disks;
11162 struct imsm_dev *dev;
11163 struct intel_super *super;
11164 unsigned long long current_size;
11165 unsigned long long free_size;
11166 unsigned long long max_size;
11167 int rv;
11168
11169 getinfo_super_imsm_volume(st, &info, NULL);
11170 if (geo->level != info.array.level && geo->level >= 0 &&
11171 geo->level != UnSet) {
11172 switch (info.array.level) {
11173 case 0:
11174 if (geo->level == 5) {
11175 change = CH_MIGRATION;
11176 if (geo->layout != ALGORITHM_LEFT_ASYMMETRIC) {
11177 pr_err("Error. Requested Layout not supported (left-asymmetric layout is supported only)!\n");
11178 change = -1;
11179 goto analyse_change_exit;
11180 }
11181 imsm_layout = geo->layout;
11182 check_devs = 1;
11183 devNumChange = 1; /* parity disk added */
11184 } else if (geo->level == 10) {
11185 change = CH_TAKEOVER;
11186 check_devs = 1;
11187 devNumChange = 2; /* two mirrors added */
11188 imsm_layout = 0x102; /* imsm supported layout */
11189 }
11190 break;
11191 case 1:
11192 case 10:
11193 if (geo->level == 0) {
11194 change = CH_TAKEOVER;
11195 check_devs = 1;
11196 devNumChange = -(geo->raid_disks/2);
11197 imsm_layout = 0; /* imsm raid0 layout */
11198 }
11199 break;
11200 }
11201 if (change == -1) {
11202 pr_err("Error. Level Migration from %d to %d not supported!\n",
11203 info.array.level, geo->level);
11204 goto analyse_change_exit;
11205 }
11206 } else
11207 geo->level = info.array.level;
11208
11209 if (geo->layout != info.array.layout &&
11210 (geo->layout != UnSet && geo->layout != -1)) {
11211 change = CH_MIGRATION;
11212 if (info.array.layout == 0 && info.array.level == 5 &&
11213 geo->layout == 5) {
11214 /* reshape 5 -> 4 */
11215 } else if (info.array.layout == 5 && info.array.level == 5 &&
11216 geo->layout == 0) {
11217 /* reshape 4 -> 5 */
11218 geo->layout = 0;
11219 geo->level = 5;
11220 } else {
11221 pr_err("Error. Layout Migration from %d to %d not supported!\n",
11222 info.array.layout, geo->layout);
11223 change = -1;
11224 goto analyse_change_exit;
11225 }
11226 } else {
11227 geo->layout = info.array.layout;
11228 if (imsm_layout == -1)
11229 imsm_layout = info.array.layout;
11230 }
11231
11232 if (geo->chunksize > 0 && geo->chunksize != UnSet &&
11233 geo->chunksize != info.array.chunk_size) {
11234 if (info.array.level == 10) {
11235 pr_err("Error. Chunk size change for RAID 10 is not supported.\n");
11236 change = -1;
11237 goto analyse_change_exit;
11238 } else if (info.component_size % (geo->chunksize/512)) {
11239 pr_err("New chunk size (%dK) does not evenly divide device size (%lluk). Aborting...\n",
11240 geo->chunksize/1024, info.component_size/2);
11241 change = -1;
11242 goto analyse_change_exit;
11243 }
11244 change = CH_MIGRATION;
11245 } else {
11246 geo->chunksize = info.array.chunk_size;
11247 }
11248
11249 chunk = geo->chunksize / 1024;
11250
11251 super = st->sb;
11252 dev = get_imsm_dev(super, super->current_vol);
11253 data_disks = imsm_num_data_members(dev , MAP_0);
11254 /* compute current size per disk member
11255 */
11256 current_size = info.custom_array_size / data_disks;
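/* Illustration (assumed numbers): a 3-disk RAID5 volume has
 * data_disks = 2, so custom_array_size = 1953525760 sectors means
 * current_size = 976762880 sectors per member.
 */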
11257
11258 if (geo->size > 0 && geo->size != MAX_SIZE) {
11259 /* align component size
11260 */
11261 geo->size = imsm_component_size_aligment_check(
11262 get_imsm_raid_level(dev->vol.map),
11263 chunk * 1024, super->sector_size,
11264 geo->size * 2);
11265 if (geo->size == 0) {
11266 pr_err("Error. Size expansion is supported only (current size is %llu, requested size /rounded/ is 0).\n",
11267 current_size);
11268 goto analyse_change_exit;
11269 }
11270 }
11271
11272 if (current_size != geo->size && geo->size > 0) {
11273 if (change != -1) {
11274 pr_err("Error. Size change should be the only one at a time.\n");
11275 change = -1;
11276 goto analyse_change_exit;
11277 }
11278 if ((super->current_vol + 1) != super->anchor->num_raid_devs) {
11279 pr_err("Error. The last volume in container can be expanded only (%i/%s).\n",
11280 super->current_vol, st->devnm);
11281 goto analyse_change_exit;
11282 }
11283 /* check the maximum available size
11284 */
11285 rv = imsm_get_free_size(st, dev->vol.map->num_members,
11286 0, chunk, &free_size);
11287 if (rv == 0)
11288 /* Cannot find maximum available space
11289 */
11290 max_size = 0;
11291 else {
11292 max_size = free_size + current_size;
11293 /* align component size
11294 */
11295 max_size = imsm_component_size_aligment_check(
11296 get_imsm_raid_level(dev->vol.map),
11297 chunk * 1024, super->sector_size,
11298 max_size);
11299 }
11300 if (geo->size == MAX_SIZE) {
11301 /* requested size change to the maximum available size
11302 */
11303 if (max_size == 0) {
11304 pr_err("Error. Cannot find maximum available space.\n");
11305 change = -1;
11306 goto analyse_change_exit;
11307 } else
11308 geo->size = max_size;
11309 }
11310
11311 if (direction == ROLLBACK_METADATA_CHANGES) {
11312 /* accept size for rollback only
11313 */
11314 } else {
11315 /* round size due to metadata compatibility
11316 */
11317 geo->size = (geo->size >> SECT_PER_MB_SHIFT)
11318 << SECT_PER_MB_SHIFT;
11319 dprintf("Prepare update for size change to %llu\n",
11320 geo->size );
11321 if (current_size >= geo->size) {
11322 pr_err("Error. Size expansion is supported only (current size is %llu, requested size /rounded/ is %llu).\n",
11323 current_size, geo->size);
11324 goto analyse_change_exit;
11325 }
11326 if (max_size && geo->size > max_size) {
11327 pr_err("Error. Requested size is larger than maximum available size (maximum available size is %llu, requested size /rounded/ is %llu).\n",
11328 max_size, geo->size);
11329 goto analyse_change_exit;
11330 }
11331 }
11332 geo->size *= data_disks;
11333 geo->raid_disks = dev->vol.map->num_members;
11334 change = CH_ARRAY_SIZE;
11335 }
11336 if (!validate_geometry_imsm(st,
11337 geo->level,
11338 imsm_layout,
11339 geo->raid_disks + devNumChange,
11340 &chunk,
11341 geo->size, INVALID_SECTORS,
11342 0, 0, info.consistency_policy, 1))
11343 change = -1;
11344
11345 if (check_devs) {
11346 struct intel_super *super = st->sb;
11347 struct imsm_super *mpb = super->anchor;
11348
11349 if (mpb->num_raid_devs > 1) {
11350 pr_err("Error. Cannot perform operation on %s- for this operation it MUST be single array in container\n",
11351 geo->dev_name);
11352 change = -1;
11353 }
11354 }
11355
11356 analyse_change_exit:
11357 if (direction == ROLLBACK_METADATA_CHANGES &&
11358 (change == CH_MIGRATION || change == CH_TAKEOVER)) {
11359 dprintf("imsm: Metadata changes rollback is not supported for migration and takeover operations.\n");
11360 change = -1;
11361 }
11362 return change;
11363 }
11364
11365 int imsm_takeover(struct supertype *st, struct geo_params *geo)
11366 {
11367 struct intel_super *super = st->sb;
11368 struct imsm_update_takeover *u;
11369
11370 u = xmalloc(sizeof(struct imsm_update_takeover));
11371
11372 u->type = update_takeover;
11373 u->subarray = super->current_vol;
11374
11375 /* 10->0 transition */
11376 if (geo->level == 0)
11377 u->direction = R10_TO_R0;
11378
11379 /* 0->10 transition */
11380 if (geo->level == 10)
11381 u->direction = R0_TO_R10;
11382
11383 /* update metadata locally */
11384 imsm_update_metadata_locally(st, u,
11385 sizeof(struct imsm_update_takeover));
11386 /* and possibly remotely */
11387 if (st->update_tail)
11388 append_metadata_update(st, u,
11389 sizeof(struct imsm_update_takeover));
11390 else
11391 free(u);
11392
11393 return 0;
11394 }
11395
11396 static int imsm_reshape_super(struct supertype *st, unsigned long long size,
11397 int level,
11398 int layout, int chunksize, int raid_disks,
11399 int delta_disks, char *backup, char *dev,
11400 int direction, int verbose)
11401 {
11402 int ret_val = 1;
11403 struct geo_params geo;
11404
11405 dprintf("(enter)\n");
11406
11407 memset(&geo, 0, sizeof(struct geo_params));
11408
11409 geo.dev_name = dev;
11410 strcpy(geo.devnm, st->devnm);
11411 geo.size = size;
11412 geo.level = level;
11413 geo.layout = layout;
11414 geo.chunksize = chunksize;
11415 geo.raid_disks = raid_disks;
11416 if (delta_disks != UnSet)
11417 geo.raid_disks += delta_disks;
11418
11419 dprintf("for level : %i\n", geo.level);
11420 dprintf("for raid_disks : %i\n", geo.raid_disks);
11421
11422 if (experimental() == 0)
11423 return ret_val;
11424
11425 if (strcmp(st->container_devnm, st->devnm) == 0) {
11426 /* On container level we can only increase number of devices. */
11427 dprintf("imsm: info: Container operation\n");
11428 int old_raid_disks = 0;
11429
11430 if (imsm_reshape_is_allowed_on_container(
11431 st, &geo, &old_raid_disks, direction)) {
11432 struct imsm_update_reshape *u = NULL;
11433 int len;
11434
11435 len = imsm_create_metadata_update_for_reshape(
11436 st, &geo, old_raid_disks, &u);
11437
11438 if (len <= 0) {
11439 dprintf("imsm: Cannot prepare update\n");
11440 goto exit_imsm_reshape_super;
11441 }
11442
11443 ret_val = 0;
11444 /* update metadata locally */
11445 imsm_update_metadata_locally(st, u, len);
11446 /* and possibly remotely */
11447 if (st->update_tail)
11448 append_metadata_update(st, u, len);
11449 else
11450 free(u);
11451
11452 } else {
11453 pr_err("(imsm) Operation is not allowed on this container\n");
11454 }
11455 } else {
11456 /* On volume level we support following operations
11457 * - takeover: raid10 -> raid0; raid0 -> raid10
11458 * - chunk size migration
11459 * - migration: raid5 -> raid0; raid0 -> raid5
11460 */
11461 struct intel_super *super = st->sb;
11462 struct intel_dev *dev = super->devlist;
11463 int change;
11464 dprintf("imsm: info: Volume operation\n");
11465 /* find requested device */
11466 while (dev) {
11467 char *devnm =
11468 imsm_find_array_devnm_by_subdev(
11469 dev->index, st->container_devnm);
11470 if (devnm && strcmp(devnm, geo.devnm) == 0)
11471 break;
11472 dev = dev->next;
11473 }
11474 if (dev == NULL) {
11475 pr_err("Cannot find %s (%s) subarray\n",
11476 geo.dev_name, geo.devnm);
11477 goto exit_imsm_reshape_super;
11478 }
11479 super->current_vol = dev->index;
11480 change = imsm_analyze_change(st, &geo, direction);
11481 switch (change) {
11482 case CH_TAKEOVER:
11483 ret_val = imsm_takeover(st, &geo);
11484 break;
11485 case CH_MIGRATION: {
11486 struct imsm_update_reshape_migration *u = NULL;
11487 int len =
11488 imsm_create_metadata_update_for_migration(
11489 st, &geo, &u);
11490 if (len < 1) {
11491 dprintf("imsm: Cannot prepare update\n");
11492 break;
11493 }
11494 ret_val = 0;
11495 /* update metadata locally */
11496 imsm_update_metadata_locally(st, u, len);
11497 /* and possibly remotely */
11498 if (st->update_tail)
11499 append_metadata_update(st, u, len);
11500 else
11501 free(u);
11502 }
11503 break;
11504 case CH_ARRAY_SIZE: {
11505 struct imsm_update_size_change *u = NULL;
11506 int len =
11507 imsm_create_metadata_update_for_size_change(
11508 st, &geo, &u);
11509 if (len < 1) {
11510 dprintf("imsm: Cannot prepare update\n");
11511 break;
11512 }
11513 ret_val = 0;
11514 /* update metadata locally */
11515 imsm_update_metadata_locally(st, u, len);
11516 /* and possibly remotely */
11517 if (st->update_tail)
11518 append_metadata_update(st, u, len);
11519 else
11520 free(u);
11521 }
11522 break;
11523 default:
11524 ret_val = 1;
11525 }
11526 }
11527
11528 exit_imsm_reshape_super:
11529 dprintf("imsm: reshape_super Exit code = %i\n", ret_val);
11530 return ret_val;
11531 }
11532
11533 #define COMPLETED_OK 0
11534 #define COMPLETED_NONE 1
11535 #define COMPLETED_DELAYED 2
11536
11537 static int read_completed(int fd, unsigned long long *val)
11538 {
11539 int ret;
11540 char buf[50];
11541
11542 ret = sysfs_fd_get_str(fd, buf, 50);
11543 if (ret < 0)
11544 return ret;
11545
11546 ret = COMPLETED_OK;
11547 if (strncmp(buf, "none", 4) == 0) {
11548 ret = COMPLETED_NONE;
11549 } else if (strncmp(buf, "delayed", 7) == 0) {
11550 ret = COMPLETED_DELAYED;
11551 } else {
11552 char *ep;
11553 *val = strtoull(buf, &ep, 0);
11554 if (ep == buf || (*ep != 0 && *ep != '\n' && *ep != ' '))
11555 ret = -1;
11556 }
11557 return ret;
11558 }
11559
11560 /*******************************************************************************
11561 * Function: wait_for_reshape_imsm
11562 * Description: Function writes a new sync_max value and waits until
11563 * the reshape process reaches the new position
11564 * Parameters:
11565 * sra : general array info
11566 * ndata : number of disks in new array's layout
11567 * Returns:
11568 * 0 : success,
11569 * 1 : there is no reshape in progress,
11570 * -1 : fail
11571 ******************************************************************************/
11572 int wait_for_reshape_imsm(struct mdinfo *sra, int ndata)
11573 {
11574 int fd = sysfs_get_fd(sra, NULL, "sync_completed");
11575 int retry = 3;
11576 unsigned long long completed;
11577 /* to_complete : new sync_max position */
11578 unsigned long long to_complete = sra->reshape_progress;
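/* sync_max and sync_completed are expressed per data disk, so the
 * whole-array reshape progress is scaled down by the number of data members */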
11579 unsigned long long position_to_set = to_complete / ndata;
11580
11581 if (fd < 0) {
11582 dprintf("cannot open reshape_position\n");
11583 return 1;
11584 }
11585
11586 do {
11587 if (sysfs_fd_get_ll(fd, &completed) < 0) {
11588 if (!retry) {
11589 dprintf("cannot read reshape_position (no reshape in progress)\n");
11590 close(fd);
11591 return 1;
11592 }
11593 usleep(30000);
11594 } else
11595 break;
11596 } while (retry--);
11597
11598 if (completed > position_to_set) {
11599 dprintf("wrong next position to set %llu (%llu)\n",
11600 to_complete, position_to_set);
11601 close(fd);
11602 return -1;
11603 }
11604 dprintf("Position set: %llu\n", position_to_set);
11605 if (sysfs_set_num(sra, NULL, "sync_max",
11606 position_to_set) != 0) {
11607 dprintf("cannot set reshape position to %llu\n",
11608 position_to_set);
11609 close(fd);
11610 return -1;
11611 }
11612
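/* wait until the kernel reports sync_completed at or beyond the requested
 * position, or the array leaves the "reshape" sync_action state */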
11613 do {
11614 int rc;
11615 char action[20];
11616 int timeout = 3000;
11617
11618 sysfs_wait(fd, &timeout);
11619 if (sysfs_get_str(sra, NULL, "sync_action",
11620 action, 20) > 0 &&
11621 strncmp(action, "reshape", 7) != 0) {
11622 if (strncmp(action, "idle", 4) == 0)
11623 break;
11624 close(fd);
11625 return -1;
11626 }
11627
11628 rc = read_completed(fd, &completed);
11629 if (rc < 0) {
11630 dprintf("cannot read reshape_position (in loop)\n");
11631 close(fd);
11632 return 1;
11633 } else if (rc == COMPLETED_NONE)
11634 break;
11635 } while (completed < position_to_set);
11636
11637 close(fd);
11638 return 0;
11639 }
11640
11641 /*******************************************************************************
11642 * Function: check_degradation_change
11643 * Description: Check that the array has not become failed.
11644 * Parameters:
11645 * info : for sysfs access
11646 * sources : source disk descriptors
11647 * degraded: previous degradation level
11648 * Returns:
11649 * degradation level
11650 ******************************************************************************/
11651 int check_degradation_change(struct mdinfo *info,
11652 int *sources,
11653 int degraded)
11654 {
11655 unsigned long long new_degraded;
11656 int rv;
11657
11658 rv = sysfs_get_ll(info, NULL, "degraded", &new_degraded);
11659 if (rv == -1 || (new_degraded != (unsigned long long)degraded)) {
11660 /* check each device to ensure it is still working */
11661 struct mdinfo *sd;
11662 new_degraded = 0;
11663 for (sd = info->devs ; sd ; sd = sd->next) {
11664 if (sd->disk.state & (1<<MD_DISK_FAULTY))
11665 continue;
11666 if (sd->disk.state & (1<<MD_DISK_SYNC)) {
11667 char sbuf[100];
11668
11669 if (sysfs_get_str(info,
11670 sd, "state", sbuf, sizeof(sbuf)) < 0 ||
11671 strstr(sbuf, "faulty") ||
11672 strstr(sbuf, "in_sync") == NULL) {
11673 /* this device is dead */
11674 sd->disk.state = (1<<MD_DISK_FAULTY);
11675 if (sd->disk.raid_disk >= 0 &&
11676 sources[sd->disk.raid_disk] >= 0) {
11677 close(sources[
11678 sd->disk.raid_disk]);
11679 sources[sd->disk.raid_disk] =
11680 -1;
11681 }
11682 new_degraded++;
11683 }
11684 }
11685 }
11686 }
11687
11688 return new_degraded;
11689 }
11690
11691 /*******************************************************************************
11692 * Function: imsm_manage_reshape
11693 * Description: Function finds the array under reshape and manages the
11694 * reshape process. It creates stripe backups (if required) and
11695 * sets checkpoints.
11696 * Parameters:
11697 * afd : Backup handle (native) - not used
11698 * sra : general array info
11699 * reshape : reshape parameters - not used
11700 * st : supertype structure
11701 * backup_blocks : size of critical section [blocks]
11702 * fds : table of source device descriptors
11703 * offsets : start of array (offset per device)
11704 * dests : not used
11705 * destfd : table of destination device descriptors
11706 * destoffsets : table of destination offsets (per device)
11707 * Returns:
11708 * 1 : success, reshape is done
11709 * 0 : fail
11710 ******************************************************************************/
11711 static int imsm_manage_reshape(
11712 int afd, struct mdinfo *sra, struct reshape *reshape,
11713 struct supertype *st, unsigned long backup_blocks,
11714 int *fds, unsigned long long *offsets,
11715 int dests, int *destfd, unsigned long long *destoffsets)
11716 {
11717 int ret_val = 0;
11718 struct intel_super *super = st->sb;
11719 struct intel_dev *dv;
11720 unsigned int sector_size = super->sector_size;
11721 struct imsm_dev *dev = NULL;
11722 struct imsm_map *map_src;
11723 int migr_vol_qan = 0;
11724 int ndata, odata; /* number of data disks in new/old map */
11725 int chunk; /* [bytes] */
11726 struct migr_record *migr_rec;
11727 char *buf = NULL;
11728 unsigned int buf_size; /* [bytes] */
11729 unsigned long long max_position; /* array data size [blocks] */
11730 unsigned long long next_step; /* [blocks] */
11731 unsigned long long old_data_stripe_length;
11732 unsigned long long start_src; /* [bytes] */
11733 unsigned long long start; /* [bytes] */
11734 unsigned long long start_buf_shift; /* [bytes] */
11735 int degraded = 0;
11736 int source_layout = 0;
11737
11738 if (!sra)
11739 return ret_val;
11740
11741 if (!fds || !offsets)
11742 goto abort;
11743
11744 /* Find the volume that is being reshaped */
11745 for (dv = super->devlist; dv; dv = dv->next) {
11746 if (dv->dev->vol.migr_type == MIGR_GEN_MIGR &&
11747 dv->dev->vol.migr_state == 1) {
11748 dev = dv->dev;
11749 migr_vol_qan++;
11750 }
11751 }
11752 /* Only one volume can migrate at the same time */
11753 if (migr_vol_qan != 1) {
11754 pr_err("%s", migr_vol_qan ?
11755 "Number of migrating volumes greater than 1\n" :
11756 "There is no volume during migrationg\n");
11757 goto abort;
11758 }
11759
11760 map_src = get_imsm_map(dev, MAP_1);
11761 if (map_src == NULL)
11762 goto abort;
11763
11764 ndata = imsm_num_data_members(dev, MAP_0);
11765 odata = imsm_num_data_members(dev, MAP_1);
11766
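/* blocks_per_strip is stored in 512-byte blocks; convert the strip size to bytes */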
11767 chunk = __le16_to_cpu(map_src->blocks_per_strip) * 512;
11768 old_data_stripe_length = odata * chunk;
11769
11770 migr_rec = super->migr_rec;
11771
11772 /* initialize migration record for start condition */
11773 if (sra->reshape_progress == 0)
11774 init_migr_record_imsm(st, dev, sra);
11775 else {
11776 if (__le32_to_cpu(migr_rec->rec_status) != UNIT_SRC_NORMAL) {
11777 dprintf("imsm: cannot restart migration when data are present in copy area.\n");
11778 goto abort;
11779 }
11780 /* Save checkpoint to update migration record for current
11781 * reshape position (in md). It can be farther than current
11782 * reshape position in metadata.
11783 */
11784 if (save_checkpoint_imsm(st, sra, UNIT_SRC_NORMAL) == 1) {
11785 /* ignore error == 2, this can mean end of reshape here
11786 */
11787 dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_NORMAL, initial save)\n");
11788 goto abort;
11789 }
11790 }
11791
11792 /* size for data */
11793 buf_size = __le32_to_cpu(migr_rec->blocks_per_unit) * 512;
11794 /* extend buffer size for parity disk */
11795 buf_size += __le32_to_cpu(migr_rec->dest_depth_per_unit) * 512;
11796 /* add space for stripe alignment */
11797 buf_size += old_data_stripe_length;
11798 if (posix_memalign((void **)&buf, MAX_SECTOR_SIZE, buf_size)) {
11799 dprintf("imsm: Cannot allocate checkpoint buffer\n");
11800 goto abort;
11801 }
11802
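/* component_size is the per-disk data size in 512-byte blocks; multiplied by
 * the number of data members it gives the total amount of data to migrate */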
11803 max_position = sra->component_size * ndata;
11804 source_layout = imsm_level_to_layout(map_src->raid_level);
11805
11806 while (__le32_to_cpu(migr_rec->curr_migr_unit) <
11807 __le32_to_cpu(migr_rec->num_migr_units)) {
11808 /* current reshape position [blocks] */
11809 unsigned long long current_position =
11810 __le32_to_cpu(migr_rec->blocks_per_unit)
11811 * __le32_to_cpu(migr_rec->curr_migr_unit);
11812 unsigned long long border;
11813
11814 /* Check that array hasn't become failed.
11815 */
11816 degraded = check_degradation_change(sra, fds, degraded);
11817 if (degraded > 1) {
11818 dprintf("imsm: Abort reshape due to degradation level (%i)\n", degraded);
11819 goto abort;
11820 }
11821
11822 next_step = __le32_to_cpu(migr_rec->blocks_per_unit);
11823
11824 if ((current_position + next_step) > max_position)
11825 next_step = max_position - current_position;
11826
11827 start = current_position * 512;
11828
11829 /* align reading start to old geometry */
11830 start_buf_shift = start % old_data_stripe_length;
11831 start_src = start - start_buf_shift;
11832
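/* border: per-disk distance (in 512-byte blocks after the division below)
 * between the aligned source read position and the destination write
 * position; if it does not exceed the copy-area depth, the affected
 * stripes must be backed up before the kernel may overwrite them */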
11833 border = (start_src / odata) - (start / ndata);
11834 border /= 512;
11835 if (border <= __le32_to_cpu(migr_rec->dest_depth_per_unit)) {
11836 /* save critical stripes to buf
11837 * start - start address of current unit
11838 * to backup [bytes]
11839 * start_src - start address of current unit
11840 * to backup aligned to source array
11841 * [bytes]
11842 */
11843 unsigned long long next_step_filler;
11844 unsigned long long copy_length = next_step * 512;
11845
11846 /* align copy area length to stripe in old geometry */
11847 next_step_filler = ((copy_length + start_buf_shift)
11848 % old_data_stripe_length);
11849 if (next_step_filler)
11850 next_step_filler = (old_data_stripe_length
11851 - next_step_filler);
11852 dprintf("save_stripes() parameters: start = %llu,\tstart_src = %llu,\tnext_step*512 = %llu,\tstart_in_buf_shift = %llu,\tnext_step_filler = %llu\n",
11853 start, start_src, copy_length,
11854 start_buf_shift, next_step_filler);
11855
11856 if (save_stripes(fds, offsets, map_src->num_members,
11857 chunk, map_src->raid_level,
11858 source_layout, 0, NULL, start_src,
11859 copy_length +
11860 next_step_filler + start_buf_shift,
11861 buf)) {
11862 dprintf("imsm: Cannot save stripes to buffer\n");
11863 goto abort;
11864 }
11865 /* Convert data to destination format and store it
11866 * in backup general migration area
11867 */
11868 if (save_backup_imsm(st, dev, sra,
11869 buf + start_buf_shift, copy_length)) {
11870 dprintf("imsm: Cannot save stripes to target devices\n");
11871 goto abort;
11872 }
11873 if (save_checkpoint_imsm(st, sra,
11874 UNIT_SRC_IN_CP_AREA)) {
11875 dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_IN_CP_AREA)\n");
11876 goto abort;
11877 }
11878 } else {
11879 /* set next step to use whole border area */
11880 border /= next_step;
11881 if (border > 1)
11882 next_step *= border;
11883 }
11884 /* When the data is backed up and the checkpoint is stored,
11885 * kick the kernel to reshape this unit of data
11886 */
11887 next_step = next_step + sra->reshape_progress;
11888 /* limit next step to array max position */
11889 if (next_step > max_position)
11890 next_step = max_position;
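/* suspend application I/O to the range that the kernel is about to reshape */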
11891 sysfs_set_num(sra, NULL, "suspend_lo", sra->reshape_progress);
11892 sysfs_set_num(sra, NULL, "suspend_hi", next_step);
11893 sra->reshape_progress = next_step;
11894
11895 /* wait until the reshape of this unit finishes */
11896 if (wait_for_reshape_imsm(sra, ndata)) {
11897 dprintf("wait_for_reshape_imsm returned error!\n");
11898 goto abort;
11899 }
11900 if (sigterm)
11901 goto abort;
11902
11903 if (save_checkpoint_imsm(st, sra, UNIT_SRC_NORMAL) == 1) {
11904 /* ignore error == 2, this can mean end of reshape here
11905 */
11906 dprintf("imsm: Cannot write checkpoint to migration record (UNIT_SRC_NORMAL)\n");
11907 goto abort;
11908 }
11909
11910 }
11911
11912 /* clear migr_rec on disks after successful migration */
11913 struct dl *d;
11914
11915 memset(super->migr_rec_buf, 0, MIGR_REC_BUF_SECTORS*MAX_SECTOR_SIZE);
11916 for (d = super->disks; d; d = d->next) {
11917 if (d->index < 0 || is_failed(&d->disk))
11918 continue;
11919 unsigned long long dsize;
11920
11921 get_dev_size(d->fd, NULL, &dsize);
11922 if (lseek64(d->fd, dsize - MIGR_REC_SECTOR_POSITION*sector_size,
11923 SEEK_SET) >= 0) {
11924 if ((unsigned int)write(d->fd, super->migr_rec_buf,
11925 MIGR_REC_BUF_SECTORS*sector_size) !=
11926 MIGR_REC_BUF_SECTORS*sector_size)
11927 perror("Write migr_rec failed");
11928 }
11929 }
11930
11931 /* return '1' if done */
11932 ret_val = 1;
11933 abort:
11934 free(buf);
11935 /* See Grow.c: abort_reshape() for further explanation */
11936 sysfs_set_num(sra, NULL, "suspend_lo", 0x7FFFFFFFFFFFFFFFULL);
11937 sysfs_set_num(sra, NULL, "suspend_hi", 0);
11938 sysfs_set_num(sra, NULL, "suspend_lo", 0);
11939
11940 return ret_val;
11941 }
11942
11943 struct superswitch super_imsm = {
11944 .examine_super = examine_super_imsm,
11945 .brief_examine_super = brief_examine_super_imsm,
11946 .brief_examine_subarrays = brief_examine_subarrays_imsm,
11947 .export_examine_super = export_examine_super_imsm,
11948 .detail_super = detail_super_imsm,
11949 .brief_detail_super = brief_detail_super_imsm,
11950 .write_init_super = write_init_super_imsm,
11951 .validate_geometry = validate_geometry_imsm,
11952 .add_to_super = add_to_super_imsm,
11953 .remove_from_super = remove_from_super_imsm,
11954 .detail_platform = detail_platform_imsm,
11955 .export_detail_platform = export_detail_platform_imsm,
11956 .kill_subarray = kill_subarray_imsm,
11957 .update_subarray = update_subarray_imsm,
11958 .load_container = load_container_imsm,
11959 .default_geometry = default_geometry_imsm,
11960 .get_disk_controller_domain = imsm_get_disk_controller_domain,
11961 .reshape_super = imsm_reshape_super,
11962 .manage_reshape = imsm_manage_reshape,
11963 .recover_backup = recover_backup_imsm,
11964 .copy_metadata = copy_metadata_imsm,
11965 .examine_badblocks = examine_badblocks_imsm,
11966 .match_home = match_home_imsm,
11967 .uuid_from_super= uuid_from_super_imsm,
11968 .getinfo_super = getinfo_super_imsm,
11969 .getinfo_super_disks = getinfo_super_disks_imsm,
11970 .update_super = update_super_imsm,
11971
11972 .avail_size = avail_size_imsm,
11973 .get_spare_criteria = get_spare_criteria_imsm,
11974
11975 .compare_super = compare_super_imsm,
11976
11977 .load_super = load_super_imsm,
11978 .init_super = init_super_imsm,
11979 .store_super = store_super_imsm,
11980 .free_super = free_super_imsm,
11981 .match_metadata_desc = match_metadata_desc_imsm,
11982 .container_content = container_content_imsm,
11983 .validate_container = validate_container_imsm,
11984
11985 .write_init_ppl = write_init_ppl_imsm,
11986 .validate_ppl = validate_ppl_imsm,
11987
11988 .external = 1,
11989 .name = "imsm",
11990
11991 /* for mdmon */
11992 .open_new = imsm_open_new,
11993 .set_array_state= imsm_set_array_state,
11994 .set_disk = imsm_set_disk,
11995 .sync_metadata = imsm_sync_metadata,
11996 .activate_spare = imsm_activate_spare,
11997 .process_update = imsm_process_update,
11998 .prepare_update = imsm_prepare_update,
11999 .record_bad_block = imsm_record_badblock,
12000 .clear_bad_block = imsm_clear_badblock,
12001 .get_bad_blocks = imsm_get_badblocks,
12002 };