]> git.ipfire.org Git - thirdparty/mdadm.git/blob - super-intel.c
be01f325c02c238a01d509f70b8db5b9aee1836d
[thirdparty/mdadm.git] / super-intel.c
1 /*
2 * mdadm - Intel(R) Matrix Storage Manager Support
3 *
4 * Copyright (C) 2002-2008 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #define HAVE_STDINT_H 1
21 #include "mdadm.h"
22 #include "mdmon.h"
23 #include "sha1.h"
24 #include "platform-intel.h"
25 #include <values.h>
26 #include <scsi/sg.h>
27 #include <ctype.h>
28 #include <dirent.h>
29
30 /* MPB == Metadata Parameter Block */
31 #define MPB_SIGNATURE "Intel Raid ISM Cfg Sig. "
32 #define MPB_SIG_LEN (strlen(MPB_SIGNATURE))
33 #define MPB_VERSION_RAID0 "1.0.00"
34 #define MPB_VERSION_RAID1 "1.1.00"
35 #define MPB_VERSION_MANY_VOLUMES_PER_ARRAY "1.2.00"
36 #define MPB_VERSION_3OR4_DISK_ARRAY "1.2.01"
37 #define MPB_VERSION_RAID5 "1.2.02"
38 #define MPB_VERSION_5OR6_DISK_ARRAY "1.2.04"
39 #define MPB_VERSION_CNG "1.2.06"
40 #define MPB_VERSION_ATTRIBS "1.3.00"
41 #define MAX_SIGNATURE_LENGTH 32
42 #define MAX_RAID_SERIAL_LEN 16
43
44 #define MPB_ATTRIB_CHECKSUM_VERIFY __cpu_to_le32(0x80000000)
45 #define MPB_ATTRIB_PM __cpu_to_le32(0x40000000)
46 #define MPB_ATTRIB_2TB __cpu_to_le32(0x20000000)
47 #define MPB_ATTRIB_RAID0 __cpu_to_le32(0x00000001)
48 #define MPB_ATTRIB_RAID1 __cpu_to_le32(0x00000002)
49 #define MPB_ATTRIB_RAID10 __cpu_to_le32(0x00000004)
50 #define MPB_ATTRIB_RAID1E __cpu_to_le32(0x00000008)
51 #define MPB_ATTRIB_RAID5 __cpu_to_le32(0x00000010)
52 #define MPB_ATTRIB_RAIDCNG __cpu_to_le32(0x00000020)
53
54 #define MPB_SECTOR_CNT 418
55 #define IMSM_RESERVED_SECTORS 4096
56
/* Disk configuration info. */
#define IMSM_MAX_DEVICES 255
/* On-disk entry of the MPB disk table; multi-byte fields are little-endian.
 * Offsets in the field comments are relative to the start of the MPB.
 */
struct imsm_disk {
	__u8 serial[MAX_RAID_SERIAL_LEN];/* 0xD8 - 0xE7 ascii serial number */
	__u32 total_blocks;		 /* 0xE8 - 0xEB total blocks */
	__u32 scsi_id;			 /* 0xEC - 0xEF scsi ID */
/* flag bits stored in 'status' below */
#define SPARE_DISK      __cpu_to_le32(0x01)  /* Spare */
#define CONFIGURED_DISK __cpu_to_le32(0x02)  /* Member of some RaidDev */
#define FAILED_DISK     __cpu_to_le32(0x04)  /* Permanent failure */
#define USABLE_DISK     __cpu_to_le32(0x08)  /* Fully usable unless FAILED_DISK is set */
	__u32 status;			 /* 0xF0 - 0xF3 */
	__u32 owner_cfg_num; /* which config 0,1,2... owns this disk */
#define	IMSM_DISK_FILLERS	4
	__u32 filler[IMSM_DISK_FILLERS]; /* 0xF4 - 0x107 MPB_DISK_FILLERS for future expansion */
};
72
/* RAID map configuration infos. */
/* On-disk variable-length map: disk_ord_tbl[] really holds num_members
 * entries (see sizeof_imsm_map()); the top byte of each ordinal carries
 * flags such as IMSM_ORD_REBUILD.
 */
struct imsm_map {
	__u32 pba_of_lba0;	/* start address of partition */
	__u32 blocks_per_member;/* blocks per member */
	__u32 num_data_stripes;	/* number of data stripes */
	__u16 blocks_per_strip;
	__u8  map_state;	/* Normal, Uninitialized, Degraded, Failed */
#define IMSM_T_STATE_NORMAL 0
#define IMSM_T_STATE_UNINITIALIZED 1
#define IMSM_T_STATE_DEGRADED 2
#define IMSM_T_STATE_FAILED 3
	__u8  raid_level;
#define IMSM_T_RAID0 0
#define IMSM_T_RAID1 1
#define IMSM_T_RAID5 5		/* since metadata version 1.2.02 ? */
	__u8  num_members;	/* number of member disks */
	__u8  num_domains;	/* number of parity domains */
	__u8  failed_disk_num;  /* valid only when state is degraded */
	__u8  reserved[1];
	__u32 filler[7];	/* expansion area */
#define IMSM_ORD_REBUILD (1 << 24)
	__u32 disk_ord_tbl[1];	/* disk_ord_tbl[num_members],
				 * top byte contains some flags
				 */
} __attribute__ ((packed));
98
/* Per-volume migration/consistency state; immediately followed on disk by
 * one imsm_map, or by two maps while migr_state is set (see get_imsm_map()).
 */
struct imsm_vol {
	__u32 curr_migr_unit;
	__u32 checkpoint_id;	/* id to access curr_migr_unit */
	__u8  migr_state;	/* Normal or Migrating */
#define MIGR_INIT 0
#define MIGR_REBUILD 1
#define MIGR_VERIFY 2 /* analogous to echo check > sync_action */
#define MIGR_GEN_MIGR 3
#define MIGR_STATE_CHANGE 4
	__u8  migr_type;	/* Initializing, Rebuilding, ... */
	__u8  dirty;
	__u8  fs_state;	/* fast-sync state for CnG (0xff == disabled) */
	__u16 verify_errors;	/* number of mismatches */
	__u16 bad_blocks;	/* number of bad blocks during verify */
	__u32 filler[4];
	struct imsm_map map[1];
	/* here comes another one if migr_state */
} __attribute__ ((packed));
117
/* On-disk per-raid-device record; variable length because the trailing
 * imsm_vol embeds one or two variable-length maps (see sizeof_imsm_dev()).
 */
struct imsm_dev {
	__u8  volume[MAX_RAID_SERIAL_LEN];
	__u32 size_low;		/* array size in 512-byte sectors, low word */
	__u32 size_high;	/* array size in 512-byte sectors, high word */
/* flag bits stored in 'status' below (little-endian on disk) */
#define DEV_BOOTABLE		__cpu_to_le32(0x01)
#define DEV_BOOT_DEVICE		__cpu_to_le32(0x02)
#define DEV_READ_COALESCING	__cpu_to_le32(0x04)
#define DEV_WRITE_COALESCING	__cpu_to_le32(0x08)
#define DEV_LAST_SHUTDOWN_DIRTY	__cpu_to_le32(0x10)
#define DEV_HIDDEN_AT_BOOT	__cpu_to_le32(0x20)
#define DEV_CURRENTLY_HIDDEN	__cpu_to_le32(0x40)
#define DEV_VERIFY_AND_FIX	__cpu_to_le32(0x80)
#define DEV_MAP_STATE_UNINIT	__cpu_to_le32(0x100)
#define DEV_NO_AUTO_RECOVERY	__cpu_to_le32(0x200)
#define DEV_CLONE_N_GO		__cpu_to_le32(0x400)
#define DEV_CLONE_MAN_SYNC	__cpu_to_le32(0x800)
#define DEV_CNG_MASTER_DISK_NUM	__cpu_to_le32(0x1000)
	__u32 status;		/* Persistent RaidDev status */
	__u32 reserved_blocks;	/* Reserved blocks at beginning of volume */
	__u8  migr_priority;
	__u8  num_sub_vols;
	__u8  tid;
	__u8  cng_master_disk;
	__u16 cache_policy;
	__u8  cng_state;
	__u8  cng_sub_state;
#define IMSM_DEV_FILLERS 10
	__u32 filler[IMSM_DEV_FILLERS];
	struct imsm_vol vol;
} __attribute__ ((packed));
148
/* MPB anchor: the fixed header of the on-disk metadata.  The disk table
 * starts at offset 0xD8 and is followed by num_raid_devs variable-length
 * imsm_dev records and then the BBM log.  Multi-byte fields little-endian.
 */
struct imsm_super {
	__u8 sig[MAX_SIGNATURE_LENGTH];	/* 0x00 - 0x1F */
	__u32 check_sum;		/* 0x20 - 0x23 MPB Checksum */
	__u32 mpb_size;			/* 0x24 - 0x27 Size of MPB */
	__u32 family_num;		/* 0x28 - 0x2B Checksum from first time this config was written */
	__u32 generation_num;		/* 0x2C - 0x2F Incremented each time this array's MPB is written */
	__u32 error_log_size;		/* 0x30 - 0x33 in bytes */
	__u32 attributes;		/* 0x34 - 0x37 */
	__u8 num_disks;			/* 0x38 Number of configured disks */
	__u8 num_raid_devs;		/* 0x39 Number of configured volumes */
	__u8 error_log_pos;		/* 0x3A */
	__u8 fill[1];			/* 0x3B */
	__u32 cache_size;		/* 0x3c - 0x40 in mb */
	__u32 orig_family_num;		/* 0x40 - 0x43 original family num */
	__u32 pwr_cycle_count;		/* 0x44 - 0x47 simulated power cycle count for array */
	__u32 bbm_log_size;		/* 0x48 - 0x4B - size of bad Block Mgmt Log in bytes */
#define IMSM_FILLERS 35
	__u32 filler[IMSM_FILLERS];	/* 0x4C - 0xD7 RAID_MPB_FILLERS */
	struct imsm_disk disk[1];	/* 0xD8 diskTbl[numDisks] */
	/* here comes imsm_dev[num_raid_devs] */
	/* here comes BBM logs */
} __attribute__ ((packed));
171
#define BBM_LOG_MAX_ENTRIES 254

/* one remapped-bad-block record in the BBM log */
struct bbm_log_entry {
	__u64 defective_block_start;
#define UNREADABLE 0xFFFFFFFF
	__u32 spare_block_offset;
	__u16 remapped_marked_count;
	__u16 disk_ordinal;
} __attribute__ ((__packed__));

/* on-disk Bad Block Management log (signature 0xABADB10C) */
struct bbm_log {
	__u32 signature;	/* 0xABADB10C */
	__u32 entry_count;
	__u32 reserved_spare_block_count; /* 0 */
	__u32 reserved;		/* 0xFFFF */
	__u64 first_spare_lba;
	struct bbm_log_entry mapped_block_entries[BBM_LOG_MAX_ENTRIES];
} __attribute__ ((__packed__));
190
191
#ifndef MDASSEMBLE
/* printable names for the IMSM_T_STATE_* map_state values, in order */
static char *map_state_str[] = { "normal", "uninitialized", "degraded", "failed" };
#endif
195
/* number of 512-byte sectors needed to hold 'bytes', rounding up */
static unsigned int sector_count(__u32 bytes)
{
	return (bytes + 511) / 512;
}
200
201 static unsigned int mpb_sectors(struct imsm_super *mpb)
202 {
203 return sector_count(__le32_to_cpu(mpb->mpb_size));
204 }
205
/* node of the parsed raid-device list hung off intel_super->devlist */
struct intel_dev {
	struct imsm_dev *dev;
	struct intel_dev *next;
	int index;	/* position of this device in the anchor's device table */
};
211
/* internal representation of IMSM metadata */
struct intel_super {
	union {
		void *buf; /* O_DIRECT buffer for reading/writing metadata */
		struct imsm_super *anchor; /* immovable parameters */
	};
	size_t len; /* size of the 'buf' allocation */
	void *next_buf; /* for realloc'ing buf from the manager */
	size_t next_len;
	int updates_pending; /* count of pending updates for mdmon */
	int creating_imsm; /* flag to indicate container creation */
	int current_vol; /* index of raid device undergoing creation */
	__u32 create_offset; /* common start for 'current_vol' */
	struct intel_dev *devlist;
	/* one 'struct dl' per physical disk known to this container */
	struct dl {
		struct dl *next;
		int index; /* slot in the anchor disk table; -1 for spares */
		__u8 serial[MAX_RAID_SERIAL_LEN];
		int major, minor;
		char *devname;
		struct imsm_disk disk;
		int fd;
		int extent_cnt;
		struct extent *e; /* for determining freespace @ create */
	} *disks;
	struct dl *add; /* list of disks to add while mdmon active */
	struct dl *missing; /* disks removed while we weren't looking */
	struct bbm_log *bbm_log;
	const char *hba; /* device path of the raid controller for this metadata */
	const struct imsm_orom *orom; /* platform firmware support */
};
243
/* block range [start, start + size) on a physical disk, in sectors */
struct extent {
	unsigned long long start, size;
};
247
/* definition of messages passed to imsm_process_update */
/* every imsm_update_* payload below starts with this tag */
enum imsm_update_type {
	update_activate_spare,
	update_create_array,
	update_add_disk,
};
254
/* update message selecting spare 'dl' for 'slot' of raid device 'array';
 * messages may be chained through 'next'
 */
struct imsm_update_activate_spare {
	enum imsm_update_type type;
	struct dl *dl;
	int slot;
	int array;
	struct imsm_update_activate_spare *next;
};
262
/* element of the disk serial-number list appended to an
 * imsm_update_create_array message (located via get_disk_info())
 */
struct disk_info {
	__u8 serial[MAX_RAID_SERIAL_LEN];
};
266
/* create-array message: the embedded imsm_dev is variable length and is
 * followed in the allocation by a disk_info list (see get_disk_info())
 */
struct imsm_update_create_array {
	enum imsm_update_type type;
	int dev_idx;
	struct imsm_dev dev;
};
272
/* add-disk message: carries no payload beyond the type tag */
struct imsm_update_add_disk {
	enum imsm_update_type type;
};
276
277 static struct supertype *match_metadata_desc_imsm(char *arg)
278 {
279 struct supertype *st;
280
281 if (strcmp(arg, "imsm") != 0 &&
282 strcmp(arg, "default") != 0
283 )
284 return NULL;
285
286 st = malloc(sizeof(*st));
287 memset(st, 0, sizeof(*st));
288 st->ss = &super_imsm;
289 st->max_devs = IMSM_MAX_DEVICES;
290 st->minor_version = 0;
291 st->sb = NULL;
292 return st;
293 }
294
295 #ifndef MDASSEMBLE
296 static __u8 *get_imsm_version(struct imsm_super *mpb)
297 {
298 return &mpb->sig[MPB_SIG_LEN];
299 }
300 #endif
301
302 /* retrieve a disk directly from the anchor when the anchor is known to be
303 * up-to-date, currently only at load time
304 */
305 static struct imsm_disk *__get_imsm_disk(struct imsm_super *mpb, __u8 index)
306 {
307 if (index >= mpb->num_disks)
308 return NULL;
309 return &mpb->disk[index];
310 }
311
312 #ifndef MDASSEMBLE
313 /* retrieve a disk from the parsed metadata */
314 static struct imsm_disk *get_imsm_disk(struct intel_super *super, __u8 index)
315 {
316 struct dl *d;
317
318 for (d = super->disks; d; d = d->next)
319 if (d->index == index)
320 return &d->disk;
321
322 return NULL;
323 }
324 #endif
325
326 /* generate a checksum directly from the anchor when the anchor is known to be
327 * up-to-date, currently only at load or write_super after coalescing
328 */
329 static __u32 __gen_imsm_checksum(struct imsm_super *mpb)
330 {
331 __u32 end = mpb->mpb_size / sizeof(end);
332 __u32 *p = (__u32 *) mpb;
333 __u32 sum = 0;
334
335 while (end--) {
336 sum += __le32_to_cpu(*p);
337 p++;
338 }
339
340 return sum - __le32_to_cpu(mpb->check_sum);
341 }
342
343 static size_t sizeof_imsm_map(struct imsm_map *map)
344 {
345 return sizeof(struct imsm_map) + sizeof(__u32) * (map->num_members - 1);
346 }
347
348 struct imsm_map *get_imsm_map(struct imsm_dev *dev, int second_map)
349 {
350 struct imsm_map *map = &dev->vol.map[0];
351
352 if (second_map && !dev->vol.migr_state)
353 return NULL;
354 else if (second_map) {
355 void *ptr = map;
356
357 return ptr + sizeof_imsm_map(map);
358 } else
359 return map;
360
361 }
362
363 /* return the size of the device.
364 * migr_state increases the returned size if map[0] were to be duplicated
365 */
366 static size_t sizeof_imsm_dev(struct imsm_dev *dev, int migr_state)
367 {
368 size_t size = sizeof(*dev) - sizeof(struct imsm_map) +
369 sizeof_imsm_map(get_imsm_map(dev, 0));
370
371 /* migrating means an additional map */
372 if (dev->vol.migr_state)
373 size += sizeof_imsm_map(get_imsm_map(dev, 1));
374 else if (migr_state)
375 size += sizeof_imsm_map(get_imsm_map(dev, 0));
376
377 return size;
378 }
379
380 #ifndef MDASSEMBLE
381 /* retrieve disk serial number list from a metadata update */
382 static struct disk_info *get_disk_info(struct imsm_update_create_array *update)
383 {
384 void *u = update;
385 struct disk_info *inf;
386
387 inf = u + sizeof(*update) - sizeof(struct imsm_dev) +
388 sizeof_imsm_dev(&update->dev, 0);
389
390 return inf;
391 }
392 #endif
393
394 static struct imsm_dev *__get_imsm_dev(struct imsm_super *mpb, __u8 index)
395 {
396 int offset;
397 int i;
398 void *_mpb = mpb;
399
400 if (index >= mpb->num_raid_devs)
401 return NULL;
402
403 /* devices start after all disks */
404 offset = ((void *) &mpb->disk[mpb->num_disks]) - _mpb;
405
406 for (i = 0; i <= index; i++)
407 if (i == index)
408 return _mpb + offset;
409 else
410 offset += sizeof_imsm_dev(_mpb + offset, 0);
411
412 return NULL;
413 }
414
415 static struct imsm_dev *get_imsm_dev(struct intel_super *super, __u8 index)
416 {
417 struct intel_dev *dv;
418
419 if (index >= super->anchor->num_raid_devs)
420 return NULL;
421 for (dv = super->devlist; dv; dv = dv->next)
422 if (dv->index == index)
423 return dv->dev;
424 return NULL;
425 }
426
427 static __u32 get_imsm_ord_tbl_ent(struct imsm_dev *dev, int slot)
428 {
429 struct imsm_map *map;
430
431 if (dev->vol.migr_state)
432 map = get_imsm_map(dev, 1);
433 else
434 map = get_imsm_map(dev, 0);
435
436 /* top byte identifies disk under rebuild */
437 return __le32_to_cpu(map->disk_ord_tbl[slot]);
438 }
439
440 #define ord_to_idx(ord) (((ord) << 8) >> 8)
441 static __u32 get_imsm_disk_idx(struct imsm_dev *dev, int slot)
442 {
443 __u32 ord = get_imsm_ord_tbl_ent(dev, slot);
444
445 return ord_to_idx(ord);
446 }
447
/* store 'ord' (disk index plus flag byte) at 'slot', little-endian on disk */
static void set_imsm_ord_tbl_ent(struct imsm_map *map, int slot, __u32 ord)
{
	map->disk_ord_tbl[slot] = __cpu_to_le32(ord);
}
452
453 static int get_imsm_raid_level(struct imsm_map *map)
454 {
455 if (map->raid_level == 1) {
456 if (map->num_members == 2)
457 return 1;
458 else
459 return 10;
460 }
461
462 return map->raid_level;
463 }
464
465 static int cmp_extent(const void *av, const void *bv)
466 {
467 const struct extent *a = av;
468 const struct extent *b = bv;
469 if (a->start < b->start)
470 return -1;
471 if (a->start > b->start)
472 return 1;
473 return 0;
474 }
475
476 static int count_memberships(struct dl *dl, struct intel_super *super)
477 {
478 int memberships = 0;
479 int i, j;
480
481 for (i = 0; i < super->anchor->num_raid_devs; i++) {
482 struct imsm_dev *dev = get_imsm_dev(super, i);
483 struct imsm_map *map = get_imsm_map(dev, 0);
484
485 for (j = 0; j < map->num_members; j++) {
486 __u32 index = get_imsm_disk_idx(dev, j);
487
488 if (index == dl->index)
489 memberships++;
490 }
491 }
492
493 return memberships;
494 }
495
/* Build a start-sorted array of extents in use on physical disk 'dl',
 * terminated by a zero-size entry marking where the end-of-disk metadata
 * reservation begins.  Caller frees the result; NULL on allocation failure.
 */
static struct extent *get_extents(struct intel_super *super, struct dl *dl)
{
	/* find a list of used extents on the given physical device */
	struct extent *rv, *e;
	int i, j;
	int memberships = count_memberships(dl, super);
	__u32 reservation = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;

	/* one extent per membership plus the terminating metadata entry */
	rv = malloc(sizeof(struct extent) * (memberships + 1));
	if (!rv)
		return NULL;
	e = rv;

	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);
		struct imsm_map *map = get_imsm_map(dev, 0);

		for (j = 0; j < map->num_members; j++) {
			__u32 index = get_imsm_disk_idx(dev, j);

			if (index == dl->index) {
				e->start = __le32_to_cpu(map->pba_of_lba0);
				e->size = __le32_to_cpu(map->blocks_per_member);
				e++;
			}
		}
	}
	qsort(rv, memberships, sizeof(*rv), cmp_extent);

	/* determine the start of the metadata
	 * when no raid devices are defined use the default
	 * ...otherwise allow the metadata to truncate the value
	 * as is the case with older versions of imsm
	 */
	if (memberships) {
		struct extent *last = &rv[memberships - 1];
		__u32 remainder;

		/* space between the last member extent and end of disk */
		remainder = __le32_to_cpu(dl->disk.total_blocks) -
			    (last->start + last->size);
		/* round down to 1k block to satisfy precision of the kernel
		 * 'size' interface
		 */
		remainder &= ~1UL;
		/* make sure remainder is still sane */
		if (remainder < ROUND_UP(super->len, 512) >> 9)
			remainder = ROUND_UP(super->len, 512) >> 9;
		if (reservation > remainder)
			reservation = remainder;
	}
	/* terminating entry: where the reserved metadata region starts */
	e->start = __le32_to_cpu(dl->disk.total_blocks) - reservation;
	e->size = 0;
	return rv;
}
550
551 /* try to determine how much space is reserved for metadata from
552 * the last get_extents() entry, otherwise fallback to the
553 * default
554 */
555 static __u32 imsm_reserved_sectors(struct intel_super *super, struct dl *dl)
556 {
557 struct extent *e;
558 int i;
559 __u32 rv;
560
561 /* for spares just return a minimal reservation which will grow
562 * once the spare is picked up by an array
563 */
564 if (dl->index == -1)
565 return MPB_SECTOR_CNT;
566
567 e = get_extents(super, dl);
568 if (!e)
569 return MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
570
571 /* scroll to last entry */
572 for (i = 0; e[i].size; i++)
573 continue;
574
575 rv = __le32_to_cpu(dl->disk.total_blocks) - e[i].start;
576
577 free(e);
578
579 return rv;
580 }
581
582 #ifndef MDASSEMBLE
/* Pretty-print one raid device: geometry, the slot occupied by disk
 * 'disk_idx' ("This Slot"), and migration/map/dirty state.
 */
static void print_imsm_dev(struct imsm_dev *dev, char *uuid, int disk_idx)
{
	__u64 sz;
	int slot;
	struct imsm_map *map = get_imsm_map(dev, 0);
	__u32 ord;

	printf("\n");
	printf("[%.16s]:\n", dev->volume);
	printf(" UUID : %s\n", uuid);
	printf(" RAID Level : %d\n", get_imsm_raid_level(map));
	printf(" Members : %d\n", map->num_members);
	/* locate 'disk_idx' in this device's ordinal table */
	for (slot = 0; slot < map->num_members; slot++)
		if (disk_idx== get_imsm_disk_idx(dev, slot))
			break;
	if (slot < map->num_members) {
		ord = get_imsm_ord_tbl_ent(dev, slot);
		printf(" This Slot : %d%s\n", slot,
		       ord & IMSM_ORD_REBUILD ? " (out-of-sync)" : "");
	} else
		printf(" This Slot : ?\n");
	/* the 64-bit array size is split across two little-endian words */
	sz = __le32_to_cpu(dev->size_high);
	sz <<= 32;
	sz += __le32_to_cpu(dev->size_low);
	printf(" Array Size : %llu%s\n", (unsigned long long)sz,
	       human_size(sz * 512));
	sz = __le32_to_cpu(map->blocks_per_member);
	printf(" Per Dev Size : %llu%s\n", (unsigned long long)sz,
	       human_size(sz * 512));
	printf(" Sector Offset : %u\n",
		__le32_to_cpu(map->pba_of_lba0));
	printf(" Num Stripes : %u\n",
		__le32_to_cpu(map->num_data_stripes));
	/* blocks_per_strip is in 512-byte sectors, hence "/ 2" for KiB */
	printf(" Chunk Size : %u KiB\n",
		__le16_to_cpu(map->blocks_per_strip) / 2);
	printf(" Reserved : %d\n", __le32_to_cpu(dev->reserved_blocks));
	printf(" Migrate State : %s", dev->vol.migr_state ? "migrating" : "idle");
	if (dev->vol.migr_state)
		printf(": %s", dev->vol.migr_type ? "rebuilding" : "initializing");
	printf("\n");
	printf(" Map State : %s", map_state_str[map->map_state]);
	if (dev->vol.migr_state) {
		/* during migration also report the second map's state */
		struct imsm_map *map = get_imsm_map(dev, 1);
		printf(" <-- %s", map_state_str[map->map_state]);
	}
	printf("\n");
	printf(" Dirty State : %s\n", dev->vol.dirty ? "dirty" : "clean");
}
631
632 static void print_imsm_disk(struct imsm_super *mpb, int index, __u32 reserved)
633 {
634 struct imsm_disk *disk = __get_imsm_disk(mpb, index);
635 char str[MAX_RAID_SERIAL_LEN + 1];
636 __u32 s;
637 __u64 sz;
638
639 if (index < 0)
640 return;
641
642 printf("\n");
643 snprintf(str, MAX_RAID_SERIAL_LEN + 1, "%s", disk->serial);
644 printf(" Disk%02d Serial : %s\n", index, str);
645 s = disk->status;
646 printf(" State :%s%s%s%s\n", s&SPARE_DISK ? " spare" : "",
647 s&CONFIGURED_DISK ? " active" : "",
648 s&FAILED_DISK ? " failed" : "",
649 s&USABLE_DISK ? " usable" : "");
650 printf(" Id : %08x\n", __le32_to_cpu(disk->scsi_id));
651 sz = __le32_to_cpu(disk->total_blocks) - reserved;
652 printf(" Usable Size : %llu%s\n", (unsigned long long)sz,
653 human_size(sz * 512));
654 }
655
656 static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info);
657
/* --examine: dump the anchor header, the local disk, the bad block log,
 * every raid device and finally every other disk found in the MPB
 */
static void examine_super_imsm(struct supertype *st, char *homehost)
{
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	char str[MAX_SIGNATURE_LENGTH];
	int i;
	struct mdinfo info;
	char nbuf[64];
	__u32 sum;
	__u32 reserved = imsm_reserved_sectors(super, super->disks);


	snprintf(str, MPB_SIG_LEN, "%s", mpb->sig);
	printf(" Magic : %s\n", str);
	/* NOTE(review): the size arg is strlen(MPB_VERSION_RAID0) so only 5
	 * chars land in 'str', and 'str' is not used afterwards -- the
	 * version is printed straight from the anchor below; confirm intent
	 */
	snprintf(str, strlen(MPB_VERSION_RAID0), "%s", get_imsm_version(mpb));
	printf(" Version : %s\n", get_imsm_version(mpb));
	printf(" Family : %08x\n", __le32_to_cpu(mpb->family_num));
	printf(" Generation : %08x\n", __le32_to_cpu(mpb->generation_num));
	getinfo_super_imsm(st, &info);
	fname_from_uuid(st, &info, nbuf,'-');
	/* fname_from_uuid() output carries a 5-char prefix; skip it */
	printf(" UUID : %s\n", nbuf + 5);
	sum = __le32_to_cpu(mpb->check_sum);
	printf(" Checksum : %08x %s\n", sum,
	       __gen_imsm_checksum(mpb) == sum ? "correct" : "incorrect");
	printf(" MPB Sectors : %d\n", mpb_sectors(mpb));
	printf(" Disks : %d\n", mpb->num_disks);
	printf(" RAID Devices : %d\n", mpb->num_raid_devs);
	print_imsm_disk(mpb, super->disks->index, reserved);
	if (super->bbm_log) {
		struct bbm_log *log = super->bbm_log;

		printf("\n");
		printf("Bad Block Management Log:\n");
		printf(" Log Size : %d\n", __le32_to_cpu(mpb->bbm_log_size));
		printf(" Signature : %x\n", __le32_to_cpu(log->signature));
		printf(" Entry Count : %d\n", __le32_to_cpu(log->entry_count));
		printf(" Spare Blocks : %d\n", __le32_to_cpu(log->reserved_spare_block_count));
		printf(" First Spare : %llx\n", __le64_to_cpu(log->first_spare_lba));
	}
	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct mdinfo info;
		struct imsm_dev *dev = __get_imsm_dev(mpb, i);

		/* getinfo/uuid generation are per-volume; select the volume */
		super->current_vol = i;
		getinfo_super_imsm(st, &info);
		fname_from_uuid(st, &info, nbuf, '-');
		print_imsm_dev(dev, nbuf + 5, super->disks->index);
	}
	/* finally the disks other than the local one (already printed) */
	for (i = 0; i < mpb->num_disks; i++) {
		if (i == super->disks->index)
			continue;
		print_imsm_disk(mpb, i, reserved);
	}
}
712
/* --examine --brief: emit mdadm.conf ARRAY lines, one for the container
 * followed by one per member volume
 */
static void brief_examine_super_imsm(struct supertype *st)
{
	/* We just write a generic IMSM ARRAY entry */
	struct mdinfo info;
	char nbuf[64];
	char nbuf1[64];
	struct intel_super *super = st->sb;
	int i;

	if (!super->anchor->num_raid_devs)
		return;

	getinfo_super_imsm(st, &info);
	fname_from_uuid(st, &info, nbuf,'-');
	printf("ARRAY metadata=imsm auto=md UUID=%s\n", nbuf + 5);
	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);

		/* uuid generation is per-volume; select the volume first */
		super->current_vol = i;
		getinfo_super_imsm(st, &info);
		fname_from_uuid(st, &info, nbuf1,'-');
		printf("ARRAY /dev/md/%.16s container=%s\n"
		       " member=%d auto=mdp UUID=%s\n",
		       dev->volume, nbuf + 5, i, nbuf1 + 5);
	}
}
739
740 static void detail_super_imsm(struct supertype *st, char *homehost)
741 {
742 struct mdinfo info;
743 char nbuf[64];
744
745 getinfo_super_imsm(st, &info);
746 fname_from_uuid(st, &info, nbuf,'-');
747 printf("\n UUID : %s\n", nbuf + 5);
748 }
749
750 static void brief_detail_super_imsm(struct supertype *st)
751 {
752 struct mdinfo info;
753 char nbuf[64];
754 getinfo_super_imsm(st, &info);
755 fname_from_uuid(st, &info, nbuf,'-');
756 printf(" UUID=%s", nbuf + 5);
757 }
758
759 static int imsm_read_serial(int fd, char *devname, __u8 *serial);
760 static void fd2devname(int fd, char *name);
761
762 static int imsm_enumerate_ports(const char *hba_path, int port_count, int host_base, int verbose)
763 {
764 /* dump an unsorted list of devices attached to ahci, as well as
765 * non-connected ports
766 */
767 int hba_len = strlen(hba_path) + 1;
768 struct dirent *ent;
769 DIR *dir;
770 char *path = NULL;
771 int err = 0;
772 unsigned long port_mask = (1 << port_count) - 1;
773
774 if (port_count > sizeof(port_mask) * 8) {
775 if (verbose)
776 fprintf(stderr, Name ": port_count %d out of range\n", port_count);
777 return 2;
778 }
779
780 /* scroll through /sys/dev/block looking for devices attached to
781 * this hba
782 */
783 dir = opendir("/sys/dev/block");
784 for (ent = dir ? readdir(dir) : NULL; ent; ent = readdir(dir)) {
785 int fd;
786 char model[64];
787 char vendor[64];
788 char buf[1024];
789 int major, minor;
790 char *device;
791 char *c;
792 int port;
793 int type;
794
795 if (sscanf(ent->d_name, "%d:%d", &major, &minor) != 2)
796 continue;
797 path = devt_to_devpath(makedev(major, minor));
798 if (!path)
799 continue;
800 if (!path_attached_to_hba(path, hba_path)) {
801 free(path);
802 path = NULL;
803 continue;
804 }
805
806 /* retrieve the scsi device type */
807 if (asprintf(&device, "/sys/dev/block/%d:%d/device/xxxxxxx", major, minor) < 0) {
808 if (verbose)
809 fprintf(stderr, Name ": failed to allocate 'device'\n");
810 err = 2;
811 break;
812 }
813 sprintf(device, "/sys/dev/block/%d:%d/device/type", major, minor);
814 if (load_sys(device, buf) != 0) {
815 if (verbose)
816 fprintf(stderr, Name ": failed to read device type for %s\n",
817 path);
818 err = 2;
819 free(device);
820 break;
821 }
822 type = strtoul(buf, NULL, 10);
823
824 /* if it's not a disk print the vendor and model */
825 if (!(type == 0 || type == 7 || type == 14)) {
826 vendor[0] = '\0';
827 model[0] = '\0';
828 sprintf(device, "/sys/dev/block/%d:%d/device/vendor", major, minor);
829 if (load_sys(device, buf) == 0) {
830 strncpy(vendor, buf, sizeof(vendor));
831 vendor[sizeof(vendor) - 1] = '\0';
832 c = (char *) &vendor[sizeof(vendor) - 1];
833 while (isspace(*c) || *c == '\0')
834 *c-- = '\0';
835
836 }
837 sprintf(device, "/sys/dev/block/%d:%d/device/model", major, minor);
838 if (load_sys(device, buf) == 0) {
839 strncpy(model, buf, sizeof(model));
840 model[sizeof(model) - 1] = '\0';
841 c = (char *) &model[sizeof(model) - 1];
842 while (isspace(*c) || *c == '\0')
843 *c-- = '\0';
844 }
845
846 if (vendor[0] && model[0])
847 sprintf(buf, "%.64s %.64s", vendor, model);
848 else
849 switch (type) { /* numbers from hald/linux/device.c */
850 case 1: sprintf(buf, "tape"); break;
851 case 2: sprintf(buf, "printer"); break;
852 case 3: sprintf(buf, "processor"); break;
853 case 4:
854 case 5: sprintf(buf, "cdrom"); break;
855 case 6: sprintf(buf, "scanner"); break;
856 case 8: sprintf(buf, "media_changer"); break;
857 case 9: sprintf(buf, "comm"); break;
858 case 12: sprintf(buf, "raid"); break;
859 default: sprintf(buf, "unknown");
860 }
861 } else
862 buf[0] = '\0';
863 free(device);
864
865 /* chop device path to 'host%d' and calculate the port number */
866 c = strchr(&path[hba_len], '/');
867 *c = '\0';
868 if (sscanf(&path[hba_len], "host%d", &port) == 1)
869 port -= host_base;
870 else {
871 if (verbose) {
872 *c = '/'; /* repair the full string */
873 fprintf(stderr, Name ": failed to determine port number for %s\n",
874 path);
875 }
876 err = 2;
877 break;
878 }
879
880 /* mark this port as used */
881 port_mask &= ~(1 << port);
882
883 /* print out the device information */
884 if (buf[0]) {
885 printf(" Port%d : - non-disk device (%s) -\n", port, buf);
886 continue;
887 }
888
889 fd = dev_open(ent->d_name, O_RDONLY);
890 if (fd < 0)
891 printf(" Port%d : - disk info unavailable -\n", port);
892 else {
893 fd2devname(fd, buf);
894 printf(" Port%d : %s", port, buf);
895 if (imsm_read_serial(fd, NULL, (__u8 *) buf) == 0)
896 printf(" (%s)\n", buf);
897 else
898 printf("()\n");
899 }
900 close(fd);
901 free(path);
902 path = NULL;
903 }
904 if (path)
905 free(path);
906 if (dir)
907 closedir(dir);
908 if (err == 0) {
909 int i;
910
911 for (i = 0; i < port_count; i++)
912 if (port_mask & (1 << i))
913 printf(" Port%d : - no device attached -\n", i);
914 }
915
916 return err;
917 }
918
/* Report platform imsm capability: locate the Intel ahci controller, read
 * the option-rom capability structure, and print supported raid levels,
 * limits and per-port device status.  With 'enumerate_only' just answer
 * whether platform support exists.  Returns 0 on success, 2 on failure.
 */
static int detail_platform_imsm(int verbose, int enumerate_only)
{
	/* There are two components to imsm platform support, the ahci SATA
	 * controller and the option-rom. To find the SATA controller we
	 * simply look in /sys/bus/pci/drivers/ahci to see if an ahci
	 * controller with the Intel vendor id is present. This approach
	 * allows mdadm to leverage the kernel's ahci detection logic, with the
	 * caveat that if ahci.ko is not loaded mdadm will not be able to
	 * detect platform raid capabilities. The option-rom resides in a
	 * platform "Adapter ROM". We scan for its signature to retrieve the
	 * platform capabilities. If raid support is disabled in the BIOS the
	 * option-rom capability structure will not be available.
	 */
	const struct imsm_orom *orom;
	struct sys_dev *list, *hba;
	DIR *dir;
	struct dirent *ent;
	const char *hba_path;
	int host_base = 0;
	int port_count = 0;

	if (enumerate_only) {
		if (check_env("IMSM_NO_PLATFORM") || find_imsm_orom())
			return 0;
		return 2;
	}

	/* find an ahci controller with the Intel vendor id (0x8086) */
	list = find_driver_devices("pci", "ahci");
	for (hba = list; hba; hba = hba->next)
		if (devpath_to_vendor(hba->path) == 0x8086)
			break;

	if (!hba) {
		if (verbose)
			fprintf(stderr, Name ": unable to find active ahci controller\n");
		free_sys_dev(&list);
		return 2;
	} else if (verbose)
		fprintf(stderr, Name ": found Intel SATA AHCI Controller\n");
	/* steal the path string before the list is freed */
	hba_path = hba->path;
	hba->path = NULL;
	free_sys_dev(&list);

	orom = find_imsm_orom();
	if (!orom) {
		if (verbose)
			fprintf(stderr, Name ": imsm option-rom not found\n");
		return 2;
	}

	printf(" Platform : Intel(R) Matrix Storage Manager\n");
	printf(" Version : %d.%d.%d.%d\n", orom->major_ver, orom->minor_ver,
	       orom->hotfix_ver, orom->build);
	printf(" RAID Levels :%s%s%s%s%s\n",
	       imsm_orom_has_raid0(orom) ? " raid0" : "",
	       imsm_orom_has_raid1(orom) ? " raid1" : "",
	       imsm_orom_has_raid1e(orom) ? " raid1e" : "",
	       imsm_orom_has_raid10(orom) ? " raid10" : "",
	       imsm_orom_has_raid5(orom) ? " raid5" : "");
	printf(" Max Disks : %d\n", orom->tds);
	printf(" Max Volumes : %d\n", orom->vpa);
	printf(" I/O Controller : %s\n", hba_path);

	/* find the smallest scsi host number to determine a port number base */
	dir = opendir(hba_path);
	for (ent = dir ? readdir(dir) : NULL; ent; ent = readdir(dir)) {
		int host;

		if (sscanf(ent->d_name, "host%d", &host) != 1)
			continue;
		if (port_count == 0)
			host_base = host;
		else if (host < host_base)
			host_base = host;

		if (host + 1 > port_count + host_base)
			port_count = host + 1 - host_base;

	}
	if (dir)
		closedir(dir);

	if (!port_count || imsm_enumerate_ports(hba_path, port_count,
						host_base, verbose) != 0) {
		if (verbose)
			fprintf(stderr, Name ": failed to enumerate ports\n");
		return 2;
	}

	return 0;
}
1010 #endif
1011
/* imsm metadata carries no host identification, so membership can never
 * be confirmed nor denied from the metadata alone.  Always returns -1
 * ("unknown"): compare_super and 'family_num' exclude foreign member
 * disks, and mdadm.conf names the arrays to assemble; auto-assembly may
 * still pick up "foreign" arrays.
 */
static int match_home_imsm(struct supertype *st, char *homehost)
{
	return -1;
}
1025
1026 static void uuid_from_super_imsm(struct supertype *st, int uuid[4])
1027 {
1028 /* The uuid returned here is used for:
1029 * uuid to put into bitmap file (Create, Grow)
1030 * uuid for backup header when saving critical section (Grow)
1031 * comparing uuids when re-adding a device into an array
1032 * In these cases the uuid required is that of the data-array,
1033 * not the device-set.
1034 * uuid to recognise same set when adding a missing device back
1035 * to an array. This is a uuid for the device-set.
1036 *
1037 * For each of these we can make do with a truncated
1038 * or hashed uuid rather than the original, as long as
1039 * everyone agrees.
1040 * In each case the uuid required is that of the data-array,
1041 * not the device-set.
1042 */
1043 /* imsm does not track uuid's so we synthesis one using sha1 on
1044 * - The signature (Which is constant for all imsm array, but no matter)
1045 * - the family_num of the container
1046 * - the index number of the volume
1047 * - the 'serial' number of the volume.
1048 * Hopefully these are all constant.
1049 */
1050 struct intel_super *super = st->sb;
1051
1052 char buf[20];
1053 struct sha1_ctx ctx;
1054 struct imsm_dev *dev = NULL;
1055
1056 sha1_init_ctx(&ctx);
1057 sha1_process_bytes(super->anchor->sig, MPB_SIG_LEN, &ctx);
1058 sha1_process_bytes(&super->anchor->family_num, sizeof(__u32), &ctx);
1059 if (super->current_vol >= 0)
1060 dev = get_imsm_dev(super, super->current_vol);
1061 if (dev) {
1062 __u32 vol = super->current_vol;
1063 sha1_process_bytes(&vol, sizeof(vol), &ctx);
1064 sha1_process_bytes(dev->volume, MAX_RAID_SERIAL_LEN, &ctx);
1065 }
1066 sha1_finish_ctx(&ctx, buf);
1067 memcpy(uuid, buf, 4*4);
1068 }
1069
#if 0
/* Disabled: parse the numeric components out of the version string
 * embedded in the mpb signature ("major.minor.patch").
 * NOTE(review): despite the parameter name, *m receives the MINOR
 * component and *p the patch level; 'major' is parsed but never
 * returned.  Verify intent before re-enabling.
 */
static void
get_imsm_numerical_version(struct imsm_super *mpb, int *m, int *p)
{
	__u8 *v = get_imsm_version(mpb);
	__u8 *end = mpb->sig + MAX_SIGNATURE_LENGTH;
	char major[] = { 0, 0, 0 };
	char minor[] = { 0 ,0, 0 };
	char patch[] = { 0, 0, 0 };
	char *ver_parse[] = { major, minor, patch };
	int i, j;

	i = j = 0;
	/* walk the signature buffer, splitting on '.' into the three
	 * two-character component buffers
	 */
	while (*v != '\0' && v < end) {
		if (*v != '.' && j < 2)
			ver_parse[i][j++] = *v;
		else {
			i++;
			j = 0;
		}
		v++;
	}

	*m = strtol(minor, NULL, 0);
	*p = strtol(patch, NULL, 0);
}
#endif
1097
1098 static int imsm_level_to_layout(int level)
1099 {
1100 switch (level) {
1101 case 0:
1102 case 1:
1103 return 0;
1104 case 5:
1105 case 6:
1106 return ALGORITHM_LEFT_ASYMMETRIC;
1107 case 10:
1108 return 0x102;
1109 }
1110 return UnSet;
1111 }
1112
static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info)
{
	/* Fill 'info' with the md-level view of the currently selected
	 * volume (super->current_vol) of an imsm container.
	 */
	struct intel_super *super = st->sb;
	struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
	struct imsm_map *map = get_imsm_map(dev, 0);

	info->container_member = super->current_vol;
	info->array.raid_disks = map->num_members;
	info->array.level = get_imsm_raid_level(map);
	info->array.layout = imsm_level_to_layout(info->array.level);
	info->array.md_minor = -1;
	info->array.ctime = 0;
	info->array.utime = 0;
	/* blocks_per_strip is in 512-byte sectors; chunk_size is bytes */
	info->array.chunk_size = __le16_to_cpu(map->blocks_per_strip) << 9;
	info->array.state = !dev->vol.dirty;

	info->disk.major = 0;
	info->disk.minor = 0;

	info->data_offset = __le32_to_cpu(map->pba_of_lba0);
	info->component_size = __le32_to_cpu(map->blocks_per_member);
	memset(info->uuid, 0, sizeof(info->uuid));

	if (map->map_state == IMSM_T_STATE_UNINITIALIZED || dev->vol.dirty)
		info->resync_start = 0;
	else if (dev->vol.migr_state)
		/* NOTE(review): curr_migr_unit is reported directly as the
		 * resync position -- assumes its unit matches md's
		 * resync_start unit; confirm.
		 */
		info->resync_start = __le32_to_cpu(dev->vol.curr_migr_unit);
	else
		info->resync_start = ~0ULL;	/* no resync in progress */

	strncpy(info->name, (char *) dev->volume, MAX_RAID_SERIAL_LEN);
	info->name[MAX_RAID_SERIAL_LEN] = 0;

	/* external-metadata convention: version -1/-2 plus a
	 * "/<container>/<member index>" text version
	 */
	info->array.major_version = -1;
	info->array.minor_version = -2;
	sprintf(info->text_version, "/%s/%d",
		devnum2devname(st->container_dev),
		info->container_member);
	info->safe_mode_delay = 4000; /* 4 secs like the Matrix driver */
	uuid_from_super_imsm(st, info->uuid);
}
1154
1155 /* check the config file to see if we can return a real uuid for this spare */
1156 static void fixup_container_spare_uuid(struct mdinfo *inf)
1157 {
1158 struct mddev_ident_s *array_list;
1159
1160 if (inf->array.level != LEVEL_CONTAINER ||
1161 memcmp(inf->uuid, uuid_match_any, sizeof(int[4])) != 0)
1162 return;
1163
1164 array_list = conf_get_ident(NULL);
1165
1166 for (; array_list; array_list = array_list->next) {
1167 if (array_list->uuid_set) {
1168 struct supertype *_sst; /* spare supertype */
1169 struct supertype *_cst; /* container supertype */
1170
1171 _cst = array_list->st;
1172 _sst = _cst->ss->match_metadata_desc(inf->text_version);
1173 if (_sst) {
1174 memcpy(inf->uuid, array_list->uuid, sizeof(int[4]));
1175 free(_sst);
1176 break;
1177 }
1178 }
1179 }
1180 }
1181
static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info)
{
	/* Fill 'info' for the container itself, or delegate to the
	 * per-volume variant when a volume is currently selected.
	 */
	struct intel_super *super = st->sb;
	struct imsm_disk *disk;
	__u32 s;

	if (super->current_vol >= 0) {
		getinfo_super_imsm_volume(st, info);
		return;
	}

	/* Set raid_disks to zero so that Assemble will always pull in valid
	 * spares
	 */
	info->array.raid_disks = 0;
	info->array.level = LEVEL_CONTAINER;
	info->array.layout = 0;
	info->array.md_minor = -1;
	info->array.ctime = 0; /* N/A for imsm */
	info->array.utime = 0;
	info->array.chunk_size = 0;

	info->disk.major = 0;
	info->disk.minor = 0;
	info->disk.raid_disk = -1;
	info->reshape_active = 0;
	info->array.major_version = -1;
	info->array.minor_version = -2;
	strcpy(info->text_version, "imsm");
	info->safe_mode_delay = 0;
	info->disk.number = -1;
	info->disk.state = 0;
	info->name[0] = 0;

	if (super->disks) {
		/* describe the first disk on the list: data_offset and
		 * component_size are derived from the reserved metadata
		 * region at the end of the disk
		 */
		__u32 reserved = imsm_reserved_sectors(super, super->disks);

		disk = &super->disks->disk;
		info->data_offset = __le32_to_cpu(disk->total_blocks) - reserved;
		info->component_size = reserved;
		s = disk->status;
		info->disk.state = s & CONFIGURED_DISK ? (1 << MD_DISK_ACTIVE) : 0;
		/* spares are not in-sync; index -2 marks an orphan/failed slot */
		info->disk.state |= s & SPARE_DISK ? 0 : (1 << MD_DISK_SYNC);
		if (s & FAILED_DISK || super->disks->index == -2) {
			info->disk.state |= 1 << MD_DISK_FAULTY;
			info->disk.raid_disk = -2;
		}
	}

	/* only call uuid_from_super_imsm when this disk is part of a populated container,
	 * ->compare_super may have updated the 'num_raid_devs' field for spares
	 */
	if (info->disk.state & (1 << MD_DISK_SYNC) || super->anchor->num_raid_devs)
		uuid_from_super_imsm(st, info->uuid);
	else {
		/* free-floating spare: use the wildcard uuid, then try to
		 * adopt a real one from mdadm.conf
		 */
		memcpy(info->uuid, uuid_match_any, sizeof(int[4]));
		fixup_container_spare_uuid(info);
	}
}
1241
static int update_super_imsm(struct supertype *st, struct mdinfo *info,
			     char *update, char *devname, int verbose,
			     int uuid_set, char *homehost)
{
	/* FIXME: largely unimplemented for imsm.
	 *
	 * For 'assemble' and 'force' the return value must be non-zero
	 * when a change was made; other callers ignore it.  Updates that
	 * would matter here:
	 *   force-one   - include a slightly stale device, fix up age info
	 *   assemble    - clear a 'faulty' flag so the device assembles
	 *   force-array - mark a degraded-but-forced array clean
	 *   newdev      - not used ????
	 *   grow        - a device was added (currently linear only)
	 *   resync      - mark dirty so a resync happens
	 *   name        - rename, preserving the homehost
	 * Not relevant for imsm: sparc2.2, super-minor, summaries, uuid,
	 * homehost, _reshape_progress (imsm has no concept of uuid or
	 * homehost).
	 */
	int rv = 0;

	if (strcmp(update, "grow") == 0) {
		/* not yet implemented */
	}
	if (strcmp(update, "resync") == 0) {
		/* dev->vol.dirty = 1; */
	}

	return rv;
}
1286
1287 static size_t disks_to_mpb_size(int disks)
1288 {
1289 size_t size;
1290
1291 size = sizeof(struct imsm_super);
1292 size += (disks - 1) * sizeof(struct imsm_disk);
1293 size += 2 * sizeof(struct imsm_dev);
1294 /* up to 2 maps per raid device (-2 for imsm_maps in imsm_dev */
1295 size += (4 - 2) * sizeof(struct imsm_map);
1296 /* 4 possible disk_ord_tbl's */
1297 size += 4 * (disks - 1) * sizeof(__u32);
1298
1299 return size;
1300 }
1301
1302 static __u64 avail_size_imsm(struct supertype *st, __u64 devsize)
1303 {
1304 if (devsize < (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS))
1305 return 0;
1306
1307 return devsize - (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
1308 }
1309
1310 static void free_devlist(struct intel_super *super)
1311 {
1312 struct intel_dev *dv;
1313
1314 while (super->devlist) {
1315 dv = super->devlist->next;
1316 free(super->devlist->dev);
1317 free(super->devlist);
1318 super->devlist = dv;
1319 }
1320 }
1321
/* copy a raid device record including its embedded first map (but not a
 * second, migration-time map)
 */
static void imsm_copy_dev(struct imsm_dev *dest, struct imsm_dev *src)
{
	memcpy(dest, src, sizeof_imsm_dev(src, 0));
}
1326
1327 static int compare_super_imsm(struct supertype *st, struct supertype *tst)
1328 {
1329 /*
1330 * return:
1331 * 0 same, or first was empty, and second was copied
1332 * 1 second had wrong number
1333 * 2 wrong uuid
1334 * 3 wrong other info
1335 */
1336 struct intel_super *first = st->sb;
1337 struct intel_super *sec = tst->sb;
1338
1339 if (!first) {
1340 st->sb = tst->sb;
1341 tst->sb = NULL;
1342 return 0;
1343 }
1344
1345 if (memcmp(first->anchor->sig, sec->anchor->sig, MAX_SIGNATURE_LENGTH) != 0)
1346 return 3;
1347
1348 /* if an anchor does not have num_raid_devs set then it is a free
1349 * floating spare
1350 */
1351 if (first->anchor->num_raid_devs > 0 &&
1352 sec->anchor->num_raid_devs > 0) {
1353 if (first->anchor->family_num != sec->anchor->family_num)
1354 return 3;
1355 }
1356
1357 /* if 'first' is a spare promote it to a populated mpb with sec's
1358 * family number
1359 */
1360 if (first->anchor->num_raid_devs == 0 &&
1361 sec->anchor->num_raid_devs > 0) {
1362 int i;
1363 struct intel_dev *dv;
1364 struct imsm_dev *dev;
1365
1366 /* we need to copy raid device info from sec if an allocation
1367 * fails here we don't associate the spare
1368 */
1369 for (i = 0; i < sec->anchor->num_raid_devs; i++) {
1370 dv = malloc(sizeof(*dv));
1371 if (!dv)
1372 break;
1373 dev = malloc(sizeof_imsm_dev(get_imsm_dev(sec, i), 1));
1374 if (!dev) {
1375 free(dv);
1376 break;
1377 }
1378 dv->dev = dev;
1379 dv->index = i;
1380 dv->next = first->devlist;
1381 first->devlist = dv;
1382 }
1383 if (i <= sec->anchor->num_raid_devs) {
1384 /* allocation failure */
1385 free_devlist(first);
1386 fprintf(stderr, "imsm: failed to associate spare\n");
1387 return 3;
1388 }
1389 for (i = 0; i < sec->anchor->num_raid_devs; i++)
1390 imsm_copy_dev(get_imsm_dev(first, i), get_imsm_dev(sec, i));
1391
1392 first->anchor->num_raid_devs = sec->anchor->num_raid_devs;
1393 first->anchor->family_num = sec->anchor->family_num;
1394 }
1395
1396 return 0;
1397 }
1398
1399 static void fd2devname(int fd, char *name)
1400 {
1401 struct stat st;
1402 char path[256];
1403 char dname[100];
1404 char *nm;
1405 int rv;
1406
1407 name[0] = '\0';
1408 if (fstat(fd, &st) != 0)
1409 return;
1410 sprintf(path, "/sys/dev/block/%d:%d",
1411 major(st.st_rdev), minor(st.st_rdev));
1412
1413 rv = readlink(path, dname, sizeof(dname));
1414 if (rv <= 0)
1415 return;
1416
1417 dname[rv] = '\0';
1418 nm = strrchr(dname, '/');
1419 nm++;
1420 snprintf(name, MAX_RAID_SERIAL_LEN, "/dev/%s", nm);
1421 }
1422
1423
1424 extern int scsi_get_serial(int fd, void *buf, size_t buf_len);
1425
1426 static int imsm_read_serial(int fd, char *devname,
1427 __u8 serial[MAX_RAID_SERIAL_LEN])
1428 {
1429 unsigned char scsi_serial[255];
1430 int rv;
1431 int rsp_len;
1432 int len;
1433 char *c, *rsp_buf;
1434
1435 memset(scsi_serial, 0, sizeof(scsi_serial));
1436
1437 rv = scsi_get_serial(fd, scsi_serial, sizeof(scsi_serial));
1438
1439 if (rv && check_env("IMSM_DEVNAME_AS_SERIAL")) {
1440 memset(serial, 0, MAX_RAID_SERIAL_LEN);
1441 fd2devname(fd, (char *) serial);
1442 return 0;
1443 }
1444
1445 if (rv != 0) {
1446 if (devname)
1447 fprintf(stderr,
1448 Name ": Failed to retrieve serial for %s\n",
1449 devname);
1450 return rv;
1451 }
1452
1453 /* trim leading whitespace */
1454 rsp_len = scsi_serial[3];
1455 if (!rsp_len) {
1456 if (devname)
1457 fprintf(stderr,
1458 Name ": Failed to retrieve serial for %s\n",
1459 devname);
1460 return 2;
1461 }
1462 rsp_buf = (char *) &scsi_serial[4];
1463 c = rsp_buf;
1464 while (isspace(*c))
1465 c++;
1466
1467 /* truncate len to the end of rsp_buf if necessary */
1468 if (c + MAX_RAID_SERIAL_LEN > rsp_buf + rsp_len)
1469 len = rsp_len - (c - rsp_buf);
1470 else
1471 len = MAX_RAID_SERIAL_LEN;
1472
1473 /* initialize the buffer and copy rsp_buf characters */
1474 memset(serial, 0, MAX_RAID_SERIAL_LEN);
1475 memcpy(serial, c, len);
1476
1477 /* trim trailing whitespace starting with the last character copied */
1478 c = (char *) &serial[len - 1];
1479 while (isspace(*c) || *c == '\0')
1480 *c-- = '\0';
1481
1482 return 0;
1483 }
1484
/* compare two serials over at most MAX_RAID_SERIAL_LEN bytes; serial
 * fields are fixed width and not necessarily NUL terminated
 */
static int serialcmp(__u8 *s1, __u8 *s2)
{
	return strncmp((char *) s1, (char *) s2, MAX_RAID_SERIAL_LEN);
}
1489
/* copy a serial into a fixed-width field; strncpy's zero padding is
 * intentional here (short serials are padded, max-length serials are
 * deliberately left unterminated)
 */
static void serialcpy(__u8 *dest, __u8 *src)
{
	strncpy((char *) dest, (char *) src, MAX_RAID_SERIAL_LEN);
}
1494
1495 static struct dl *serial_to_dl(__u8 *serial, struct intel_super *super)
1496 {
1497 struct dl *dl;
1498
1499 for (dl = super->disks; dl; dl = dl->next)
1500 if (serialcmp(dl->serial, serial) == 0)
1501 break;
1502
1503 return dl;
1504 }
1505
static int
load_imsm_disk(int fd, struct intel_super *super, char *devname, int keep_fd)
{
	/* Read the serial of 'fd' and add (or refresh) its entry on
	 * super->disks, then resolve its index against the current
	 * anchor.  Returns 0 on success, 2 on serial-read or allocation
	 * failure.
	 */
	struct dl *dl;
	struct stat stb;
	int rv;
	int i;
	int alloc = 1;
	__u8 serial[MAX_RAID_SERIAL_LEN];

	rv = imsm_read_serial(fd, devname, serial);

	if (rv != 0)
		return 2;

	/* check if this is a disk we have seen before. it may be a spare in
	 * super->disks while the current anchor believes it is a raid member,
	 * check if we need to update dl->index
	 */
	dl = serial_to_dl(serial, super);
	if (!dl)
		dl = malloc(sizeof(*dl));
	else
		alloc = 0;

	if (!dl) {
		if (devname)
			fprintf(stderr,
				Name ": failed to allocate disk buffer for %s\n",
				devname);
		return 2;
	}

	if (alloc) {
		/* fresh entry: record identity; take ownership of fd only
		 * when keep_fd is set
		 */
		fstat(fd, &stb);
		dl->major = major(stb.st_rdev);
		dl->minor = minor(stb.st_rdev);
		dl->next = super->disks;
		dl->fd = keep_fd ? fd : -1;
		dl->devname = devname ? strdup(devname) : NULL;
		serialcpy(dl->serial, serial);
		dl->index = -2;		/* unknown until resolved below */
		dl->e = NULL;
	} else if (keep_fd) {
		/* existing entry: swap in the newer descriptor */
		close(dl->fd);
		dl->fd = fd;
	}

	/* look up this disk's index in the current anchor */
	for (i = 0; i < super->anchor->num_disks; i++) {
		struct imsm_disk *disk_iter;

		disk_iter = __get_imsm_disk(super->anchor, i);

		if (serialcmp(disk_iter->serial, dl->serial) == 0) {
			dl->disk = *disk_iter;
			/* only set index on disks that are a member of a
			 * populated container, i.e. one with raid_devs
			 */
			if (dl->disk.status & FAILED_DISK)
				dl->index = -2;
			else if (dl->disk.status & SPARE_DISK)
				dl->index = -1;
			else
				dl->index = i;

			break;
		}
	}

	/* no match, maybe a stale failed drive */
	if (i == super->anchor->num_disks && dl->index >= 0) {
		dl->disk = *__get_imsm_disk(super->anchor, dl->index);
		if (dl->disk.status & FAILED_DISK)
			dl->index = -2;
	}

	if (alloc)
		super->disks = dl;

	return 0;
}
1588
1589 #ifndef MDASSEMBLE
1590 /* When migrating map0 contains the 'destination' state while map1
1591 * contains the current state. When not migrating map0 contains the
1592 * current state. This routine assumes that map[0].map_state is set to
1593 * the current array state before being called.
1594 *
1595 * Migration is indicated by one of the following states
1596 * 1/ Idle (migr_state=0 map0state=normal||unitialized||degraded||failed)
1597 * 2/ Initialize (migr_state=1 migr_type=MIGR_INIT map0state=normal
1598 * map1state=unitialized)
1599 * 3/ Verify (Resync) (migr_state=1 migr_type=MIGR_REBUILD map0state=normal
1600 * map1state=normal)
1601 * 4/ Rebuild (migr_state=1 migr_type=MIGR_REBUILD map0state=normal
1602 * map1state=degraded)
1603 */
1604 static void migrate(struct imsm_dev *dev, __u8 to_state, int rebuild_resync)
1605 {
1606 struct imsm_map *dest;
1607 struct imsm_map *src = get_imsm_map(dev, 0);
1608
1609 dev->vol.migr_state = 1;
1610 dev->vol.migr_type = rebuild_resync;
1611 dev->vol.curr_migr_unit = 0;
1612 dest = get_imsm_map(dev, 1);
1613
1614 memcpy(dest, src, sizeof_imsm_map(src));
1615 src->map_state = to_state;
1616 }
1617
1618 static void end_migration(struct imsm_dev *dev, __u8 map_state)
1619 {
1620 struct imsm_map *map = get_imsm_map(dev, 0);
1621
1622 dev->vol.migr_state = 0;
1623 dev->vol.curr_migr_unit = 0;
1624 map->map_state = map_state;
1625 }
1626 #endif
1627
1628 static int parse_raid_devices(struct intel_super *super)
1629 {
1630 int i;
1631 struct imsm_dev *dev_new;
1632 size_t len, len_migr;
1633 size_t space_needed = 0;
1634 struct imsm_super *mpb = super->anchor;
1635
1636 for (i = 0; i < super->anchor->num_raid_devs; i++) {
1637 struct imsm_dev *dev_iter = __get_imsm_dev(super->anchor, i);
1638 struct intel_dev *dv;
1639
1640 len = sizeof_imsm_dev(dev_iter, 0);
1641 len_migr = sizeof_imsm_dev(dev_iter, 1);
1642 if (len_migr > len)
1643 space_needed += len_migr - len;
1644
1645 dv = malloc(sizeof(*dv));
1646 if (!dv)
1647 return 1;
1648 dev_new = malloc(len_migr);
1649 if (!dev_new) {
1650 free(dv);
1651 return 1;
1652 }
1653 imsm_copy_dev(dev_new, dev_iter);
1654 dv->dev = dev_new;
1655 dv->index = i;
1656 dv->next = super->devlist;
1657 super->devlist = dv;
1658 }
1659
1660 /* ensure that super->buf is large enough when all raid devices
1661 * are migrating
1662 */
1663 if (__le32_to_cpu(mpb->mpb_size) + space_needed > super->len) {
1664 void *buf;
1665
1666 len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) + space_needed, 512);
1667 if (posix_memalign(&buf, 512, len) != 0)
1668 return 1;
1669
1670 memcpy(buf, super->buf, len);
1671 free(super->buf);
1672 super->buf = buf;
1673 super->len = len;
1674 }
1675
1676 return 0;
1677 }
1678
/* retrieve a pointer to the bbm log which starts after all raid devices */
struct bbm_log *__get_imsm_bbm_log(struct imsm_super *mpb)
{
	/* Returns NULL when no bbm log is present.
	 * NOTE(review): mpb->mpb_size is used without __le32_to_cpu()
	 * while bbm_log_size is converted -- harmless on little-endian
	 * hosts but worth confirming for big-endian.
	 */
	void *ptr = NULL;

	if (__le32_to_cpu(mpb->bbm_log_size)) {
		ptr = mpb;
		ptr += mpb->mpb_size - __le32_to_cpu(mpb->bbm_log_size);
	}

	return ptr;
}
1691
1692 static void __free_imsm(struct intel_super *super, int free_disks);
1693
1694 /* load_imsm_mpb - read matrix metadata
1695 * allocates super->mpb to be freed by free_super
1696 */
1697 static int load_imsm_mpb(int fd, struct intel_super *super, char *devname)
1698 {
1699 unsigned long long dsize;
1700 unsigned long long sectors;
1701 struct stat;
1702 struct imsm_super *anchor;
1703 __u32 check_sum;
1704 int rc;
1705
1706 get_dev_size(fd, NULL, &dsize);
1707
1708 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0) {
1709 if (devname)
1710 fprintf(stderr,
1711 Name ": Cannot seek to anchor block on %s: %s\n",
1712 devname, strerror(errno));
1713 return 1;
1714 }
1715
1716 if (posix_memalign((void**)&anchor, 512, 512) != 0) {
1717 if (devname)
1718 fprintf(stderr,
1719 Name ": Failed to allocate imsm anchor buffer"
1720 " on %s\n", devname);
1721 return 1;
1722 }
1723 if (read(fd, anchor, 512) != 512) {
1724 if (devname)
1725 fprintf(stderr,
1726 Name ": Cannot read anchor block on %s: %s\n",
1727 devname, strerror(errno));
1728 free(anchor);
1729 return 1;
1730 }
1731
1732 if (strncmp((char *) anchor->sig, MPB_SIGNATURE, MPB_SIG_LEN) != 0) {
1733 if (devname)
1734 fprintf(stderr,
1735 Name ": no IMSM anchor on %s\n", devname);
1736 free(anchor);
1737 return 2;
1738 }
1739
1740 __free_imsm(super, 0);
1741 super->len = ROUND_UP(anchor->mpb_size, 512);
1742 if (posix_memalign(&super->buf, 512, super->len) != 0) {
1743 if (devname)
1744 fprintf(stderr,
1745 Name ": unable to allocate %zu byte mpb buffer\n",
1746 super->len);
1747 free(anchor);
1748 return 2;
1749 }
1750 memcpy(super->buf, anchor, 512);
1751
1752 sectors = mpb_sectors(anchor) - 1;
1753 free(anchor);
1754 if (!sectors) {
1755 rc = load_imsm_disk(fd, super, devname, 0);
1756 if (rc == 0)
1757 rc = parse_raid_devices(super);
1758 return rc;
1759 }
1760
1761 /* read the extended mpb */
1762 if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0) {
1763 if (devname)
1764 fprintf(stderr,
1765 Name ": Cannot seek to extended mpb on %s: %s\n",
1766 devname, strerror(errno));
1767 return 1;
1768 }
1769
1770 if (read(fd, super->buf + 512, super->len - 512) != super->len - 512) {
1771 if (devname)
1772 fprintf(stderr,
1773 Name ": Cannot read extended mpb on %s: %s\n",
1774 devname, strerror(errno));
1775 return 2;
1776 }
1777
1778 check_sum = __gen_imsm_checksum(super->anchor);
1779 if (check_sum != __le32_to_cpu(super->anchor->check_sum)) {
1780 if (devname)
1781 fprintf(stderr,
1782 Name ": IMSM checksum %x != %x on %s\n",
1783 check_sum, __le32_to_cpu(super->anchor->check_sum),
1784 devname);
1785 return 2;
1786 }
1787
1788 /* FIXME the BBM log is disk specific so we cannot use this global
1789 * buffer for all disks. Ok for now since we only look at the global
1790 * bbm_log_size parameter to gate assembly
1791 */
1792 super->bbm_log = __get_imsm_bbm_log(super->anchor);
1793
1794 rc = load_imsm_disk(fd, super, devname, 0);
1795 if (rc == 0)
1796 rc = parse_raid_devices(super);
1797
1798 return rc;
1799 }
1800
1801 static void __free_imsm_disk(struct dl *d)
1802 {
1803 if (d->fd >= 0)
1804 close(d->fd);
1805 if (d->devname)
1806 free(d->devname);
1807 if (d->e)
1808 free(d->e);
1809 free(d);
1810
1811 }
1812 static void free_imsm_disks(struct intel_super *super)
1813 {
1814 struct dl *d;
1815
1816 while (super->disks) {
1817 d = super->disks;
1818 super->disks = d->next;
1819 __free_imsm_disk(d);
1820 }
1821 while (super->missing) {
1822 d = super->missing;
1823 super->missing = d->next;
1824 __free_imsm_disk(d);
1825 }
1826
1827 }
1828
1829 /* free all the pieces hanging off of a super pointer */
1830 static void __free_imsm(struct intel_super *super, int free_disks)
1831 {
1832 if (super->buf) {
1833 free(super->buf);
1834 super->buf = NULL;
1835 }
1836 if (free_disks)
1837 free_imsm_disks(super);
1838 free_devlist(super);
1839 if (super->hba) {
1840 free((void *) super->hba);
1841 super->hba = NULL;
1842 }
1843 }
1844
/* tear down a superblock completely: disks, devlist, buffers, and the
 * intel_super itself
 */
static void free_imsm(struct intel_super *super)
{
	__free_imsm(super, 1);
	free(super);
}
1850
1851 static void free_super_imsm(struct supertype *st)
1852 {
1853 struct intel_super *super = st->sb;
1854
1855 if (!super)
1856 return;
1857
1858 free_imsm(super);
1859 st->sb = NULL;
1860 }
1861
1862 static struct intel_super *alloc_super(int creating_imsm)
1863 {
1864 struct intel_super *super = malloc(sizeof(*super));
1865
1866 if (super) {
1867 memset(super, 0, sizeof(*super));
1868 super->creating_imsm = creating_imsm;
1869 super->current_vol = -1;
1870 super->create_offset = ~((__u32 ) 0);
1871 if (!check_env("IMSM_NO_PLATFORM"))
1872 super->orom = find_imsm_orom();
1873 if (super->orom && !check_env("IMSM_TEST_OROM")) {
1874 struct sys_dev *list, *ent;
1875
1876 /* find the first intel ahci controller */
1877 list = find_driver_devices("pci", "ahci");
1878 for (ent = list; ent; ent = ent->next)
1879 if (devpath_to_vendor(ent->path) == 0x8086)
1880 break;
1881 if (ent) {
1882 super->hba = ent->path;
1883 ent->path = NULL;
1884 }
1885 free_sys_dev(&list);
1886 }
1887 }
1888
1889 return super;
1890 }
1891
1892 #ifndef MDASSEMBLE
1893 /* find_missing - helper routine for load_super_imsm_all that identifies
1894 * disks that have disappeared from the system. This routine relies on
1895 * the mpb being uptodate, which it is at load time.
1896 */
1897 static int find_missing(struct intel_super *super)
1898 {
1899 int i;
1900 struct imsm_super *mpb = super->anchor;
1901 struct dl *dl;
1902 struct imsm_disk *disk;
1903
1904 for (i = 0; i < mpb->num_disks; i++) {
1905 disk = __get_imsm_disk(mpb, i);
1906 dl = serial_to_dl(disk->serial, super);
1907 if (dl)
1908 continue;
1909 /* ok we have a 'disk' without a live entry in
1910 * super->disks
1911 */
1912 if (disk->status & FAILED_DISK || !(disk->status & USABLE_DISK))
1913 continue; /* never mind, already marked */
1914
1915 dl = malloc(sizeof(*dl));
1916 if (!dl)
1917 return 1;
1918 dl->major = 0;
1919 dl->minor = 0;
1920 dl->fd = -1;
1921 dl->devname = strdup("missing");
1922 dl->index = i;
1923 serialcpy(dl->serial, disk->serial);
1924 dl->disk = *disk;
1925 dl->e = NULL;
1926 dl->next = super->missing;
1927 super->missing = dl;
1928 }
1929
1930 return 0;
1931 }
1932
static int load_super_imsm_all(struct supertype *st, int fd, void **sbp,
			       char *devname, int keep_fd)
{
	/* Load container metadata by scanning every member disk of the
	 * active array behind 'fd', keeping the anchor with the highest
	 * generation number, then re-reading each disk against that
	 * anchor.  On success *sbp owns the new intel_super.
	 * Returns 0 on success, 1 when 'fd' is not an imsm container,
	 * 2 on load errors.
	 */
	struct mdinfo *sra;
	struct intel_super *super;
	struct mdinfo *sd, *best = NULL;
	__u32 bestgen = 0;
	__u32 gen;
	char nm[20];
	int dfd;
	int rv;

	/* check if this disk is a member of an active array */
	sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
	if (!sra)
		return 1;

	/* NOTE(review): 'sra' appears to be heap-allocated by sysfs_read
	 * and is never released on any path below -- confirm and free.
	 */
	if (sra->array.major_version != -1 ||
	    sra->array.minor_version != -2 ||
	    strcmp(sra->text_version, "imsm") != 0)
		return 1;

	super = alloc_super(0);
	if (!super)
		return 1;

	/* find the most up to date disk in this array, skipping spares */
	for (sd = sra->devs; sd; sd = sd->next) {
		sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
		dfd = dev_open(nm, keep_fd ? O_RDWR : O_RDONLY);
		if (dfd < 0) {
			free_imsm(super);
			return 2;
		}
		rv = load_imsm_mpb(dfd, super, NULL);
		if (!keep_fd)
			close(dfd);
		if (rv == 0) {
			/* spares (no raid devs) rank below any real anchor */
			if (super->anchor->num_raid_devs == 0)
				gen = 0;
			else
				gen = __le32_to_cpu(super->anchor->generation_num);
			if (!best || gen > bestgen) {
				bestgen = gen;
				best = sd;
			}
		} else {
			free_imsm(super);
			return 2;
		}
	}

	if (!best) {
		free_imsm(super);
		return 1;
	}

	/* load the most up to date anchor */
	sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
	dfd = dev_open(nm, O_RDONLY);
	if (dfd < 0) {
		free_imsm(super);
		return 1;
	}
	rv = load_imsm_mpb(dfd, super, NULL);
	close(dfd);
	if (rv != 0) {
		free_imsm(super);
		return 2;
	}

	/* re-parse the disk list with the current anchor */
	for (sd = sra->devs ; sd ; sd = sd->next) {
		sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
		dfd = dev_open(nm, keep_fd? O_RDWR : O_RDONLY);
		if (dfd < 0) {
			free_imsm(super);
			return 2;
		}
		load_imsm_disk(dfd, super, NULL, keep_fd);
		if (!keep_fd)
			close(dfd);
	}


	if (find_missing(super) != 0) {
		free_imsm(super);
		return 2;
	}

	if (st->subarray[0]) {
		/* NOTE(review): valid volume indices look like
		 * 0..num_raid_devs-1, so '<=' may accept one past the
		 * end; this error path also leaks 'super' -- confirm.
		 */
		if (atoi(st->subarray) <= super->anchor->num_raid_devs)
			super->current_vol = atoi(st->subarray);
		else
			return 1;
	}

	*sbp = super;
	st->container_dev = fd2devnum(fd);
	if (st->ss == NULL) {
		st->ss = &super_imsm;
		st->minor_version = 0;
		st->max_devs = IMSM_MAX_DEVICES;
	}
	st->loaded_container = 1;

	return 0;
}
2041 #endif
2042
2043 static int load_super_imsm(struct supertype *st, int fd, char *devname)
2044 {
2045 struct intel_super *super;
2046 int rv;
2047
2048 #ifndef MDASSEMBLE
2049 if (load_super_imsm_all(st, fd, &st->sb, devname, 1) == 0)
2050 return 0;
2051 #endif
2052 if (st->subarray[0])
2053 return 1; /* FIXME */
2054
2055 super = alloc_super(0);
2056 if (!super) {
2057 fprintf(stderr,
2058 Name ": malloc of %zu failed.\n",
2059 sizeof(*super));
2060 return 1;
2061 }
2062
2063 rv = load_imsm_mpb(fd, super, devname);
2064
2065 if (rv) {
2066 if (devname)
2067 fprintf(stderr,
2068 Name ": Failed to load all information "
2069 "sections on %s\n", devname);
2070 free_imsm(super);
2071 return rv;
2072 }
2073
2074 st->sb = super;
2075 if (st->ss == NULL) {
2076 st->ss = &super_imsm;
2077 st->minor_version = 0;
2078 st->max_devs = IMSM_MAX_DEVICES;
2079 }
2080 st->loaded_container = 0;
2081
2082 return 0;
2083 }
2084
2085 static __u16 info_to_blocks_per_strip(mdu_array_info_t *info)
2086 {
2087 if (info->level == 1)
2088 return 128;
2089 return info->chunk_size >> 9;
2090 }
2091
2092 static __u32 info_to_num_data_stripes(mdu_array_info_t *info)
2093 {
2094 __u32 num_stripes;
2095
2096 num_stripes = (info->size * 2) / info_to_blocks_per_strip(info);
2097 if (info->level == 1)
2098 num_stripes /= 2;
2099
2100 return num_stripes;
2101 }
2102
2103 static __u32 info_to_blocks_per_member(mdu_array_info_t *info)
2104 {
2105 if (info->level == 1)
2106 return info->size * 2;
2107 else
2108 return (info->size * 2) & ~(info_to_blocks_per_strip(info) - 1);
2109 }
2110
static void imsm_update_version_info(struct intel_super *super)
{
	/* update the version and attributes */
	struct imsm_super *mpb = super->anchor;
	char *version;
	struct imsm_dev *dev;
	struct imsm_map *map;
	int i;

	for (i = 0; i < mpb->num_raid_devs; i++) {
		dev = get_imsm_dev(super, i);
		map = get_imsm_map(dev, 0);
		/* a non-zero high size dword means the volume needs the
		 * 2TB attribute bit
		 */
		if (__le32_to_cpu(dev->size_high) > 0)
			mpb->attributes |= MPB_ATTRIB_2TB;

		/* FIXME detect when an array spans a port multiplier */
#if 0
		mpb->attributes |= MPB_ATTRIB_PM;
#endif

		if (mpb->num_raid_devs > 1 ||
		    mpb->attributes != MPB_ATTRIB_CHECKSUM_VERIFY) {
			/* multiple volumes or extra attribute bits require
			 * the attribute-capable metadata version
			 */
			version = MPB_VERSION_ATTRIBS;
			switch (get_imsm_raid_level(map)) {
			case 0: mpb->attributes |= MPB_ATTRIB_RAID0; break;
			case 1: mpb->attributes |= MPB_ATTRIB_RAID1; break;
			case 10: mpb->attributes |= MPB_ATTRIB_RAID10; break;
			case 5: mpb->attributes |= MPB_ATTRIB_RAID5; break;
			}
		} else {
			/* single plain volume: pick the oldest metadata
			 * version that can describe it
			 */
			if (map->num_members >= 5)
				version = MPB_VERSION_5OR6_DISK_ARRAY;
			else if (dev->status == DEV_CLONE_N_GO)
				version = MPB_VERSION_CNG;
			else if (get_imsm_raid_level(map) == 5)
				version = MPB_VERSION_RAID5;
			else if (map->num_members >= 3)
				version = MPB_VERSION_3OR4_DISK_ARRAY;
			else if (get_imsm_raid_level(map) == 1)
				version = MPB_VERSION_RAID1;
			else
				version = MPB_VERSION_RAID0;
		}
		/* the version string lives directly after the signature
		 * inside mpb->sig
		 */
		strcpy(((char *) mpb->sig) + strlen(MPB_SIGNATURE), version);
	}
}
2157
2158 static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
2159 unsigned long long size, char *name,
2160 char *homehost, int *uuid)
2161 {
2162 /* We are creating a volume inside a pre-existing container.
2163 * so st->sb is already set.
2164 */
2165 struct intel_super *super = st->sb;
2166 struct imsm_super *mpb = super->anchor;
2167 struct intel_dev *dv;
2168 struct imsm_dev *dev;
2169 struct imsm_vol *vol;
2170 struct imsm_map *map;
2171 int idx = mpb->num_raid_devs;
2172 int i;
2173 unsigned long long array_blocks;
2174 size_t size_old, size_new;
2175
2176 if (super->orom && mpb->num_raid_devs >= super->orom->vpa) {
2177 fprintf(stderr, Name": This imsm-container already has the "
2178 "maximum of %d volumes\n", super->orom->vpa);
2179 return 0;
2180 }
2181
2182 /* ensure the mpb is large enough for the new data */
2183 size_old = __le32_to_cpu(mpb->mpb_size);
2184 size_new = disks_to_mpb_size(info->nr_disks);
2185 if (size_new > size_old) {
2186 void *mpb_new;
2187 size_t size_round = ROUND_UP(size_new, 512);
2188
2189 if (posix_memalign(&mpb_new, 512, size_round) != 0) {
2190 fprintf(stderr, Name": could not allocate new mpb\n");
2191 return 0;
2192 }
2193 memcpy(mpb_new, mpb, size_old);
2194 free(mpb);
2195 mpb = mpb_new;
2196 super->anchor = mpb_new;
2197 mpb->mpb_size = __cpu_to_le32(size_new);
2198 memset(mpb_new + size_old, 0, size_round - size_old);
2199 }
2200 super->current_vol = idx;
2201 /* when creating the first raid device in this container set num_disks
2202 * to zero, i.e. delete this spare and add raid member devices in
2203 * add_to_super_imsm_volume()
2204 */
2205 if (super->current_vol == 0)
2206 mpb->num_disks = 0;
2207
2208 for (i = 0; i < super->current_vol; i++) {
2209 dev = get_imsm_dev(super, i);
2210 if (strncmp((char *) dev->volume, name,
2211 MAX_RAID_SERIAL_LEN) == 0) {
2212 fprintf(stderr, Name": '%s' is already defined for this container\n",
2213 name);
2214 return 0;
2215 }
2216 }
2217
2218 sprintf(st->subarray, "%d", idx);
2219 dv = malloc(sizeof(*dv));
2220 if (!dv) {
2221 fprintf(stderr, Name ": failed to allocate device list entry\n");
2222 return 0;
2223 }
2224 dev = malloc(sizeof(*dev) + sizeof(__u32) * (info->raid_disks - 1));
2225 if (!dev) {
2226 free(dv);
2227 fprintf(stderr, Name": could not allocate raid device\n");
2228 return 0;
2229 }
2230 strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
2231 if (info->level == 1)
2232 array_blocks = info_to_blocks_per_member(info);
2233 else
2234 array_blocks = calc_array_size(info->level, info->raid_disks,
2235 info->layout, info->chunk_size,
2236 info->size*2);
2237 dev->size_low = __cpu_to_le32((__u32) array_blocks);
2238 dev->size_high = __cpu_to_le32((__u32) (array_blocks >> 32));
2239 dev->status = __cpu_to_le32(0);
2240 dev->reserved_blocks = __cpu_to_le32(0);
2241 vol = &dev->vol;
2242 vol->migr_state = 0;
2243 vol->migr_type = MIGR_INIT;
2244 vol->dirty = 0;
2245 vol->curr_migr_unit = 0;
2246 map = get_imsm_map(dev, 0);
2247 map->pba_of_lba0 = __cpu_to_le32(super->create_offset);
2248 map->blocks_per_member = __cpu_to_le32(info_to_blocks_per_member(info));
2249 map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info));
2250 map->num_data_stripes = __cpu_to_le32(info_to_num_data_stripes(info));
2251 map->map_state = info->level ? IMSM_T_STATE_UNINITIALIZED :
2252 IMSM_T_STATE_NORMAL;
2253
2254 if (info->level == 1 && info->raid_disks > 2) {
2255 fprintf(stderr, Name": imsm does not support more than 2 disks"
2256 "in a raid1 volume\n");
2257 return 0;
2258 }
2259 if (info->level == 10) {
2260 map->raid_level = 1;
2261 map->num_domains = info->raid_disks / 2;
2262 } else {
2263 map->raid_level = info->level;
2264 map->num_domains = !!map->raid_level;
2265 }
2266
2267 map->num_members = info->raid_disks;
2268 for (i = 0; i < map->num_members; i++) {
2269 /* initialized in add_to_super */
2270 set_imsm_ord_tbl_ent(map, i, 0);
2271 }
2272 mpb->num_raid_devs++;
2273
2274 dv->dev = dev;
2275 dv->index = super->current_vol;
2276 dv->next = super->devlist;
2277 super->devlist = dv;
2278
2279 imsm_update_version_info(super);
2280
2281 return 1;
2282 }
2283
2284 static int init_super_imsm(struct supertype *st, mdu_array_info_t *info,
2285 unsigned long long size, char *name,
2286 char *homehost, int *uuid)
2287 {
2288 /* This is primarily called by Create when creating a new array.
2289 * We will then get add_to_super called for each component, and then
2290 * write_init_super called to write it out to each device.
2291 * For IMSM, Create can create on fresh devices or on a pre-existing
2292 * array.
2293 * To create on a pre-existing array a different method will be called.
2294 * This one is just for fresh drives.
2295 */
2296 struct intel_super *super;
2297 struct imsm_super *mpb;
2298 size_t mpb_size;
2299 char *version;
2300
2301 if (!info) {
2302 st->sb = NULL;
2303 return 0;
2304 }
2305 if (st->sb)
2306 return init_super_imsm_volume(st, info, size, name, homehost,
2307 uuid);
2308
2309 super = alloc_super(1);
2310 if (!super)
2311 return 0;
2312 mpb_size = disks_to_mpb_size(info->nr_disks);
2313 if (posix_memalign(&super->buf, 512, mpb_size) != 0) {
2314 free(super);
2315 return 0;
2316 }
2317 mpb = super->buf;
2318 memset(mpb, 0, mpb_size);
2319
2320 mpb->attributes = MPB_ATTRIB_CHECKSUM_VERIFY;
2321
2322 version = (char *) mpb->sig;
2323 strcpy(version, MPB_SIGNATURE);
2324 version += strlen(MPB_SIGNATURE);
2325 strcpy(version, MPB_VERSION_RAID0);
2326 mpb->mpb_size = mpb_size;
2327
2328 st->sb = super;
2329 return 1;
2330 }
2331
2332 #ifndef MDASSEMBLE
static int add_to_super_imsm_volume(struct supertype *st, mdu_disk_info_t *dk,
				    int fd, char *devname)
{
	/* Record container disk 'dk' as a member of the volume currently
	 * being created (super->current_vol).  Returns 0 on success, 1 on
	 * failure (message printed to stderr).
	 */
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	struct dl *dl;
	struct imsm_dev *dev;
	struct imsm_map *map;

	dev = get_imsm_dev(super, super->current_vol);
	map = get_imsm_map(dev, 0);

	/* only in-sync members may be added to a volume being created */
	if (! (dk->state & (1<<MD_DISK_SYNC))) {
		fprintf(stderr, Name ": %s: Cannot add spare devices to IMSM volume\n",
			devname);
		return 1;
	}

	/* find the container disk matching this major:minor */
	for (dl = super->disks; dl ; dl = dl->next)
		if (dl->major == dk->major &&
		    dl->minor == dk->minor)
			break;

	if (!dl) {
		fprintf(stderr, Name ": %s is not a member of the same container\n", devname);
		return 1;
	}

	/* add a pristine spare to the metadata */
	if (dl->index < 0) {
		dl->index = super->anchor->num_disks;
		super->anchor->num_disks++;
	}
	/* dk->number is the slot within the volume; dl->index the slot
	 * within the container's disk table
	 */
	set_imsm_ord_tbl_ent(map, dk->number, dl->index);
	dl->disk.status = CONFIGURED_DISK | USABLE_DISK;

	/* if we are creating the first raid device update the family number */
	if (super->current_vol == 0) {
		__u32 sum;
		struct imsm_dev *_dev = __get_imsm_dev(mpb, 0);
		struct imsm_disk *_disk = __get_imsm_disk(mpb, dl->index);

		/* copy the in-progress dev/disk records into the anchor so
		 * the checksum (and thus family_num) covers them
		 */
		*_dev = *dev;
		*_disk = dl->disk;
		sum = __gen_imsm_checksum(mpb);
		mpb->family_num = __cpu_to_le32(sum);
	}

	return 0;
}
2383
2384 static int add_to_super_imsm(struct supertype *st, mdu_disk_info_t *dk,
2385 int fd, char *devname)
2386 {
2387 struct intel_super *super = st->sb;
2388 struct dl *dd;
2389 unsigned long long size;
2390 __u32 id;
2391 int rv;
2392 struct stat stb;
2393
2394 /* if we are on an RAID enabled platform check that the disk is
2395 * attached to the raid controller
2396 */
2397 if (super->hba && !disk_attached_to_hba(fd, super->hba)) {
2398 fprintf(stderr,
2399 Name ": %s is not attached to the raid controller: %s\n",
2400 devname ? : "disk", super->hba);
2401 return 1;
2402 }
2403
2404 if (super->current_vol >= 0)
2405 return add_to_super_imsm_volume(st, dk, fd, devname);
2406
2407 fstat(fd, &stb);
2408 dd = malloc(sizeof(*dd));
2409 if (!dd) {
2410 fprintf(stderr,
2411 Name ": malloc failed %s:%d.\n", __func__, __LINE__);
2412 return 1;
2413 }
2414 memset(dd, 0, sizeof(*dd));
2415 dd->major = major(stb.st_rdev);
2416 dd->minor = minor(stb.st_rdev);
2417 dd->index = -1;
2418 dd->devname = devname ? strdup(devname) : NULL;
2419 dd->fd = fd;
2420 dd->e = NULL;
2421 rv = imsm_read_serial(fd, devname, dd->serial);
2422 if (rv) {
2423 fprintf(stderr,
2424 Name ": failed to retrieve scsi serial, aborting\n");
2425 free(dd);
2426 abort();
2427 }
2428
2429 get_dev_size(fd, NULL, &size);
2430 size /= 512;
2431 serialcpy(dd->disk.serial, dd->serial);
2432 dd->disk.total_blocks = __cpu_to_le32(size);
2433 dd->disk.status = USABLE_DISK | SPARE_DISK;
2434 if (sysfs_disk_to_scsi_id(fd, &id) == 0)
2435 dd->disk.scsi_id = __cpu_to_le32(id);
2436 else
2437 dd->disk.scsi_id = __cpu_to_le32(0);
2438
2439 if (st->update_tail) {
2440 dd->next = super->add;
2441 super->add = dd;
2442 } else {
2443 dd->next = super->disks;
2444 super->disks = dd;
2445 }
2446
2447 return 0;
2448 }
2449
2450 static int store_imsm_mpb(int fd, struct intel_super *super);
2451
/* spare records have their own family number and do not have any defined raid
 * devices
 */
static int write_super_imsm_spares(struct intel_super *super, int doclose)
{
	/* Write a minimal single-disk spare record to every unassigned
	 * (index == -1) disk.  The shared anchor is temporarily rewritten
	 * for each spare and restored before returning.  Returns 0 on
	 * success, 1 if any store fails.
	 */
	struct imsm_super mpb_save;
	struct imsm_super *mpb = super->anchor;
	__u32 sum;
	struct dl *d;

	/* stash the real anchor, then shape it into a spare record:
	 * one disk, no volumes
	 */
	mpb_save = *mpb;
	mpb->num_raid_devs = 0;
	mpb->num_disks = 1;
	mpb->mpb_size = sizeof(struct imsm_super);
	mpb->generation_num = __cpu_to_le32(1UL);

	for (d = super->disks; d; d = d->next) {
		if (d->index != -1)
			continue;

		mpb->disk[0] = d->disk;
		/* first checksum pass derives this spare's family number;
		 * second pass recomputes the checksum to cover the
		 * just-written family_num field
		 */
		sum = __gen_imsm_checksum(mpb);
		mpb->family_num = __cpu_to_le32(sum);
		sum = __gen_imsm_checksum(mpb);
		mpb->check_sum = __cpu_to_le32(sum);

		if (store_imsm_mpb(d->fd, super)) {
			fprintf(stderr, "%s: failed for device %d:%d %s\n",
				__func__, d->major, d->minor, strerror(errno));
			/* restore the real anchor before bailing out */
			*mpb = mpb_save;
			return 1;
		}
		if (doclose) {
			close(d->fd);
			d->fd = -1;
		}
	}

	/* restore the real container metadata */
	*mpb = mpb_save;
	return 0;
}
2493
static int write_super_imsm(struct intel_super *super, int doclose)
{
	/* Refresh the anchor from the in-memory disk/dev lists, recompute
	 * its size and checksum, and write it to every disk that is a raid
	 * member; spares are written separately by write_super_imsm_spares().
	 * Returns 0 on success.
	 */
	struct imsm_super *mpb = super->anchor;
	struct dl *d;
	__u32 generation;
	__u32 sum;
	int spares = 0;
	int i;
	/* base size excludes the one imsm_disk embedded in imsm_super;
	 * actual disk entries are added below
	 */
	__u32 mpb_size = sizeof(struct imsm_super) - sizeof(struct imsm_disk);

	/* 'generation' is incremented everytime the metadata is written */
	generation = __le32_to_cpu(mpb->generation_num);
	generation++;
	mpb->generation_num = __cpu_to_le32(generation);

	mpb_size += sizeof(struct imsm_disk) * mpb->num_disks;
	for (d = super->disks; d; d = d->next) {
		if (d->index == -1)
			spares++;
		else
			mpb->disk[d->index] = d->disk;
	}
	/* missing disks keep their slot in the on-disk table */
	for (d = super->missing; d; d = d->next)
		mpb->disk[d->index] = d->disk;

	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct imsm_dev *dev = __get_imsm_dev(mpb, i);

		imsm_copy_dev(dev, get_imsm_dev(super, i));
		mpb_size += sizeof_imsm_dev(dev, 0);
	}
	mpb_size += __le32_to_cpu(mpb->bbm_log_size);
	mpb->mpb_size = __cpu_to_le32(mpb_size);

	/* recalculate checksum */
	sum = __gen_imsm_checksum(mpb);
	mpb->check_sum = __cpu_to_le32(sum);

	/* write the mpb for disks that compose raid devices */
	for (d = super->disks; d ; d = d->next) {
		if (d->index < 0)
			continue;
		if (store_imsm_mpb(d->fd, super))
			fprintf(stderr, "%s: failed for device %d:%d %s\n",
				__func__, d->major, d->minor, strerror(errno));
		if (doclose) {
			close(d->fd);
			d->fd = -1;
		}
	}

	if (spares)
		return write_super_imsm_spares(super, doclose);

	return 0;
}
2550
2551
static int create_array(struct supertype *st)
{
	/* Queue an 'update_create_array' metadata update describing the
	 * volume at super->current_vol, including each member disk's serial
	 * so mdmon can match the disks.  Returns 0 on success, 1 on
	 * allocation failure.
	 */
	size_t len;
	struct imsm_update_create_array *u;
	struct intel_super *super = st->sb;
	struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
	struct imsm_map *map = get_imsm_map(dev, 0);
	struct disk_info *inf;
	struct imsm_disk *disk;
	int i;
	int idx;

	/* the update embeds one imsm_dev (variable size) plus a disk_info
	 * per member; subtract the placeholder dev in sizeof(*u)
	 */
	len = sizeof(*u) - sizeof(*dev) + sizeof_imsm_dev(dev, 0) +
		sizeof(*inf) * map->num_members;
	u = malloc(len);
	if (!u) {
		fprintf(stderr, "%s: failed to allocate update buffer\n",
			__func__);
		return 1;
	}

	u->type = update_create_array;
	u->dev_idx = super->current_vol;
	imsm_copy_dev(&u->dev, dev);
	inf = get_disk_info(u);
	for (i = 0; i < map->num_members; i++) {
		idx = get_imsm_disk_idx(dev, i);
		disk = get_imsm_disk(super, idx);
		/* NOTE(review): get_imsm_disk() can apparently return NULL
		 * for a missing disk (see container_content_imsm); serialcpy
		 * would dereference it here -- confirm idx is always valid
		 * at create time
		 */
		serialcpy(inf[i].serial, disk->serial);
	}
	append_metadata_update(st, u, len);

	return 0;
}
2586
2587 static int _add_disk(struct supertype *st)
2588 {
2589 struct intel_super *super = st->sb;
2590 size_t len;
2591 struct imsm_update_add_disk *u;
2592
2593 if (!super->add)
2594 return 0;
2595
2596 len = sizeof(*u);
2597 u = malloc(len);
2598 if (!u) {
2599 fprintf(stderr, "%s: failed to allocate update buffer\n",
2600 __func__);
2601 return 1;
2602 }
2603
2604 u->type = update_add_disk;
2605 append_metadata_update(st, u, len);
2606
2607 return 0;
2608 }
2609
2610 static int write_init_super_imsm(struct supertype *st)
2611 {
2612 if (st->update_tail) {
2613 /* queue the recently created array / added disk
2614 * as a metadata update */
2615 struct intel_super *super = st->sb;
2616 struct dl *d;
2617 int rv;
2618
2619 /* determine if we are creating a volume or adding a disk */
2620 if (super->current_vol < 0) {
2621 /* in the add disk case we are running in mdmon
2622 * context, so don't close fd's
2623 */
2624 return _add_disk(st);
2625 } else
2626 rv = create_array(st);
2627
2628 for (d = super->disks; d ; d = d->next) {
2629 close(d->fd);
2630 d->fd = -1;
2631 }
2632
2633 return rv;
2634 } else
2635 return write_super_imsm(st->sb, 1);
2636 }
2637 #endif
2638
static int store_zero_imsm(struct supertype *st, int fd)
{
	/* Erase the imsm anchor by writing one zeroed sector over it.
	 * Returns 0 on success, 1 on failure.
	 */
	unsigned long long dsize;
	void *buf;
	int rv = 0;

	get_dev_size(fd, NULL, &dsize);

	/* first block is stored on second to last sector of the disk */
	if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
		return 1;

	if (posix_memalign(&buf, 512, 512) != 0)
		return 1;

	memset(buf, 0, 512);
	if (write(fd, buf, 512) != 512)
		rv = 1;
	free(buf);	/* was leaked on every call, including the error path */
	return rv;
}
2658
2659 static int imsm_bbm_log_size(struct imsm_super *mpb)
2660 {
2661 return __le32_to_cpu(mpb->bbm_log_size);
2662 }
2663
2664 #ifndef MDASSEMBLE
2665 static int validate_geometry_imsm_container(struct supertype *st, int level,
2666 int layout, int raiddisks, int chunk,
2667 unsigned long long size, char *dev,
2668 unsigned long long *freesize,
2669 int verbose)
2670 {
2671 int fd;
2672 unsigned long long ldsize;
2673 const struct imsm_orom *orom;
2674
2675 if (level != LEVEL_CONTAINER)
2676 return 0;
2677 if (!dev)
2678 return 1;
2679
2680 if (check_env("IMSM_NO_PLATFORM"))
2681 orom = NULL;
2682 else
2683 orom = find_imsm_orom();
2684 if (orom && raiddisks > orom->tds) {
2685 if (verbose)
2686 fprintf(stderr, Name ": %d exceeds maximum number of"
2687 " platform supported disks: %d\n",
2688 raiddisks, orom->tds);
2689 return 0;
2690 }
2691
2692 fd = open(dev, O_RDONLY|O_EXCL, 0);
2693 if (fd < 0) {
2694 if (verbose)
2695 fprintf(stderr, Name ": imsm: Cannot open %s: %s\n",
2696 dev, strerror(errno));
2697 return 0;
2698 }
2699 if (!get_dev_size(fd, dev, &ldsize)) {
2700 close(fd);
2701 return 0;
2702 }
2703 close(fd);
2704
2705 *freesize = avail_size_imsm(st, ldsize >> 9);
2706
2707 return 1;
2708 }
2709
2710 static unsigned long long find_size(struct extent *e, int *idx, int num_extents)
2711 {
2712 const unsigned long long base_start = e[*idx].start;
2713 unsigned long long end = base_start + e[*idx].size;
2714 int i;
2715
2716 if (base_start == end)
2717 return 0;
2718
2719 *idx = *idx + 1;
2720 for (i = *idx; i < num_extents; i++) {
2721 /* extend overlapping extents */
2722 if (e[i].start >= base_start &&
2723 e[i].start <= end) {
2724 if (e[i].size == 0)
2725 return 0;
2726 if (e[i].start + e[i].size > end)
2727 end = e[i].start + e[i].size;
2728 } else if (e[i].start > end) {
2729 *idx = i;
2730 break;
2731 }
2732 }
2733
2734 return end - base_start;
2735 }
2736
static unsigned long long merge_extents(struct intel_super *super, int sum_extents)
{
	/* build a composite disk with all known extents and generate a new
	 * 'maxsize' given the "all disks in an array must share a common start
	 * offset" constraint
	 *
	 * Also sets super->create_offset to the chosen common start.
	 * Returns the usable size in sectors, or ~0ULL on error.
	 */
	struct extent *e = calloc(sum_extents, sizeof(*e));
	struct dl *dl;
	int i, j;
	int start_extent;
	unsigned long long pos;
	unsigned long long start;
	unsigned long long maxsize;
	unsigned long reserve;

	if (!e)
		return ~0ULL; /* error */

	/* coalesce and sort all extents. also, check to see if we need to
	 * reserve space between member arrays
	 */
	j = 0;
	for (dl = super->disks; dl; dl = dl->next) {
		if (!dl->e)
			continue;
		for (i = 0; i < dl->extent_cnt; i++)
			e[j++] = dl->e[i];
	}
	qsort(e, sum_extents, sizeof(*e), cmp_extent);

	/* merge extents; a zero-sized result marks the end of the list */
	i = 0;
	j = 0;
	while (i < sum_extents) {
		e[j].start = e[i].start;
		e[j].size = find_size(e, &i, sum_extents);
		j++;
		if (e[j-1].size == 0)
			break;
	}

	/* walk the gaps between merged extents and pick the largest free
	 * region; 'start_extent' remembers which gap it was so we know
	 * whether a raid region precedes it
	 */
	pos = 0;
	maxsize = 0;
	start_extent = 0;
	i = 0;
	do {
		unsigned long long esize;

		esize = e[i].start - pos;
		if (esize >= maxsize) {
			maxsize = esize;
			start = pos;
			start_extent = i;
		}
		pos = e[i].start + e[i].size;
		i++;
	} while (e[i-1].size);
	free(e);

	if (start_extent > 0)
		reserve = IMSM_RESERVED_SECTORS; /* gap between raid regions */
	else
		reserve = 0;

	if (maxsize < reserve)
		return ~0ULL;

	/* create_offset is a 32-bit field; make sure start+reserve fits */
	super->create_offset = ~((__u32) 0);
	if (start + reserve > super->create_offset)
		return ~0ULL; /* start overflows create_offset */
	super->create_offset = start + reserve;

	return maxsize - reserve;
}
2811
static int is_raid_level_supported(const struct imsm_orom *orom, int level, int raiddisks)
{
	/* raid4 and raid6 are never supported by imsm, and negative levels
	 * (containers) are rejected outright
	 */
	if (level < 0 || level == 4 || level == 6)
		return 0;

	/* not on an Intel RAID platform so anything goes */
	if (!orom)
		return 1;

	/* with an option-rom present, honour its capability bits */
	switch (level) {
	case 0:
		return imsm_orom_has_raid0(orom);
	case 1:
		/* >2 disk raid1 is expressed as raid1e by the orom */
		if (raiddisks > 2)
			return imsm_orom_has_raid1e(orom);
		return imsm_orom_has_raid1(orom) && raiddisks == 2;
	case 10:
		return imsm_orom_has_raid10(orom) && raiddisks == 4;
	case 5:
		return imsm_orom_has_raid5(orom) && raiddisks > 2;
	}

	return 0;
}
2833
2834 #define pr_vrb(fmt, arg...) (void) (verbose && fprintf(stderr, Name fmt, ##arg))
2835 /* validate_geometry_imsm_volume - lifted from validate_geometry_ddf_bvd
2836 * FIX ME add ahci details
2837 */
static int validate_geometry_imsm_volume(struct supertype *st, int level,
					 int layout, int raiddisks, int chunk,
					 unsigned long long size, char *dev,
					 unsigned long long *freesize,
					 int verbose)
{
	/* Validate that a volume with the given geometry can be created in
	 * this container, and (when 'dev' is given) that 'dev' is a member
	 * with enough free space.  On success *freesize receives the merged
	 * free space.  Returns 1 if the geometry is acceptable, 0 otherwise.
	 */
	struct stat stb;
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	struct dl *dl;
	unsigned long long pos = 0;
	unsigned long long maxsize;
	struct extent *e;
	int i;

	/* We must have the container info already read in. */
	if (!super)
		return 0;

	if (!is_raid_level_supported(super->orom, level, raiddisks)) {
		pr_vrb(": platform does not support raid%d with %d disk%s\n",
			level, raiddisks, raiddisks > 1 ? "s" : "");
		return 0;
	}
	/* raid1 has no chunk size, so skip the orom chunk check for it */
	if (super->orom && level != 1 &&
	    !imsm_orom_has_chunk(super->orom, chunk)) {
		pr_vrb(": platform does not support a chunk size of: %d\n", chunk);
		return 0;
	}
	/* imsm supports exactly one layout per raid level */
	if (layout != imsm_level_to_layout(level)) {
		if (level == 5)
			pr_vrb(": imsm raid 5 only supports the left-asymmetric layout\n");
		else if (level == 10)
			pr_vrb(": imsm raid 10 only supports the n2 layout\n");
		else
			pr_vrb(": imsm unknown layout %#x for this raid level %d\n",
				layout, level);
		return 0;
	}

	if (!dev) {
		/* General test: make sure there is space for
		 * 'raiddisks' device extents of size 'size' at a given
		 * offset
		 */
		unsigned long long minsize = size;
		unsigned long long start_offset = ~0ULL;
		int dcnt = 0;
		if (minsize == 0)
			minsize = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
		for (dl = super->disks; dl ; dl = dl->next) {
			int found = 0;

			pos = 0;
			i = 0;
			e = get_extents(super, dl);
			if (!e) continue;
			/* walk the gaps between used extents; all disks
			 * must have a big-enough gap at the SAME offset
			 * (start_offset is fixed by the first disk)
			 */
			do {
				unsigned long long esize;
				esize = e[i].start - pos;
				if (esize >= minsize)
					found = 1;
				if (found && start_offset == ~0ULL) {
					start_offset = pos;
					break;
				} else if (found && pos != start_offset) {
					found = 0;
					break;
				}
				pos = e[i].start + e[i].size;
				i++;
			} while (e[i-1].size);
			if (found)
				dcnt++;
			free(e);
		}
		if (dcnt < raiddisks) {
			if (verbose)
				fprintf(stderr, Name ": imsm: Not enough "
					"devices with space for this array "
					"(%d < %d)\n",
					dcnt, raiddisks);
			return 0;
		}
		return 1;
	}

	/* This device must be a member of the set */
	if (stat(dev, &stb) < 0)
		return 0;
	if ((S_IFMT & stb.st_mode) != S_IFBLK)
		return 0;
	for (dl = super->disks ; dl ; dl = dl->next) {
		if (dl->major == major(stb.st_rdev) &&
		    dl->minor == minor(stb.st_rdev))
			break;
	}
	if (!dl) {
		if (verbose)
			fprintf(stderr, Name ": %s is not in the "
				"same imsm set\n", dev);
		return 0;
	} else if (super->orom && dl->index < 0 && mpb->num_raid_devs) {
		/* If a volume is present then the current creation attempt
		 * cannot incorporate new spares because the orom may not
		 * understand this configuration (all member disks must be
		 * members of each array in the container).
		 */
		fprintf(stderr, Name ": %s is a spare and a volume"
			" is already defined for this container\n", dev);
		fprintf(stderr, Name ": The option-rom requires all member"
			" disks to be a member of all volumes\n");
		return 0;
	}

	/* retrieve the largest free space block */
	e = get_extents(super, dl);
	maxsize = 0;
	i = 0;
	if (e) {
		do {
			unsigned long long esize;

			esize = e[i].start - pos;
			if (esize >= maxsize)
				maxsize = esize;
			pos = e[i].start + e[i].size;
			i++;
		} while (e[i-1].size);
		/* stash the extent list on the disk entry for the merge
		 * pass below (and for later creation steps)
		 */
		dl->e = e;
		dl->extent_cnt = i;
	} else {
		if (verbose)
			fprintf(stderr, Name ": unable to determine free space for: %s\n",
				dev);
		return 0;
	}
	if (maxsize < size) {
		if (verbose)
			fprintf(stderr, Name ": %s not enough space (%llu < %llu)\n",
				dev, maxsize, size);
		return 0;
	}

	/* count total number of extents for merge */
	i = 0;
	for (dl = super->disks; dl; dl = dl->next)
		if (dl->e)
			i += dl->extent_cnt;

	/* all disks must share a common start offset; merging determines
	 * the largest region satisfying that constraint
	 */
	maxsize = merge_extents(super, i);
	if (maxsize < size) {
		if (verbose)
			fprintf(stderr, Name ": not enough space after merge (%llu < %llu)\n",
				maxsize, size);
		return 0;
	} else if (maxsize == ~0ULL) {
		if (verbose)
			fprintf(stderr, Name ": failed to merge %d extents\n", i);
		return 0;
	}

	*freesize = maxsize;

	return 1;
}
3004
3005 static int validate_geometry_imsm(struct supertype *st, int level, int layout,
3006 int raiddisks, int chunk, unsigned long long size,
3007 char *dev, unsigned long long *freesize,
3008 int verbose)
3009 {
3010 int fd, cfd;
3011 struct mdinfo *sra;
3012
3013 /* if given unused devices create a container
3014 * if given given devices in a container create a member volume
3015 */
3016 if (level == LEVEL_CONTAINER) {
3017 /* Must be a fresh device to add to a container */
3018 return validate_geometry_imsm_container(st, level, layout,
3019 raiddisks, chunk, size,
3020 dev, freesize,
3021 verbose);
3022 }
3023
3024 if (!dev) {
3025 if (st->sb && freesize) {
3026 /* Should do auto-layout here */
3027 fprintf(stderr, Name ": IMSM does not support auto-layout yet\n");
3028 return 0;
3029 }
3030 return 1;
3031 }
3032 if (st->sb) {
3033 /* creating in a given container */
3034 return validate_geometry_imsm_volume(st, level, layout,
3035 raiddisks, chunk, size,
3036 dev, freesize, verbose);
3037 }
3038
3039 /* limit creation to the following levels */
3040 if (!dev)
3041 switch (level) {
3042 case 0:
3043 case 1:
3044 case 10:
3045 case 5:
3046 break;
3047 default:
3048 return 1;
3049 }
3050
3051 /* This device needs to be a device in an 'imsm' container */
3052 fd = open(dev, O_RDONLY|O_EXCL, 0);
3053 if (fd >= 0) {
3054 if (verbose)
3055 fprintf(stderr,
3056 Name ": Cannot create this array on device %s\n",
3057 dev);
3058 close(fd);
3059 return 0;
3060 }
3061 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
3062 if (verbose)
3063 fprintf(stderr, Name ": Cannot open %s: %s\n",
3064 dev, strerror(errno));
3065 return 0;
3066 }
3067 /* Well, it is in use by someone, maybe an 'imsm' container. */
3068 cfd = open_container(fd);
3069 if (cfd < 0) {
3070 close(fd);
3071 if (verbose)
3072 fprintf(stderr, Name ": Cannot use %s: It is busy\n",
3073 dev);
3074 return 0;
3075 }
3076 sra = sysfs_read(cfd, 0, GET_VERSION);
3077 close(fd);
3078 if (sra && sra->array.major_version == -1 &&
3079 strcmp(sra->text_version, "imsm") == 0) {
3080 /* This is a member of a imsm container. Load the container
3081 * and try to create a volume
3082 */
3083 struct intel_super *super;
3084
3085 if (load_super_imsm_all(st, cfd, (void **) &super, NULL, 1) == 0) {
3086 st->sb = super;
3087 st->container_dev = fd2devnum(cfd);
3088 close(cfd);
3089 return validate_geometry_imsm_volume(st, level, layout,
3090 raiddisks, chunk,
3091 size, dev,
3092 freesize, verbose);
3093 }
3094 close(cfd);
3095 } else /* may belong to another container */
3096 return 0;
3097
3098 return 1;
3099 }
3100 #endif /* MDASSEMBLE */
3101
static struct mdinfo *container_content_imsm(struct supertype *st)
{
	/* Given a container loaded by load_super_imsm_all,
	 * extract information about all the arrays into
	 * an mdinfo tree.
	 *
	 * For each imsm_dev create an mdinfo, fill it in,
	 * then look for matching devices in super->disks
	 * and create appropriate device mdinfo.
	 */
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	struct mdinfo *rest = NULL;
	int i;

	/* do not assemble arrays that might have bad blocks */
	if (imsm_bbm_log_size(super->anchor)) {
		fprintf(stderr, Name ": BBM log found in metadata. "
				"Cannot activate array(s).\n");
		return NULL;
	}

	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);
		struct imsm_map *map = get_imsm_map(dev, 0);
		struct mdinfo *this;
		int slot;

		this = malloc(sizeof(*this));
		memset(this, 0, sizeof(*this));
		this->next = rest;

		/* getinfo_super_imsm_volume reports on current_vol */
		super->current_vol = i;
		getinfo_super_imsm_volume(st, this);
		for (slot = 0 ; slot < map->num_members; slot++) {
			struct mdinfo *info_d;
			struct dl *d;
			int idx;
			int skip;
			__u32 s;
			__u32 ord;

			skip = 0;
			idx = get_imsm_disk_idx(dev, slot);
			ord = get_imsm_ord_tbl_ent(dev, slot);
			for (d = super->disks; d ; d = d->next)
				if (d->index == idx)
					break;

			/* skip slots whose disk is missing, failed,
			 * unusable, or still rebuilding
			 */
			if (d == NULL)
				skip = 1;

			s = d ? d->disk.status : 0;
			if (s & FAILED_DISK)
				skip = 1;
			if (!(s & USABLE_DISK))
				skip = 1;
			if (ord & IMSM_ORD_REBUILD)
				skip = 1;

			/*
			 * if we skip some disks the array will be assmebled degraded;
			 * reset resync start to avoid a dirty-degraded situation
			 *
			 * FIXME handle dirty degraded
			 */
			if (skip && !dev->vol.dirty)
				this->resync_start = ~0ULL;
			if (skip)
				continue;

			info_d = malloc(sizeof(*info_d));
			if (!info_d) {
				fprintf(stderr, Name ": failed to allocate disk"
					" for volume %s\n", (char *) dev->volume);
				/* NOTE(review): this frees 'this' but leaks
				 * any info_d entries already chained on
				 * this->devs -- confirm intended
				 */
				free(this);
				this = rest;
				break;
			}
			memset(info_d, 0, sizeof(*info_d));
			info_d->next = this->devs;
			this->devs = info_d;

			info_d->disk.number = d->index;
			info_d->disk.major = d->major;
			info_d->disk.minor = d->minor;
			info_d->disk.raid_disk = slot;

			this->array.working_disks++;

			info_d->events = __le32_to_cpu(mpb->generation_num);
			info_d->data_offset = __le32_to_cpu(map->pba_of_lba0);
			info_d->component_size = __le32_to_cpu(map->blocks_per_member);
			if (d->devname)
				strcpy(info_d->name, d->devname);
		}
		rest = this;
	}

	return rest;
}
3203
3204
3205 #ifndef MDASSEMBLE
3206 static int imsm_open_new(struct supertype *c, struct active_array *a,
3207 char *inst)
3208 {
3209 struct intel_super *super = c->sb;
3210 struct imsm_super *mpb = super->anchor;
3211
3212 if (atoi(inst) >= mpb->num_raid_devs) {
3213 fprintf(stderr, "%s: subarry index %d, out of range\n",
3214 __func__, atoi(inst));
3215 return -ENODEV;
3216 }
3217
3218 dprintf("imsm: open_new %s\n", inst);
3219 a->info.container_member = atoi(inst);
3220 return 0;
3221 }
3222
static __u8 imsm_check_degraded(struct intel_super *super, struct imsm_dev *dev, int failed)
{
	/* Determine the map state for 'dev' given 'failed' failed members:
	 * NORMAL, DEGRADED, or FAILED (UNINITIALIZED is preserved when
	 * nothing has failed).
	 */
	struct imsm_map *map = get_imsm_map(dev, 0);

	if (!failed)
		return map->map_state == IMSM_T_STATE_UNINITIALIZED ?
			IMSM_T_STATE_UNINITIALIZED : IMSM_T_STATE_NORMAL;

	switch (get_imsm_raid_level(map)) {
	case 0:
		/* raid0 has no redundancy: any failure is fatal */
		return IMSM_T_STATE_FAILED;
		break;
	case 1:
		if (failed < map->num_members)
			return IMSM_T_STATE_DEGRADED;
		else
			return IMSM_T_STATE_FAILED;
		break;
	case 10:
	{
		/**
		 * check to see if any mirrors have failed, otherwise we
		 * are degraded.  Even numbered slots are mirrored on
		 * slot+1
		 */
		int i;
		/* gcc -Os complains that this is unused */
		int insync = insync;

		for (i = 0; i < map->num_members; i++) {
			__u32 ord = get_imsm_ord_tbl_ent(dev, i);
			int idx = ord_to_idx(ord);
			struct imsm_disk *disk;

			/* reset the potential in-sync count on even-numbered
			 * slots.  num_copies is always 2 for imsm raid10
			 */
			if ((i & 1) == 0)
				insync = 2;

			disk = get_imsm_disk(super, idx);
			if (!disk || disk->status & FAILED_DISK ||
			    ord & IMSM_ORD_REBUILD)
				insync--;

			/* no in-sync disks left in this mirror the
			 * array has failed
			 */
			if (insync == 0)
				return IMSM_T_STATE_FAILED;
		}

		return IMSM_T_STATE_DEGRADED;
	}
	case 5:
		/* raid5 survives exactly one failure */
		if (failed < 2)
			return IMSM_T_STATE_DEGRADED;
		else
			return IMSM_T_STATE_FAILED;
		break;
	default:
		break;
	}

	return map->map_state;
}
3289
3290 static int imsm_count_failed(struct intel_super *super, struct imsm_dev *dev)
3291 {
3292 int i;
3293 int failed = 0;
3294 struct imsm_disk *disk;
3295 struct imsm_map *map = get_imsm_map(dev, 0);
3296
3297 for (i = 0; i < map->num_members; i++) {
3298 __u32 ord = get_imsm_ord_tbl_ent(dev, i);
3299 int idx = ord_to_idx(ord);
3300
3301 disk = get_imsm_disk(super, idx);
3302 if (!disk || disk->status & FAILED_DISK ||
3303 ord & IMSM_ORD_REBUILD)
3304 failed++;
3305 }
3306
3307 return failed;
3308 }
3309
3310 static int is_resyncing(struct imsm_dev *dev)
3311 {
3312 struct imsm_map *migr_map;
3313
3314 if (!dev->vol.migr_state)
3315 return 0;
3316
3317 if (dev->vol.migr_type == MIGR_INIT)
3318 return 1;
3319
3320 migr_map = get_imsm_map(dev, 1);
3321
3322 if (migr_map->map_state == IMSM_T_STATE_NORMAL)
3323 return 1;
3324 else
3325 return 0;
3326 }
3327
3328 static int is_rebuilding(struct imsm_dev *dev)
3329 {
3330 struct imsm_map *migr_map;
3331
3332 if (!dev->vol.migr_state)
3333 return 0;
3334
3335 if (dev->vol.migr_type != MIGR_REBUILD)
3336 return 0;
3337
3338 migr_map = get_imsm_map(dev, 1);
3339
3340 if (migr_map->map_state == IMSM_T_STATE_DEGRADED)
3341 return 1;
3342 else
3343 return 0;
3344 }
3345
static void mark_failure(struct imsm_disk *disk)
{
	/* Mark 'disk' as failed in the metadata (idempotent). */
	if (disk->status & FAILED_DISK)
		return;
	disk->status |= FAILED_DISK;
	/* invalidate the scsi id */
	disk->scsi_id = __cpu_to_le32(~(__u32)0);
	/* NOTE(review): shifting the serial left by one byte presumably
	 * makes the failed disk no longer match on serial lookup -- confirm
	 * against the serial-matching code
	 */
	memmove(&disk->serial[0], &disk->serial[1], MAX_RAID_SERIAL_LEN - 1);
}
3354
/* Handle dirty -> clean transitions and resync.  Degraded and rebuild
 * states are handled in imsm_set_disk() with one exception, when a
 * resync is stopped due to a new failure this routine will set the
 * 'degraded' state for the array.
 *
 * Returns the (possibly downgraded) 'consistent' value to report back
 * to the monitor.
 */
static int imsm_set_array_state(struct active_array *a, int consistent)
{
	int inst = a->info.container_member;
	struct intel_super *super = a->container->sb;
	struct imsm_dev *dev = get_imsm_dev(super, inst);
	struct imsm_map *map = get_imsm_map(dev, 0);
	int failed = imsm_count_failed(super, dev);
	__u8 map_state = imsm_check_degraded(super, dev, failed);

	/* before we activate this array handle any missing disks */
	if (consistent == 2 && super->missing) {
		struct dl *dl;

		dprintf("imsm: mark missing\n");
		/* terminate any in-flight migration and fail every disk on
		 * the missing list before the array goes live
		 */
		end_migration(dev, map_state);
		for (dl = super->missing; dl; dl = dl->next)
			mark_failure(&dl->disk);
		super->updates_pending++;
	}

	/* consistent == 2 means "clean unless proven otherwise"; demote to
	 * dirty while resync is incomplete, the map is not optimal, or a
	 * migration is still recorded
	 */
	if (consistent == 2 &&
	    (!is_resync_complete(a) ||
	     map_state != IMSM_T_STATE_NORMAL ||
	     dev->vol.migr_state))
		consistent = 0;

	if (is_resync_complete(a)) {
		/* complete initialization / resync,
		 * recovery is completed in ->set_disk
		 */
		if (is_resyncing(dev)) {
			dprintf("imsm: mark resync done\n");
			end_migration(dev, map_state);
			super->updates_pending++;
		}
	} else if (!is_resyncing(dev) && !failed) {
		/* mark the start of the init process if nothing is failed */
		dprintf("imsm: mark resync start (%llu)\n", a->resync_start);
		if (map->map_state == IMSM_T_STATE_NORMAL)
			migrate(dev, IMSM_T_STATE_NORMAL, MIGR_REBUILD);
		else
			migrate(dev, IMSM_T_STATE_NORMAL, MIGR_INIT);
		super->updates_pending++;
	}

	/* check if we can update the migration checkpoint */
	if (dev->vol.migr_state &&
	    __le32_to_cpu(dev->vol.curr_migr_unit) != a->resync_start) {
		dprintf("imsm: checkpoint migration (%llu)\n", a->resync_start);
		dev->vol.curr_migr_unit = __cpu_to_le32(a->resync_start);
		super->updates_pending++;
	}

	/* mark dirty / clean: vol.dirty is the inverse of 'consistent' */
	if (dev->vol.dirty != !consistent) {
		dprintf("imsm: mark '%s' (%llu)\n",
			consistent ? "clean" : "dirty", a->resync_start);
		if (consistent)
			dev->vol.dirty = 0;
		else
			dev->vol.dirty = 1;
		super->updates_pending++;
	}
	return consistent;
}
3425
3426 static void imsm_set_disk(struct active_array *a, int n, int state)
3427 {
3428 int inst = a->info.container_member;
3429 struct intel_super *super = a->container->sb;
3430 struct imsm_dev *dev = get_imsm_dev(super, inst);
3431 struct imsm_map *map = get_imsm_map(dev, 0);
3432 struct imsm_disk *disk;
3433 int failed;
3434 __u32 ord;
3435 __u8 map_state;
3436
3437 if (n > map->num_members)
3438 fprintf(stderr, "imsm: set_disk %d out of range 0..%d\n",
3439 n, map->num_members - 1);
3440
3441 if (n < 0)
3442 return;
3443
3444 dprintf("imsm: set_disk %d:%x\n", n, state);
3445
3446 ord = get_imsm_ord_tbl_ent(dev, n);
3447 disk = get_imsm_disk(super, ord_to_idx(ord));
3448
3449 /* check for new failures */
3450 if ((state & DS_FAULTY) && !(disk->status & FAILED_DISK)) {
3451 mark_failure(disk);
3452 super->updates_pending++;
3453 }
3454
3455 /* check if in_sync */
3456 if (state & DS_INSYNC && ord & IMSM_ORD_REBUILD) {
3457 struct imsm_map *migr_map = get_imsm_map(dev, 1);
3458
3459 set_imsm_ord_tbl_ent(migr_map, n, ord_to_idx(ord));
3460 super->updates_pending++;
3461 }
3462
3463 failed = imsm_count_failed(super, dev);
3464 map_state = imsm_check_degraded(super, dev, failed);
3465
3466 /* check if recovery complete, newly degraded, or failed */
3467 if (map_state == IMSM_T_STATE_NORMAL && is_rebuilding(dev)) {
3468 end_migration(dev, map_state);
3469 super->updates_pending++;
3470 } else if (map_state == IMSM_T_STATE_DEGRADED &&
3471 map->map_state != map_state &&
3472 !dev->vol.migr_state) {
3473 dprintf("imsm: mark degraded\n");
3474 map->map_state = map_state;
3475 super->updates_pending++;
3476 } else if (map_state == IMSM_T_STATE_FAILED &&
3477 map->map_state != map_state) {
3478 dprintf("imsm: mark failed\n");
3479 end_migration(dev, map_state);
3480 super->updates_pending++;
3481 }
3482 }
3483
3484 static int store_imsm_mpb(int fd, struct intel_super *super)
3485 {
3486 struct imsm_super *mpb = super->anchor;
3487 __u32 mpb_size = __le32_to_cpu(mpb->mpb_size);
3488 unsigned long long dsize;
3489 unsigned long long sectors;
3490
3491 get_dev_size(fd, NULL, &dsize);
3492
3493 if (mpb_size > 512) {
3494 /* -1 to account for anchor */
3495 sectors = mpb_sectors(mpb) - 1;
3496
3497 /* write the extended mpb to the sectors preceeding the anchor */
3498 if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0)
3499 return 1;
3500
3501 if (write(fd, super->buf + 512, 512 * sectors) != 512 * sectors)
3502 return 1;
3503 }
3504
3505 /* first block is stored on second to last sector of the disk */
3506 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
3507 return 1;
3508
3509 if (write(fd, super->buf, 512) != 512)
3510 return 1;
3511
3512 return 0;
3513 }
3514
3515 static void imsm_sync_metadata(struct supertype *container)
3516 {
3517 struct intel_super *super = container->sb;
3518
3519 if (!super->updates_pending)
3520 return;
3521
3522 write_super_imsm(super, 0);
3523
3524 super->updates_pending = 0;
3525 }
3526
3527 static struct dl *imsm_readd(struct intel_super *super, int idx, struct active_array *a)
3528 {
3529 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
3530 int i = get_imsm_disk_idx(dev, idx);
3531 struct dl *dl;
3532
3533 for (dl = super->disks; dl; dl = dl->next)
3534 if (dl->index == i)
3535 break;
3536
3537 if (dl && dl->disk.status & FAILED_DISK)
3538 dl = NULL;
3539
3540 if (dl)
3541 dprintf("%s: found %x:%x\n", __func__, dl->major, dl->minor);
3542
3543 return dl;
3544 }
3545
3546 static struct dl *imsm_add_spare(struct intel_super *super, int slot,
3547 struct active_array *a, int activate_new)
3548 {
3549 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
3550 int idx = get_imsm_disk_idx(dev, slot);
3551 struct imsm_super *mpb = super->anchor;
3552 struct imsm_map *map;
3553 unsigned long long esize;
3554 unsigned long long pos;
3555 struct mdinfo *d;
3556 struct extent *ex;
3557 int i, j;
3558 int found;
3559 __u32 array_start;
3560 __u32 blocks;
3561 struct dl *dl;
3562
3563 for (dl = super->disks; dl; dl = dl->next) {
3564 /* If in this array, skip */
3565 for (d = a->info.devs ; d ; d = d->next)
3566 if (d->state_fd >= 0 &&
3567 d->disk.major == dl->major &&
3568 d->disk.minor == dl->minor) {
3569 dprintf("%x:%x already in array\n", dl->major, dl->minor);
3570 break;
3571 }
3572 if (d)
3573 continue;
3574
3575 /* skip in use or failed drives */
3576 if (dl->disk.status & FAILED_DISK || idx == dl->index) {
3577 dprintf("%x:%x status ( %s%s)\n",
3578 dl->major, dl->minor,
3579 dl->disk.status & FAILED_DISK ? "failed " : "",
3580 idx == dl->index ? "in use " : "");
3581 continue;
3582 }
3583
3584 /* skip pure spares when we are looking for partially
3585 * assimilated drives
3586 */
3587 if (dl->index == -1 && !activate_new)
3588 continue;
3589
3590 /* Does this unused device have the requisite free space?
3591 * It needs to be able to cover all member volumes
3592 */
3593 ex = get_extents(super, dl);
3594 if (!ex) {
3595 dprintf("cannot get extents\n");
3596 continue;
3597 }
3598 for (i = 0; i < mpb->num_raid_devs; i++) {
3599 dev = get_imsm_dev(super, i);
3600 map = get_imsm_map(dev, 0);
3601
3602 /* check if this disk is already a member of
3603 * this array
3604 */
3605 for (j = 0; j < map->num_members; j++)
3606 if (get_imsm_disk_idx(dev, j) == dl->index)
3607 break;
3608 if (j < map->num_members)
3609 continue;
3610
3611 found = 0;
3612 j = 0;
3613 pos = 0;
3614 array_start = __le32_to_cpu(map->pba_of_lba0);
3615 blocks = __le32_to_cpu(map->blocks_per_member);
3616
3617 do {
3618 /* check that we can start at pba_of_lba0 with
3619 * blocks_per_member of space
3620 */
3621 esize = ex[j].start - pos;
3622 if (array_start >= pos &&
3623 array_start + blocks < ex[j].start) {
3624 found = 1;
3625 break;
3626 }
3627 pos = ex[j].start + ex[j].size;
3628 j++;
3629 } while (ex[j-1].size);
3630
3631 if (!found)
3632 break;
3633 }
3634
3635 free(ex);
3636 if (i < mpb->num_raid_devs) {
3637 dprintf("%x:%x does not have %u at %u\n",
3638 dl->major, dl->minor,
3639 blocks, array_start);
3640 /* No room */
3641 continue;
3642 }
3643 return dl;
3644 }
3645
3646 return dl;
3647 }
3648
static struct mdinfo *imsm_activate_spare(struct active_array *a,
					  struct metadata_update **updates)
{
	/**
	 * Find a device with unused free space and use it to replace a
	 * failed/vacant region in an array. We replace failed regions one
	 * array at a time. The result is that a new spare disk will be added
	 * to the first failed array and after the monitor has finished
	 * propagating failures the remainder will be consumed.
	 *
	 * FIXME add a capability for mdmon to request spares from another
	 * container.
	 */

	struct intel_super *super = a->container->sb;
	int inst = a->info.container_member;
	struct imsm_dev *dev = get_imsm_dev(super, inst);
	struct imsm_map *map = get_imsm_map(dev, 0);
	int failed = a->info.array.raid_disks;	/* decremented per working disk below */
	struct mdinfo *rv = NULL;	/* list of chosen spares, returned to caller */
	struct mdinfo *d;
	struct mdinfo *di;
	struct metadata_update *mu;
	struct dl *dl;
	struct imsm_update_activate_spare *u;
	int num_spares = 0;
	int i;

	for (d = a->info.devs ; d ; d = d->next) {
		if ((d->curr_state & DS_FAULTY) &&
			d->state_fd >= 0)
			/* wait for Removal to happen */
			return NULL;
		if (d->state_fd >= 0)
			failed--;
	}

	dprintf("imsm: activate spare: inst=%d failed=%d (%d) level=%d\n",
		inst, failed, a->info.array.raid_disks, a->info.array.level);
	/* only degraded arrays (not optimal, not fully failed) get spares */
	if (imsm_check_degraded(super, dev, failed) != IMSM_T_STATE_DEGRADED)
		return NULL;

	/* For each slot, if it is not working, find a spare */
	for (i = 0; i < a->info.array.raid_disks; i++) {
		for (d = a->info.devs ; d ; d = d->next)
			if (d->disk.raid_disk == i)
				break;
		dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
		if (d && (d->state_fd >= 0))
			continue;

		/*
		 * OK, this device needs recovery.  Try to re-add the
		 * previous occupant of this slot, if this fails see if
		 * we can continue the assimilation of a spare that was
		 * partially assimilated, finally try to activate a new
		 * spare.
		 */
		dl = imsm_readd(super, i, a);
		if (!dl)
			dl = imsm_add_spare(super, i, a, 0);
		if (!dl)
			dl = imsm_add_spare(super, i, a, 1);
		if (!dl)
			continue;

		/* found a usable disk with enough space */
		di = malloc(sizeof(*di));
		if (!di)
			continue;
		memset(di, 0, sizeof(*di));

		/* dl->index will be -1 in the case we are activating a
		 * pristine spare. imsm_process_update() will create a
		 * new index in this case. Once a disk is found to be
		 * failed in all member arrays it is kicked from the
		 * metadata
		 */
		di->disk.number = dl->index;

		/* (ab)use di->devs to store a pointer to the device
		 * we chose
		 */
		di->devs = (struct mdinfo *) dl;

		di->disk.raid_disk = i;
		di->disk.major = dl->major;
		di->disk.minor = dl->minor;
		di->disk.state = 0;
		di->data_offset = __le32_to_cpu(map->pba_of_lba0);
		di->component_size = a->info.component_size;
		di->container_member = inst;
		di->next = rv;
		rv = di;
		num_spares++;
		dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
			i, di->data_offset);

		/* NOTE(review): only one slot is handled per call --
		 * presumably the monitor re-invokes for remaining slots */
		break;
	}

	if (!rv)
		/* No spares found */
		return rv;
	/* Now 'rv' has a list of devices to return.
	 * Create a metadata_update record to update the
	 * disk_ord_tbl for the array
	 */
	mu = malloc(sizeof(*mu));
	if (mu) {
		mu->buf = malloc(sizeof(struct imsm_update_activate_spare) * num_spares);
		if (mu->buf == NULL) {
			free(mu);
			mu = NULL;
		}
	}
	if (!mu) {
		/* allocation failed: release the chosen disks, report none */
		while (rv) {
			struct mdinfo *n = rv->next;

			free(rv);
			rv = n;
		}
		return NULL;
	}

	mu->space = NULL;
	mu->len = sizeof(struct imsm_update_activate_spare) * num_spares;
	mu->next = *updates;
	u = (struct imsm_update_activate_spare *) mu->buf;

	/* one update record per chosen spare, chained via u->next */
	for (di = rv ; di ; di = di->next) {
		u->type = update_activate_spare;
		u->dl = (struct dl *) di->devs;
		di->devs = NULL;
		u->slot = di->disk.raid_disk;
		u->array = inst;
		u->next = u + 1;
		u++;
	}
	(u-1)->next = NULL;
	*updates = mu;

	return rv;
}
3794
3795 static int disks_overlap(struct intel_super *super, int idx, struct imsm_update_create_array *u)
3796 {
3797 struct imsm_dev *dev = get_imsm_dev(super, idx);
3798 struct imsm_map *map = get_imsm_map(dev, 0);
3799 struct imsm_map *new_map = get_imsm_map(&u->dev, 0);
3800 struct disk_info *inf = get_disk_info(u);
3801 struct imsm_disk *disk;
3802 int i;
3803 int j;
3804
3805 for (i = 0; i < map->num_members; i++) {
3806 disk = get_imsm_disk(super, get_imsm_disk_idx(dev, i));
3807 for (j = 0; j < new_map->num_members; j++)
3808 if (serialcmp(disk->serial, inf[j].serial) == 0)
3809 return 1;
3810 }
3811
3812 return 0;
3813 }
3814
3815 static void imsm_delete(struct intel_super *super, struct dl **dlp, int index);
3816
static void imsm_process_update(struct supertype *st,
			        struct metadata_update *update)
{
	/**
	 * crack open the metadata_update envelope to find the update record
	 * update can be one of:
	 * 	update_activate_spare - a spare device has replaced a failed
	 * 	device in an array, update the disk_ord_tbl. If this disk is
	 * 	present in all member arrays then also clear the SPARE_DISK
	 * 	flag
	 * 	update_create_array - commit a newly created subarray to the
	 * 	metadata, converting any spares it consumes into members
	 * 	update_add_disk - move newly arrived disks from super->add
	 * 	onto super->disks
	 */
	struct intel_super *super = st->sb;
	struct imsm_super *mpb;
	enum imsm_update_type type = *(enum imsm_update_type *) update->buf;

	/* update requires a larger buf but the allocation failed */
	if (super->next_len && !super->next_buf) {
		super->next_len = 0;
		return;
	}

	/* swap in the enlarged mpb buffer set up by imsm_prepare_update() */
	if (super->next_buf) {
		memcpy(super->next_buf, super->buf, super->len);
		free(super->buf);
		super->len = super->next_len;
		super->buf = super->next_buf;

		super->next_len = 0;
		super->next_buf = NULL;
	}

	mpb = super->anchor;

	switch (type) {
	case update_activate_spare: {
		struct imsm_update_activate_spare *u = (void *) update->buf;
		struct imsm_dev *dev = get_imsm_dev(super, u->array);
		struct imsm_map *map = get_imsm_map(dev, 0);
		struct imsm_map *migr_map;
		struct active_array *a;
		struct imsm_disk *disk;
		__u8 to_state;
		struct dl *dl;
		unsigned int found;
		int failed;
		int victim = get_imsm_disk_idx(dev, u->slot);
		int i;

		/* validate that u->dl still exists in the container */
		for (dl = super->disks; dl; dl = dl->next)
			if (dl == u->dl)
				break;

		if (!dl) {
			fprintf(stderr, "error: imsm_activate_spare passed "
				"an unknown disk (index: %d)\n",
				u->dl->index);
			return;
		}

		super->updates_pending++;

		/* count failures (excluding rebuilds and the victim)
		 * to determine map[0] state
		 */
		failed = 0;
		for (i = 0; i < map->num_members; i++) {
			if (i == u->slot)
				continue;
			disk = get_imsm_disk(super, get_imsm_disk_idx(dev, i));
			if (!disk || disk->status & FAILED_DISK)
				failed++;
		}

		/* adding a pristine spare, assign a new index */
		if (dl->index < 0) {
			dl->index = super->anchor->num_disks;
			super->anchor->num_disks++;
		}
		disk = &dl->disk;
		disk->status |= CONFIGURED_DISK;
		disk->status &= ~SPARE_DISK;

		/* mark rebuild: map[0] goes degraded while the migration map
		 * carries the rebuild flag for the replaced slot
		 */
		to_state = imsm_check_degraded(super, dev, failed);
		map->map_state = IMSM_T_STATE_DEGRADED;
		migrate(dev, to_state, MIGR_REBUILD);
		migr_map = get_imsm_map(dev, 1);
		set_imsm_ord_tbl_ent(map, u->slot, dl->index);
		set_imsm_ord_tbl_ent(migr_map, u->slot, dl->index | IMSM_ORD_REBUILD);

		/* count arrays using the victim in the metadata */
		found = 0;
		for (a = st->arrays; a ; a = a->next) {
			dev = get_imsm_dev(super, a->info.container_member);
			for (i = 0; i < map->num_members; i++)
				if (victim == get_imsm_disk_idx(dev, i))
					found++;
		}

		/* delete the victim if it is no longer being
		 * utilized anywhere
		 */
		if (!found) {
			struct dl **dlp;

			/* We know that 'manager' isn't touching anything,
			 * so it is safe to delete
			 */
			for (dlp = &super->disks; *dlp; dlp = &(*dlp)->next)
				if ((*dlp)->index == victim)
					break;

			/* victim may be on the missing list */
			if (!*dlp)
				for (dlp = &super->missing; *dlp; dlp = &(*dlp)->next)
					if ((*dlp)->index == victim)
						break;
			imsm_delete(super, dlp, victim);
		}
		break;
	}
	case update_create_array: {
		/* someone wants to create a new array, we need to be aware of
		 * a few races/collisions:
		 * 1/ 'Create' called by two separate instances of mdadm
		 * 2/ 'Create' versus 'activate_spare': mdadm has chosen
		 *    devices that have since been assimilated via
		 *    activate_spare.
		 * In the event this update can not be carried out mdadm will
		 * (FIX ME) notice that its update did not take hold.
		 */
		struct imsm_update_create_array *u = (void *) update->buf;
		struct intel_dev *dv;
		struct imsm_dev *dev;
		struct imsm_map *map, *new_map;
		unsigned long long start, end;
		unsigned long long new_start, new_end;
		int i;
		struct disk_info *inf;
		struct dl *dl;

		/* handle racing creates: first come first serve */
		if (u->dev_idx < mpb->num_raid_devs) {
			dprintf("%s: subarray %d already defined\n",
				__func__, u->dev_idx);
			goto create_error;
		}

		/* check update is next in sequence */
		if (u->dev_idx != mpb->num_raid_devs) {
			dprintf("%s: can not create array %d expected index %d\n",
				__func__, u->dev_idx, mpb->num_raid_devs);
			goto create_error;
		}

		new_map = get_imsm_map(&u->dev, 0);
		new_start = __le32_to_cpu(new_map->pba_of_lba0);
		new_end = new_start + __le32_to_cpu(new_map->blocks_per_member);
		inf = get_disk_info(u);

		/* handle activate_spare versus create race:
		 * check to make sure that overlapping arrays do not include
		 * overlapping disks
		 */
		for (i = 0; i < mpb->num_raid_devs; i++) {
			dev = get_imsm_dev(super, i);
			map = get_imsm_map(dev, 0);
			start = __le32_to_cpu(map->pba_of_lba0);
			end = start + __le32_to_cpu(map->blocks_per_member);
			if ((new_start >= start && new_start <= end) ||
			    (start >= new_start && start <= new_end))
				/* overlap */;
			else
				continue;

			if (disks_overlap(super, i, u)) {
				dprintf("%s: arrays overlap\n", __func__);
				goto create_error;
			}
		}

		/* check that prepare update was successful */
		if (!update->space) {
			dprintf("%s: prepare update failed\n", __func__);
			goto create_error;
		}

		/* check that all disks are still active before committing
		 * changes. FIXME: could we instead handle this by creating a
		 * degraded array? That's probably not what the user expects,
		 * so better to drop this update on the floor.
		 */
		for (i = 0; i < new_map->num_members; i++) {
			dl = serial_to_dl(inf[i].serial, super);
			if (!dl) {
				dprintf("%s: disk disappeared\n", __func__);
				goto create_error;
			}
		}

		super->updates_pending++;

		/* convert spares to members and fixup ord_tbl */
		for (i = 0; i < new_map->num_members; i++) {
			dl = serial_to_dl(inf[i].serial, super);
			if (dl->index == -1) {
				dl->index = mpb->num_disks;
				mpb->num_disks++;
				dl->disk.status |= CONFIGURED_DISK;
				dl->disk.status &= ~SPARE_DISK;
			}
			set_imsm_ord_tbl_ent(new_map, i, dl->index);
		}

		/* commit: take ownership of the buffer preallocated by
		 * imsm_prepare_update() and link it into super->devlist
		 */
		dv = update->space;
		dev = dv->dev;
		update->space = NULL;
		imsm_copy_dev(dev, &u->dev);
		dv->index = u->dev_idx;
		dv->next = super->devlist;
		super->devlist = dv;
		mpb->num_raid_devs++;

		imsm_update_version_info(super);
		break;
 create_error:
		/* mdmon knows how to release update->space, but not
		 * ((struct intel_dev *) update->space)->dev
		 */
		if (update->space) {
			dv = update->space;
			free(dv->dev);
		}
		break;
	}
	case update_add_disk:

		/* we may be able to repair some arrays if disks are
		 * being added */
		if (super->add) {
			struct active_array *a;

			super->updates_pending++;
			for (a = st->arrays; a; a = a->next)
				a->check_degraded = 1;
		}
		/* add some spares to the metadata */
		while (super->add) {
			struct dl *al;

			al = super->add;
			super->add = al->next;
			al->next = super->disks;
			super->disks = al;
			dprintf("%s: added %x:%x\n",
				__func__, al->major, al->minor);
		}

		break;
	}
}
4078
static void imsm_prepare_update(struct supertype *st,
				struct metadata_update *update)
{
	/**
	 * Allocate space to hold new disk entries, raid-device entries or a new
	 * mpb if necessary. The manager synchronously waits for updates to
	 * complete in the monitor, so new mpb buffers allocated here can be
	 * integrated by the monitor thread without worrying about live pointers
	 * in the manager thread.
	 */
	enum imsm_update_type type = *(enum imsm_update_type *) update->buf;
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	size_t buf_len;
	size_t len = 0;	/* extra metadata bytes the update will need */

	switch (type) {
	case update_create_array: {
		struct imsm_update_create_array *u = (void *) update->buf;
		struct intel_dev *dv;
		struct imsm_dev *dev = &u->dev;
		struct imsm_map *map = get_imsm_map(dev, 0);
		struct dl *dl;
		struct disk_info *inf;
		int i;
		int activate = 0;

		inf = get_disk_info(u);
		len = sizeof_imsm_dev(dev, 1);
		/* allocate a new super->devlist entry; on failure
		 * update->space stays NULL and process_update aborts
		 */
		dv = malloc(sizeof(*dv));
		if (dv) {
			dv->dev = malloc(len);
			if (dv->dev)
				update->space = dv;
			else {
				free(dv);
				update->space = NULL;
			}
		}

		/* count how many spares will be converted to members */
		for (i = 0; i < map->num_members; i++) {
			dl = serial_to_dl(inf[i].serial, super);
			if (!dl) {
				/* hmm maybe it failed?, nothing we can do about
				 * it here
				 */
				continue;
			}
			if (count_memberships(dl, super) == 0)
				activate++;
		}
		len += activate * sizeof(struct imsm_disk);
		break;
	/* NOTE(review): this 'default' label sits inside the case's
	 * compound block -- legal C (labels inside nested blocks still
	 * belong to the switch) but easy to misread; the first closing
	 * brace below ends the case block, the second ends the switch.
	 */
	default:
		break;
	}
	}

	/* check if we need a larger metadata buffer */
	if (super->next_buf)
		buf_len = super->next_len;
	else
		buf_len = super->len;

	if (__le32_to_cpu(mpb->mpb_size) + len > buf_len) {
		/* ok we need a larger buf than what is currently allocated
		 * if this allocation fails process_update will notice that
		 * ->next_len is set and ->next_buf is NULL
		 */
		buf_len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) + len, 512);
		if (super->next_buf)
			free(super->next_buf);

		super->next_len = buf_len;
		if (posix_memalign(&super->next_buf, 512, buf_len) != 0)
			super->next_buf = NULL;
	}
}
4159
/* Remove the disk at metadata @index from the container: shift every
 * higher disk index down by one, rewrite the ord tables of all member
 * arrays to match, and free the container entry at @dlp.
 * must be called while manager is quiesced
 */
static void imsm_delete(struct intel_super *super, struct dl **dlp, int index)
{
	struct imsm_super *mpb = super->anchor;
	struct dl *iter;
	struct imsm_dev *dev;
	struct imsm_map *map;
	int i, j, num_members;
	__u32 ord;

	dprintf("%s: deleting device[%d] from imsm_super\n",
		__func__, index);

	/* shift all indexes down one */
	for (iter = super->disks; iter; iter = iter->next)
		if (iter->index > index)
			iter->index--;
	for (iter = super->missing; iter; iter = iter->next)
		if (iter->index > index)
			iter->index--;

	for (i = 0; i < mpb->num_raid_devs; i++) {
		dev = get_imsm_dev(super, i);
		map = get_imsm_map(dev, 0);
		num_members = map->num_members;
		for (j = 0; j < num_members; j++) {
			/* update ord entries being careful not to propagate
			 * ord-flags to the first map
			 */
			ord = get_imsm_ord_tbl_ent(dev, j);

			if (ord_to_idx(ord) <= index)
				continue;

			/* map[0] gets the bare decremented index (flags
			 * masked off); the migration map, if present, keeps
			 * its flags
			 */
			map = get_imsm_map(dev, 0);
			set_imsm_ord_tbl_ent(map, j, ord_to_idx(ord - 1));
			map = get_imsm_map(dev, 1);
			if (map)
				set_imsm_ord_tbl_ent(map, j, ord - 1);
		}
	}

	mpb->num_disks--;
	super->updates_pending++;
	/* unlink and release the container entry, if it was found */
	if (*dlp) {
		struct dl *dl = *dlp;

		*dlp = (*dlp)->next;
		__free_imsm_disk(dl);
	}
}
4211 #endif /* MDASSEMBLE */
4212
4213 struct superswitch super_imsm = {
4214 #ifndef MDASSEMBLE
4215 .examine_super = examine_super_imsm,
4216 .brief_examine_super = brief_examine_super_imsm,
4217 .detail_super = detail_super_imsm,
4218 .brief_detail_super = brief_detail_super_imsm,
4219 .write_init_super = write_init_super_imsm,
4220 .validate_geometry = validate_geometry_imsm,
4221 .add_to_super = add_to_super_imsm,
4222 .detail_platform = detail_platform_imsm,
4223 #endif
4224 .match_home = match_home_imsm,
4225 .uuid_from_super= uuid_from_super_imsm,
4226 .getinfo_super = getinfo_super_imsm,
4227 .update_super = update_super_imsm,
4228
4229 .avail_size = avail_size_imsm,
4230
4231 .compare_super = compare_super_imsm,
4232
4233 .load_super = load_super_imsm,
4234 .init_super = init_super_imsm,
4235 .store_super = store_zero_imsm,
4236 .free_super = free_super_imsm,
4237 .match_metadata_desc = match_metadata_desc_imsm,
4238 .container_content = container_content_imsm,
4239 .default_layout = imsm_level_to_layout,
4240
4241 .external = 1,
4242 .name = "imsm",
4243
4244 #ifndef MDASSEMBLE
4245 /* for mdmon */
4246 .open_new = imsm_open_new,
4247 .load_super = load_super_imsm,
4248 .set_array_state= imsm_set_array_state,
4249 .set_disk = imsm_set_disk,
4250 .sync_metadata = imsm_sync_metadata,
4251 .activate_spare = imsm_activate_spare,
4252 .process_update = imsm_process_update,
4253 .prepare_update = imsm_prepare_update,
4254 #endif /* MDASSEMBLE */
4255 };