/*
 * [scraper artifact] git.ipfire.org Git - thirdparty/mdadm.git - super-intel.c
 * commit subject: "imsm: include not synced disks in imsm_count_failed"
 */
1 /*
2 * mdadm - Intel(R) Matrix Storage Manager Support
3 *
4 * Copyright (C) 2002-2007 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #include "mdadm.h"
21 #include "mdmon.h"
22 #include <values.h>
23 #include <scsi/sg.h>
24 #include <ctype.h>
25
/* MPB == Metadata Parameter Block */
#define MPB_SIGNATURE "Intel Raid ISM Cfg Sig. "
#define MPB_SIG_LEN (strlen(MPB_SIGNATURE))
#define MPB_VERSION_RAID0 "1.0.00"
#define MPB_VERSION_RAID1 "1.1.00"
#define MPB_VERSION_RAID5 "1.2.02"
#define MAX_SIGNATURE_LENGTH  32
#define MAX_RAID_SERIAL_LEN   16
/* the anchor occupies the second-to-last sector of the disk (see
 * load_imsm_mpb); MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS is the region
 * kept free at the end of every member disk for metadata
 */
#define MPB_SECTOR_CNT 418
#define IMSM_RESERVED_SECTORS 4096
36
/* Disk configuration info. */
#define IMSM_MAX_DEVICES 255
/* One on-disk disk-table entry (anchor offsets 0xD8 onward); multi-byte
 * fields are little-endian on disk.  NOTE(review): unlike the other
 * on-disk structs this one carries no packed attribute -- its layout
 * happens to be naturally aligned, but confirm before adding fields.
 */
struct imsm_disk {
	__u8 serial[MAX_RAID_SERIAL_LEN];/* 0xD8 - 0xE7 ascii serial number */
	__u32 total_blocks;		 /* 0xE8 - 0xEB total blocks */
	__u32 scsi_id;			 /* 0xEC - 0xEF scsi ID */
	__u32 status;			 /* 0xF0 - 0xF3 */
	/* bit flags for 'status' */
#define SPARE_DISK      0x01  /* Spare */
#define CONFIGURED_DISK 0x02  /* Member of some RaidDev */
#define FAILED_DISK     0x04  /* Permanent failure */
#define USABLE_DISK     0x08  /* Fully usable unless FAILED_DISK is set */

#define	IMSM_DISK_FILLERS	5
	__u32 filler[IMSM_DISK_FILLERS]; /* 0xF4 - 0x107 MPB_DISK_FILLERS for future expansion */
};
52
/* RAID map configuration infos. */
struct imsm_map {
	__u32 pba_of_lba0;	/* start address of partition */
	__u32 blocks_per_member;/* blocks per member */
	__u32 num_data_stripes;	/* number of data stripes */
	__u16 blocks_per_strip;
	__u8  map_state;	/* Normal, Uninitialized, Degraded, Failed */
#define IMSM_T_STATE_NORMAL 0
#define IMSM_T_STATE_UNINITIALIZED 1
#define IMSM_T_STATE_DEGRADED 2 /* FIXME: is this correct? */
#define IMSM_T_STATE_FAILED 3 /* FIXME: is this correct? */
	__u8  raid_level;
#define IMSM_T_RAID0 0
#define IMSM_T_RAID1 1
#define IMSM_T_RAID5 5		/* since metadata version 1.2.02 ? */
	__u8  num_members;	/* number of member disks */
	__u8  reserved[3];
	__u32 filler[7];	/* expansion area */
	/* ordinal-table entries beyond the first are accounted for by
	 * sizeof_imsm_map()
	 */
#define IMSM_ORD_REBUILD (1 << 24)
	__u32 disk_ord_tbl[1];	/* disk_ord_tbl[num_members],
				 * top byte contains some flags
				 */
} __attribute__ ((packed));
76
/* per-volume migration/dirty state, immediately followed by its map(s) */
struct imsm_vol {
	__u32 reserved[2];
	__u8 migr_state;	/* Normal or Migrating */
	__u8 migr_type;		/* Initializing, Rebuilding, ... */
	__u8 dirty;
	__u8 fill[1];
	__u32 filler[5];
	struct imsm_map map[1];
	/* here comes another one if migr_state */
} __attribute__ ((packed));
87
/* one raid device (volume) record; variable-length because of the
 * trailing imsm_vol map data (see sizeof_imsm_dev())
 */
struct imsm_dev {
	__u8 volume[MAX_RAID_SERIAL_LEN];
	__u32 size_low;
	__u32 size_high;	/* size_high:size_low form the 64-bit sector count */
	__u32 status;	/* Persistent RaidDev status */
	__u32 reserved_blocks; /* Reserved blocks at beginning of volume */
#define	IMSM_DEV_FILLERS	12
	__u32 filler[IMSM_DEV_FILLERS];
	struct imsm_vol vol;
} __attribute__ ((packed));
98
/* the anchor ("mpb"): fixed header, then the disk table, then the raid
 * device records, then (optionally) the BBM log; little-endian on disk
 */
struct imsm_super {
	__u8 sig[MAX_SIGNATURE_LENGTH];	/* 0x00 - 0x1F */
	__u32 check_sum;		/* 0x20 - 0x23 MPB Checksum */
	__u32 mpb_size;			/* 0x24 - 0x27 Size of MPB */
	__u32 family_num;		/* 0x28 - 0x2B Checksum from first time this config was written */
	__u32 generation_num;		/* 0x2C - 0x2F Incremented each time this array's MPB is written */
	__u32 error_log_size;		/* 0x30 - 0x33 in bytes */
	__u32 attributes;		/* 0x34 - 0x37 */
	__u8 num_disks;			/* 0x38 Number of configured disks */
	__u8 num_raid_devs;		/* 0x39 Number of configured volumes */
	__u8 error_log_pos;		/* 0x3A */
	__u8 fill[1];			/* 0x3B */
	__u32 cache_size;		/* 0x3c - 0x40 in mb */
	__u32 orig_family_num;		/* 0x40 - 0x43 original family num */
	__u32 pwr_cycle_count;		/* 0x44 - 0x47 simulated power cycle count for array */
	__u32 bbm_log_size;		/* 0x48 - 0x4B - size of bad Block Mgmt Log in bytes */
#define IMSM_FILLERS 35
	__u32 filler[IMSM_FILLERS];	/* 0x4C - 0xD7 RAID_MPB_FILLERS */
	struct imsm_disk disk[1];	/* 0xD8 diskTbl[numDisks] */
	/* here comes imsm_dev[num_raid_devs] */
	/* here comes BBM logs */
} __attribute__ ((packed));
121
#define BBM_LOG_MAX_ENTRIES 254

/* one remapped-bad-block record in the BBM log */
struct bbm_log_entry {
	__u64 defective_block_start;
#define UNREADABLE 0xFFFFFFFF
	__u32 spare_block_offset;
	__u16 remapped_marked_count;
	__u16 disk_ordinal;
} __attribute__ ((__packed__));
131
/* bad block management log; located at the tail end of the mpb
 * (see __get_imsm_bbm_log())
 */
struct bbm_log {
	__u32 signature;	/* 0xABADB10C */
	__u32 entry_count;
	__u32 reserved_spare_block_count; /* 0 */
	__u32 reserved; /* 0xFFFF */
	__u64 first_spare_lba;
	struct bbm_log_entry mapped_block_entries[BBM_LOG_MAX_ENTRIES];
} __attribute__ ((__packed__));
140
141
#ifndef MDASSEMBLE
/* printable names indexed by imsm_map.map_state (IMSM_T_STATE_*) */
static char *map_state_str[] = { "normal", "uninitialized", "degraded", "failed" };
#endif
145
146 static unsigned int sector_count(__u32 bytes)
147 {
148 return ((bytes + (512-1)) & (~(512-1))) / 512;
149 }
150
151 static unsigned int mpb_sectors(struct imsm_super *mpb)
152 {
153 return sector_count(__le32_to_cpu(mpb->mpb_size));
154 }
155
/* internal representation of IMSM metadata */
struct intel_super {
	union {
		void *buf; /* O_DIRECT buffer for reading/writing metadata */
		struct imsm_super *anchor; /* immovable parameters */
	};
	size_t len; /* size of the 'buf' allocation */
	int updates_pending; /* count of pending updates for mdmon */
	int creating_imsm; /* flag to indicate container creation */
	int current_vol; /* index of raid device undergoing creation */
#define IMSM_MAX_RAID_DEVS 2
	/* parsed, individually-allocated copies of the anchor's raid
	 * device records (see parse_raid_devices())
	 */
	struct imsm_dev *dev_tbl[IMSM_MAX_RAID_DEVS];
	/* linked list with one entry per physical disk we have loaded */
	struct dl {
		struct dl *next;
		int index;	/* position in the anchor's disk table; -1 for spares */
		__u8 serial[MAX_RAID_SERIAL_LEN];
		int major, minor;	/* block device numbers */
		char *devname;	/* strdup'd device name, may be NULL */
		struct imsm_disk disk;	/* copy of this disk's anchor entry */
		int fd;		/* open fd, or -1 when not retained */
	} *disks;
	struct bbm_log *bbm_log; /* points into 'buf' at the BBM log, if any */
};
179
/* a used region (start sector, length) on a physical disk; lists built
 * by get_extents() are terminated by a zero-size entry
 */
struct extent {
	unsigned long long start, size;
};
183
/* definition of messages passed to imsm_process_update */
enum imsm_update_type {
	update_activate_spare,
	update_create_array,
};

/* payload for update_activate_spare messages */
struct imsm_update_activate_spare {
	enum imsm_update_type type;
	struct dl *dl;
	int slot;
	int array;
	struct imsm_update_activate_spare *next;
};

/* payload for update_create_array messages */
struct imsm_update_create_array {
	enum imsm_update_type type;
	int dev_idx;
	struct imsm_dev dev;
};
203
static int imsm_env_devname_as_serial(void)
{
	/* Debug hook: returns 1 when IMSM_DEVNAME_AS_SERIAL=1 is present
	 * in the environment, instructing the serial-number code to use
	 * device names instead of real scsi serials.
	 */
	char *val = getenv("IMSM_DEVNAME_AS_SERIAL");

	return val != NULL && atoi(val) == 1;
}
213
214
215 static struct supertype *match_metadata_desc_imsm(char *arg)
216 {
217 struct supertype *st;
218
219 if (strcmp(arg, "imsm") != 0 &&
220 strcmp(arg, "default") != 0
221 )
222 return NULL;
223
224 st = malloc(sizeof(*st));
225 memset(st, 0, sizeof(*st));
226 st->ss = &super_imsm;
227 st->max_devs = IMSM_MAX_DEVICES;
228 st->minor_version = 0;
229 st->sb = NULL;
230 return st;
231 }
232
233 static __u8 *get_imsm_version(struct imsm_super *mpb)
234 {
235 return &mpb->sig[MPB_SIG_LEN];
236 }
237
238 /* retrieve a disk directly from the anchor when the anchor is known to be
239 * up-to-date, currently only at load time
240 */
241 static struct imsm_disk *__get_imsm_disk(struct imsm_super *mpb, __u8 index)
242 {
243 if (index >= mpb->num_disks)
244 return NULL;
245 return &mpb->disk[index];
246 }
247
248 /* retrieve a disk from the parsed metadata */
249 static struct imsm_disk *get_imsm_disk(struct intel_super *super, __u8 index)
250 {
251 struct dl *d;
252
253 for (d = super->disks; d; d = d->next)
254 if (d->index == index)
255 return &d->disk;
256
257 return NULL;
258 }
259
260 /* generate a checksum directly from the anchor when the anchor is known to be
261 * up-to-date, currently only at load or write_super after coalescing
262 */
263 static __u32 __gen_imsm_checksum(struct imsm_super *mpb)
264 {
265 __u32 end = mpb->mpb_size / sizeof(end);
266 __u32 *p = (__u32 *) mpb;
267 __u32 sum = 0;
268
269 while (end--)
270 sum += __le32_to_cpu(*p++);
271
272 return sum - __le32_to_cpu(mpb->check_sum);
273 }
274
275 static size_t sizeof_imsm_map(struct imsm_map *map)
276 {
277 return sizeof(struct imsm_map) + sizeof(__u32) * (map->num_members - 1);
278 }
279
280 struct imsm_map *get_imsm_map(struct imsm_dev *dev, int second_map)
281 {
282 struct imsm_map *map = &dev->vol.map[0];
283
284 if (second_map && !dev->vol.migr_state)
285 return NULL;
286 else if (second_map) {
287 void *ptr = map;
288
289 return ptr + sizeof_imsm_map(map);
290 } else
291 return map;
292
293 }
294
295 /* return the size of the device.
296 * migr_state increases the returned size if map[0] were to be duplicated
297 */
298 static size_t sizeof_imsm_dev(struct imsm_dev *dev, int migr_state)
299 {
300 size_t size = sizeof(*dev) - sizeof(struct imsm_map) +
301 sizeof_imsm_map(get_imsm_map(dev, 0));
302
303 /* migrating means an additional map */
304 if (dev->vol.migr_state)
305 size += sizeof_imsm_map(get_imsm_map(dev, 1));
306 else if (migr_state)
307 size += sizeof_imsm_map(get_imsm_map(dev, 0));
308
309 return size;
310 }
311
312 static struct imsm_dev *__get_imsm_dev(struct imsm_super *mpb, __u8 index)
313 {
314 int offset;
315 int i;
316 void *_mpb = mpb;
317
318 if (index >= mpb->num_raid_devs)
319 return NULL;
320
321 /* devices start after all disks */
322 offset = ((void *) &mpb->disk[mpb->num_disks]) - _mpb;
323
324 for (i = 0; i <= index; i++)
325 if (i == index)
326 return _mpb + offset;
327 else
328 offset += sizeof_imsm_dev(_mpb + offset, 0);
329
330 return NULL;
331 }
332
333 static struct imsm_dev *get_imsm_dev(struct intel_super *super, __u8 index)
334 {
335 if (index >= super->anchor->num_raid_devs)
336 return NULL;
337 return super->dev_tbl[index];
338 }
339
340 static __u32 get_imsm_disk_idx(struct imsm_map *map, int slot)
341 {
342 __u32 *ord_tbl = &map->disk_ord_tbl[slot];
343
344 /* top byte identifies disk under rebuild
345 * why not just use the USABLE bit... oh well.
346 */
347 return __le32_to_cpu(*ord_tbl & ~(0xff << 24));
348 }
349
350 static __u32 get_imsm_ord_tbl_ent(struct imsm_dev *dev, int slot)
351 {
352 struct imsm_map *map;
353
354 if (dev->vol.migr_state)
355 map = get_imsm_map(dev, 0);
356 else
357 map = get_imsm_map(dev, 1);
358
359 return map->disk_ord_tbl[slot];
360 }
361
362 static int get_imsm_raid_level(struct imsm_map *map)
363 {
364 if (map->raid_level == 1) {
365 if (map->num_members == 2)
366 return 1;
367 else
368 return 10;
369 }
370
371 return map->raid_level;
372 }
373
374 static int cmp_extent(const void *av, const void *bv)
375 {
376 const struct extent *a = av;
377 const struct extent *b = bv;
378 if (a->start < b->start)
379 return -1;
380 if (a->start > b->start)
381 return 1;
382 return 0;
383 }
384
static struct extent *get_extents(struct intel_super *super, struct dl *dl)
{
	/* find a list of used extents on the given physical device */
	struct extent *rv, *e;
	int i, j;
	int memberships = 0;

	/* first pass: count how many raid maps reference this disk so
	 * we know how many extents to allocate (plus one sentinel)
	 */
	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);
		struct imsm_map *map = get_imsm_map(dev, 0);

		for (j = 0; j < map->num_members; j++) {
			__u32 index = get_imsm_disk_idx(map, j);

			if (index == dl->index)
				memberships++;
		}
	}
	rv = malloc(sizeof(struct extent) * (memberships + 1));
	if (!rv)
		return NULL;
	e = rv;

	/* second pass: record this disk's slice of every volume it
	 * belongs to
	 */
	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);
		struct imsm_map *map = get_imsm_map(dev, 0);

		for (j = 0; j < map->num_members; j++) {
			__u32 index = get_imsm_disk_idx(map, j);

			if (index == dl->index) {
				e->start = __le32_to_cpu(map->pba_of_lba0);
				e->size = __le32_to_cpu(map->blocks_per_member);
				e++;
			}
		}
	}
	qsort(rv, memberships, sizeof(*rv), cmp_extent);

	/* zero-size sentinel marking where the reserved metadata region
	 * begins at the end of the disk
	 */
	e->start = __le32_to_cpu(dl->disk.total_blocks) -
		(MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
	e->size = 0;
	return rv;
}
429
430 #ifndef MDASSEMBLE
/* human-readable dump of one raid device; 'index' identifies the disk
 * whose slot is reported as "This Slot"
 */
static void print_imsm_dev(struct imsm_dev *dev, int index)
{
	__u64 sz;
	int slot;
	struct imsm_map *map = get_imsm_map(dev, 0);

	printf("\n");
	printf("[%s]:\n", dev->volume);
	printf(" RAID Level : %d\n", get_imsm_raid_level(map));
	printf(" Members : %d\n", map->num_members);
	/* find our slot in the ordinal table */
	for (slot = 0; slot < map->num_members; slot++)
		if (index == get_imsm_disk_idx(map, slot))
			break;
	if (slot < map->num_members)
		printf(" This Slot : %d\n", slot);
	else
		printf(" This Slot : ?\n");
	/* array size is a 64-bit sector count split across two fields */
	sz = __le32_to_cpu(dev->size_high);
	sz <<= 32;
	sz += __le32_to_cpu(dev->size_low);
	printf(" Array Size : %llu%s\n", (unsigned long long)sz,
		human_size(sz * 512));
	sz = __le32_to_cpu(map->blocks_per_member);
	printf(" Per Dev Size : %llu%s\n", (unsigned long long)sz,
		human_size(sz * 512));
	printf(" Sector Offset : %u\n",
		__le32_to_cpu(map->pba_of_lba0));
	printf(" Num Stripes : %u\n",
		__le32_to_cpu(map->num_data_stripes));
	printf(" Chunk Size : %u KiB\n",
		__le16_to_cpu(map->blocks_per_strip) / 2);
	printf(" Reserved : %d\n", __le32_to_cpu(dev->reserved_blocks));
	printf(" Migrate State : %s", dev->vol.migr_state ? "migrating" : "idle");
	if (dev->vol.migr_state)
		printf(": %s", dev->vol.migr_type ? "rebuilding" : "initializing");
	printf("\n");
	/* while migrating, both the target and source map states exist */
	printf(" Map State : %s", map_state_str[map->map_state]);
	if (dev->vol.migr_state) {
		struct imsm_map *map = get_imsm_map(dev, 1);
		printf(", %s", map_state_str[map->map_state]);
	}
	printf("\n");
	printf(" Dirty State : %s\n", dev->vol.dirty ? "dirty" : "clean");
}
475
476 static void print_imsm_disk(struct imsm_super *mpb, int index)
477 {
478 struct imsm_disk *disk = __get_imsm_disk(mpb, index);
479 char str[MAX_RAID_SERIAL_LEN];
480 __u32 s;
481 __u64 sz;
482
483 if (index < 0)
484 return;
485
486 printf("\n");
487 snprintf(str, MAX_RAID_SERIAL_LEN, "%s", disk->serial);
488 printf(" Disk%02d Serial : %s\n", index, str);
489 s = __le32_to_cpu(disk->status);
490 printf(" State :%s%s%s%s\n", s&SPARE_DISK ? " spare" : "",
491 s&CONFIGURED_DISK ? " active" : "",
492 s&FAILED_DISK ? " failed" : "",
493 s&USABLE_DISK ? " usable" : "");
494 printf(" Id : %08x\n", __le32_to_cpu(disk->scsi_id));
495 sz = __le32_to_cpu(disk->total_blocks) -
496 (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS * mpb->num_raid_devs);
497 printf(" Usable Size : %llu%s\n", (unsigned long long)sz,
498 human_size(sz * 512));
499 }
500
/* --examine output: anchor header, the disk we loaded from, the BBM log
 * (if any), every raid device, then the remaining disks
 */
static void examine_super_imsm(struct supertype *st, char *homehost)
{
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	char str[MAX_SIGNATURE_LENGTH];
	int i;
	__u32 sum;

	snprintf(str, MPB_SIG_LEN, "%s", mpb->sig);
	printf(" Magic : %s\n", str);
	snprintf(str, strlen(MPB_VERSION_RAID0), "%s", get_imsm_version(mpb));
	printf(" Version : %s\n", get_imsm_version(mpb));
	printf(" Family : %08x\n", __le32_to_cpu(mpb->family_num));
	printf(" Generation : %08x\n", __le32_to_cpu(mpb->generation_num));
	/* recompute the checksum so corruption is flagged in the output */
	sum = __le32_to_cpu(mpb->check_sum);
	printf(" Checksum : %08x %s\n", sum,
		__gen_imsm_checksum(mpb) == sum ? "correct" : "incorrect");
	printf(" MPB Sectors : %d\n", mpb_sectors(mpb));
	printf(" Disks : %d\n", mpb->num_disks);
	printf(" RAID Devices : %d\n", mpb->num_raid_devs);
	/* the disk this metadata was read from is printed first */
	print_imsm_disk(mpb, super->disks->index);
	if (super->bbm_log) {
		struct bbm_log *log = super->bbm_log;

		printf("\n");
		printf("Bad Block Management Log:\n");
		printf(" Log Size : %d\n", __le32_to_cpu(mpb->bbm_log_size));
		printf(" Signature : %x\n", __le32_to_cpu(log->signature));
		printf(" Entry Count : %d\n", __le32_to_cpu(log->entry_count));
		printf(" Spare Blocks : %d\n", __le32_to_cpu(log->reserved_spare_block_count));
		printf(" First Spare : %llx\n", __le64_to_cpu(log->first_spare_lba));
	}
	for (i = 0; i < mpb->num_raid_devs; i++)
		print_imsm_dev(__get_imsm_dev(mpb, i), super->disks->index);
	for (i = 0; i < mpb->num_disks; i++) {
		if (i == super->disks->index)
			continue;
		print_imsm_disk(mpb, i);
	}
}
541
static void brief_examine_super_imsm(struct supertype *st)
{
	/* one-line mdadm.conf stanza for the imsm container */
	puts("ARRAY /dev/imsm metadata=imsm");
}
546
static void detail_super_imsm(struct supertype *st, char *homehost)
{
	/* placeholder: just trace that we were called */
	puts(__FUNCTION__);
}
551
static void brief_detail_super_imsm(struct supertype *st)
{
	/* placeholder: just trace that we were called */
	puts(__FUNCTION__);
}
556 #endif
557
static int match_home_imsm(struct supertype *st, char *homehost)
{
	/* placeholder: trace the call and report no match */
	puts(__FUNCTION__);

	return 0;
}
564
static void uuid_from_super_imsm(struct supertype *st, int uuid[4])
{
	/* placeholder: uuid[] is left untouched, only the call is traced */
	puts(__FUNCTION__);
}
569
#if 0
/* parse the dotted version string that follows the signature into its
 * numeric components; currently compiled out
 */
static void
get_imsm_numerical_version(struct imsm_super *mpb, int *m, int *p)
{
	__u8 *v = get_imsm_version(mpb);
	__u8 *end = mpb->sig + MAX_SIGNATURE_LENGTH;
	char major[] = { 0, 0, 0 };
	char minor[] = { 0 ,0, 0 };
	char patch[] = { 0, 0, 0 };
	char *ver_parse[] = { major, minor, patch };
	int i, j;

	i = j = 0;
	while (*v != '\0' && v < end) {
		if (*v != '.' && j < 2)
			ver_parse[i][j++] = *v;
		else {
			i++;
			j = 0;
		}
		v++;
	}

	/* NOTE(review): only minor and patch are returned through *m and
	 * *p; 'major' is parsed but never used -- confirm intent before
	 * re-enabling this helper
	 */
	*m = strtol(minor, NULL, 0);
	*p = strtol(patch, NULL, 0);
}
#endif
597
598 static int imsm_level_to_layout(int level)
599 {
600 switch (level) {
601 case 0:
602 case 1:
603 return 0;
604 case 5:
605 case 6:
606 return ALGORITHM_LEFT_ASYMMETRIC;
607 case 10:
608 return 0x102; //FIXME is this correct?
609 }
610 return -1;
611 }
612
613 static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info)
614 {
615 struct intel_super *super = st->sb;
616 struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
617 struct imsm_map *map = get_imsm_map(dev, 0);
618
619 info->container_member = super->current_vol;
620 info->array.raid_disks = map->num_members;
621 info->array.level = get_imsm_raid_level(map);
622 info->array.layout = imsm_level_to_layout(info->array.level);
623 info->array.md_minor = -1;
624 info->array.ctime = 0;
625 info->array.utime = 0;
626 info->array.chunk_size = __le16_to_cpu(map->blocks_per_strip * 512);
627
628 info->data_offset = __le32_to_cpu(map->pba_of_lba0);
629 info->component_size = __le32_to_cpu(map->blocks_per_member);
630
631 info->disk.major = 0;
632 info->disk.minor = 0;
633
634 sprintf(info->text_version, "/%s/%d",
635 devnum2devname(st->container_dev),
636 info->container_member);
637 }
638
639
/* fill 'info' for the container as a whole; delegates to
 * getinfo_super_imsm_volume() when a specific volume is selected
 */
static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info)
{
	struct intel_super *super = st->sb;
	struct imsm_disk *disk;
	__u32 s;

	if (super->current_vol >= 0) {
		getinfo_super_imsm_volume(st, info);
		return;
	}

	/* Set raid_disks to zero so that Assemble will always pull in valid
	 * spares
	 */
	info->array.raid_disks = 0;
	info->array.level = LEVEL_CONTAINER;
	info->array.layout = 0;
	info->array.md_minor = -1;
	info->array.ctime = 0; /* N/A for imsm */
	info->array.utime = 0;
	info->array.chunk_size = 0;

	info->disk.major = 0;
	info->disk.minor = 0;
	info->disk.raid_disk = -1;
	info->reshape_active = 0;
	strcpy(info->text_version, "imsm");
	info->disk.number = -1;
	info->disk.state = 0;

	if (super->disks) {
		/* describe the disk this metadata was loaded from */
		disk = &super->disks->disk;
		info->disk.number = super->disks->index;
		info->disk.raid_disk = super->disks->index;
		/* the metadata occupies the reserved region at the end
		 * of the disk
		 */
		info->data_offset = __le32_to_cpu(disk->total_blocks) -
			(MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
		info->component_size = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
		/* translate imsm status bits into md disk-state flags */
		s = __le32_to_cpu(disk->status);
		info->disk.state = s & CONFIGURED_DISK ? (1 << MD_DISK_ACTIVE) : 0;
		info->disk.state |= s & FAILED_DISK ? (1 << MD_DISK_FAULTY) : 0;
		info->disk.state |= s & USABLE_DISK ? (1 << MD_DISK_SYNC) : 0;
	}
}
683
static int update_super_imsm(struct supertype *st, struct mdinfo *info,
			     char *update, char *devname, int verbose,
			     int uuid_set, char *homehost)
{
	/* FIXME */

	/* For 'assemble' and 'force' we need to return non-zero if any
	 * change was made. For others, the return value is ignored.
	 * Update options are:
	 * force-one : This device looks a bit old but needs to be included,
	 * update age info appropriately.
	 * assemble: clear any 'faulty' flag to allow this device to
	 * be assembled.
	 * force-array: Array is degraded but being forced, mark it clean
	 * if that will be needed to assemble it.
	 *
	 * newdev: not used ????
	 * grow: Array has gained a new device - this is currently for
	 * linear only
	 * resync: mark as dirty so a resync will happen.
	 * name: update the name - preserving the homehost
	 *
	 * Following are not relevant for this imsm:
	 * sparc2.2 : update from old dodgey metadata
	 * super-minor: change the preferred_minor number
	 * summaries: update redundant counters.
	 * uuid: Change the uuid of the array to match watch is given
	 * homehost: update the recorded homehost
	 * _reshape_progress: record new reshape_progress position.
	 */
	int rv = 0;
	//struct intel_super *super = st->sb;
	//struct imsm_super *mpb = super->mpb;

	/* both branches below are placeholders; no metadata is modified yet */
	if (strcmp(update, "grow") == 0) {
	}
	if (strcmp(update, "resync") == 0) {
		/* dev->vol.dirty = 1; */
	}

	/* IMSM has no concept of UUID or homehost */

	return rv;
}
728
729 static size_t disks_to_mpb_size(int disks)
730 {
731 size_t size;
732
733 size = sizeof(struct imsm_super);
734 size += (disks - 1) * sizeof(struct imsm_disk);
735 size += 2 * sizeof(struct imsm_dev);
736 /* up to 2 maps per raid device (-2 for imsm_maps in imsm_dev */
737 size += (4 - 2) * sizeof(struct imsm_map);
738 /* 4 possible disk_ord_tbl's */
739 size += 4 * (disks - 1) * sizeof(__u32);
740
741 return size;
742 }
743
744 static __u64 avail_size_imsm(struct supertype *st, __u64 devsize)
745 {
746 if (devsize < (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS))
747 return 0;
748
749 return devsize - (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
750 }
751
752 static int compare_super_imsm(struct supertype *st, struct supertype *tst)
753 {
754 /*
755 * return:
756 * 0 same, or first was empty, and second was copied
757 * 1 second had wrong number
758 * 2 wrong uuid
759 * 3 wrong other info
760 */
761 struct intel_super *first = st->sb;
762 struct intel_super *sec = tst->sb;
763
764 if (!first) {
765 st->sb = tst->sb;
766 tst->sb = NULL;
767 return 0;
768 }
769
770 if (memcmp(first->anchor->sig, sec->anchor->sig, MAX_SIGNATURE_LENGTH) != 0)
771 return 3;
772
773 /* if an anchor does not have num_raid_devs set then it is a free
774 * floating spare
775 */
776 if (first->anchor->num_raid_devs > 0 &&
777 sec->anchor->num_raid_devs > 0) {
778 if (first->anchor->family_num != sec->anchor->family_num)
779 return 3;
780 if (first->anchor->mpb_size != sec->anchor->mpb_size)
781 return 3;
782 if (first->anchor->check_sum != sec->anchor->check_sum)
783 return 3;
784 }
785
786 return 0;
787 }
788
789 static void fd2devname(int fd, char *name)
790 {
791 struct stat st;
792 char path[256];
793 char dname[100];
794 char *nm;
795 int rv;
796
797 name[0] = '\0';
798 if (fstat(fd, &st) != 0)
799 return;
800 sprintf(path, "/sys/dev/block/%d:%d",
801 major(st.st_rdev), minor(st.st_rdev));
802
803 rv = readlink(path, dname, sizeof(dname));
804 if (rv <= 0)
805 return;
806
807 dname[rv] = '\0';
808 nm = strrchr(dname, '/');
809 nm++;
810 snprintf(name, MAX_RAID_SERIAL_LEN, "/dev/%s", nm);
811 }
812
813
extern int scsi_get_serial(int fd, void *buf, size_t buf_len);

/* Fetch the device's serial number into 'serial'.  Returns 0 on
 * success, non-zero on failure.  When IMSM_DEVNAME_AS_SERIAL is set in
 * the environment the /dev name is used instead (debug aid).
 */
static int imsm_read_serial(int fd, char *devname,
			    __u8 serial[MAX_RAID_SERIAL_LEN])
{
	unsigned char scsi_serial[255];
	int rv;
	int rsp_len;
	int i, cnt;

	memset(scsi_serial, 0, sizeof(scsi_serial));

	if (imsm_env_devname_as_serial()) {
		char name[MAX_RAID_SERIAL_LEN];

		fd2devname(fd, name);
		strcpy((char *) serial, name);
		return 0;
	}

	rv = scsi_get_serial(fd, scsi_serial, sizeof(scsi_serial));

	if (rv != 0) {
		if (devname)
			fprintf(stderr,
				Name ": Failed to retrieve serial for %s\n",
				devname);
		return rv;
	}

	/* response layout: length byte at offset 3, serial data from
	 * offset 4 (presumably a SCSI VPD page 0x80 response -- confirm
	 * against scsi_get_serial)
	 */
	rsp_len = scsi_serial[3];
	/* copy the serial, squeezing out whitespace and bounding the
	 * result to the metadata field size
	 */
	for (i = 0, cnt = 0; i < rsp_len; i++) {
		if (!isspace(scsi_serial[4 + i]))
			serial[cnt++] = scsi_serial[4 + i];
		if (cnt == MAX_RAID_SERIAL_LEN)
			break;
	}

	serial[MAX_RAID_SERIAL_LEN - 1] = '\0';

	return 0;
}
856
/* Read this disk's serial, reconcile it against the anchor's disk
 * table, and add (or refresh) its entry on the super->disks list.
 * Returns 0 on success, 1 when the disk cannot be matched to the
 * anchor, 2 on serial-read or allocation failure.
 */
static int
load_imsm_disk(int fd, struct intel_super *super, char *devname, int keep_fd)
{
	struct dl *dl;
	struct stat stb;
	int rv;
	int i;
	int alloc = 1;
	__u8 serial[MAX_RAID_SERIAL_LEN];

	rv = imsm_read_serial(fd, devname, serial);

	if (rv != 0)
		return 2;

	/* check if this is a disk we have seen before. it may be a spare in
	 * super->disks while the current anchor believes it is a raid member,
	 * check if we need to update dl->index
	 */
	for (dl = super->disks; dl; dl = dl->next)
		if (memcmp(dl->serial, serial, MAX_RAID_SERIAL_LEN) == 0)
			break;

	if (!dl)
		dl = malloc(sizeof(*dl));
	else
		alloc = 0;

	if (!dl) {
		if (devname)
			fprintf(stderr,
				Name ": failed to allocate disk buffer for %s\n",
				devname);
		return 2;
	}

	if (alloc) {
		/* first sighting: record identity and (optionally) keep
		 * the fd open
		 */
		fstat(fd, &stb);
		dl->major = major(stb.st_rdev);
		dl->minor = minor(stb.st_rdev);
		dl->next = super->disks;
		dl->fd = keep_fd ? fd : -1;
		dl->devname = devname ? strdup(devname) : NULL;
		strncpy((char *) dl->serial, (char *) serial, MAX_RAID_SERIAL_LEN);
	} else if (keep_fd) {
		/* already listed: swap in the newer fd */
		close(dl->fd);
		dl->fd = fd;
	}

	/* look up this disk's index in the current anchor */
	for (i = 0; i < super->anchor->num_disks; i++) {
		struct imsm_disk *disk_iter;

		disk_iter = __get_imsm_disk(super->anchor, i);

		if (memcmp(disk_iter->serial, dl->serial,
			   MAX_RAID_SERIAL_LEN) == 0) {
			__u32 status;

			dl->disk = *disk_iter;
			status = __le32_to_cpu(dl->disk.status);
			/* only set index on disks that are a member of a
			 * populated container, i.e. one with raid_devs
			 */
			if (status & SPARE_DISK)
				dl->index = -1;
			else
				dl->index = i;
			break;
		}
	}

	/* newly-seen disk that is not in the anchor at all: reject it */
	if (i == super->anchor->num_disks && alloc) {
		if (devname)
			fprintf(stderr,
				Name ": failed to load disk with serial \'%s\' for %s\n",
				dl->serial, devname);
		free(dl);
		return 1;
	}
	/* previously-seen raid member that has vanished from the anchor */
	if (i == super->anchor->num_disks && dl->index >= 0) {
		if (devname)
			fprintf(stderr,
				Name ": confused... disk %d with serial \'%s\' "
				"is not listed in the current anchor\n",
				dl->index, dl->serial);
		return 1;
	}

	if (alloc)
		super->disks = dl;

	return 0;
}
951
/* copy a raid device record including only its currently-active map(s) */
static void imsm_copy_dev(struct imsm_dev *dest, struct imsm_dev *src)
{
	memcpy(dest, src, sizeof_imsm_dev(src, 0));
}
956
/* duplicate map[0] into the second-map slot; the caller must have
 * allocated the device record with room for two maps
 */
static void dup_map(struct imsm_dev *dev)
{
	struct imsm_map *dest = get_imsm_map(dev, 1);
	struct imsm_map *src = get_imsm_map(dev, 0);

	memcpy(dest, src, sizeof_imsm_map(src));
}
964
965 static int parse_raid_devices(struct intel_super *super)
966 {
967 int i;
968 struct imsm_dev *dev_new;
969 size_t len;
970
971 for (i = 0; i < super->anchor->num_raid_devs; i++) {
972 struct imsm_dev *dev_iter = __get_imsm_dev(super->anchor, i);
973
974 len = sizeof_imsm_dev(dev_iter, 1);
975 dev_new = malloc(len);
976 if (!dev_new)
977 return 1;
978 imsm_copy_dev(dev_new, dev_iter);
979 super->dev_tbl[i] = dev_new;
980 }
981
982 return 0;
983 }
984
985 /* retrieve a pointer to the bbm log which starts after all raid devices */
986 struct bbm_log *__get_imsm_bbm_log(struct imsm_super *mpb)
987 {
988 void *ptr = NULL;
989
990 if (__le32_to_cpu(mpb->bbm_log_size)) {
991 ptr = mpb;
992 ptr += mpb->mpb_size - __le32_to_cpu(mpb->bbm_log_size);
993 }
994
995 return ptr;
996 }
997
998 static void __free_imsm(struct intel_super *super, int free_disks);
999
1000 /* load_imsm_mpb - read matrix metadata
1001 * allocates super->mpb to be freed by free_super
1002 */
1003 static int load_imsm_mpb(int fd, struct intel_super *super, char *devname)
1004 {
1005 unsigned long long dsize;
1006 unsigned long long sectors;
1007 struct stat;
1008 struct imsm_super *anchor;
1009 __u32 check_sum;
1010 int rc;
1011
1012 get_dev_size(fd, NULL, &dsize);
1013
1014 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0) {
1015 if (devname)
1016 fprintf(stderr,
1017 Name ": Cannot seek to anchor block on %s: %s\n",
1018 devname, strerror(errno));
1019 return 1;
1020 }
1021
1022 if (posix_memalign((void**)&anchor, 512, 512) != 0) {
1023 if (devname)
1024 fprintf(stderr,
1025 Name ": Failed to allocate imsm anchor buffer"
1026 " on %s\n", devname);
1027 return 1;
1028 }
1029 if (read(fd, anchor, 512) != 512) {
1030 if (devname)
1031 fprintf(stderr,
1032 Name ": Cannot read anchor block on %s: %s\n",
1033 devname, strerror(errno));
1034 free(anchor);
1035 return 1;
1036 }
1037
1038 if (strncmp((char *) anchor->sig, MPB_SIGNATURE, MPB_SIG_LEN) != 0) {
1039 if (devname)
1040 fprintf(stderr,
1041 Name ": no IMSM anchor on %s\n", devname);
1042 free(anchor);
1043 return 2;
1044 }
1045
1046 __free_imsm(super, 0);
1047 super->len = __le32_to_cpu(anchor->mpb_size);
1048 super->len = ROUND_UP(anchor->mpb_size, 512);
1049 if (posix_memalign(&super->buf, 512, super->len) != 0) {
1050 if (devname)
1051 fprintf(stderr,
1052 Name ": unable to allocate %zu byte mpb buffer\n",
1053 super->len);
1054 free(anchor);
1055 return 2;
1056 }
1057 memcpy(super->buf, anchor, 512);
1058
1059 sectors = mpb_sectors(anchor) - 1;
1060 free(anchor);
1061 if (!sectors) {
1062 rc = load_imsm_disk(fd, super, devname, 0);
1063 if (rc == 0)
1064 rc = parse_raid_devices(super);
1065 return rc;
1066 }
1067
1068 /* read the extended mpb */
1069 if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0) {
1070 if (devname)
1071 fprintf(stderr,
1072 Name ": Cannot seek to extended mpb on %s: %s\n",
1073 devname, strerror(errno));
1074 return 1;
1075 }
1076
1077 if (read(fd, super->buf + 512, super->len - 512) != super->len - 512) {
1078 if (devname)
1079 fprintf(stderr,
1080 Name ": Cannot read extended mpb on %s: %s\n",
1081 devname, strerror(errno));
1082 return 2;
1083 }
1084
1085 check_sum = __gen_imsm_checksum(super->anchor);
1086 if (check_sum != __le32_to_cpu(super->anchor->check_sum)) {
1087 if (devname)
1088 fprintf(stderr,
1089 Name ": IMSM checksum %x != %x on %s\n",
1090 check_sum, __le32_to_cpu(super->anchor->check_sum),
1091 devname);
1092 return 2;
1093 }
1094
1095 /* FIXME the BBM log is disk specific so we cannot use this global
1096 * buffer for all disks. Ok for now since we only look at the global
1097 * bbm_log_size parameter to gate assembly
1098 */
1099 super->bbm_log = __get_imsm_bbm_log(super->anchor);
1100
1101 rc = load_imsm_disk(fd, super, devname, 0);
1102 if (rc == 0)
1103 rc = parse_raid_devices(super);
1104 return rc;
1105 }
1106
1107 static void free_imsm_disks(struct intel_super *super)
1108 {
1109 while (super->disks) {
1110 struct dl *d = super->disks;
1111
1112 super->disks = d->next;
1113 if (d->fd >= 0)
1114 close(d->fd);
1115 if (d->devname)
1116 free(d->devname);
1117 free(d);
1118 }
1119 }
1120
1121 /* free all the pieces hanging off of a super pointer */
1122 static void __free_imsm(struct intel_super *super, int free_disks)
1123 {
1124 int i;
1125
1126 if (super->buf) {
1127 free(super->buf);
1128 super->buf = NULL;
1129 }
1130 if (free_disks)
1131 free_imsm_disks(super);
1132 for (i = 0; i < IMSM_MAX_RAID_DEVS; i++)
1133 if (super->dev_tbl[i]) {
1134 free(super->dev_tbl[i]);
1135 super->dev_tbl[i] = NULL;
1136 }
1137 }
1138
/* Release a super pointer entirely: free all attached resources
 * (metadata buffer, disk list, raid device table) and then the
 * struct intel_super itself.
 */
static void free_imsm(struct intel_super *super)
{
	__free_imsm(super, 1);
	free(super);
}
1144
1145 static void free_super_imsm(struct supertype *st)
1146 {
1147 struct intel_super *super = st->sb;
1148
1149 if (!super)
1150 return;
1151
1152 free_imsm(super);
1153 st->sb = NULL;
1154 }
1155
1156 static struct intel_super *alloc_super(int creating_imsm)
1157 {
1158 struct intel_super *super = malloc(sizeof(*super));
1159
1160 if (super) {
1161 memset(super, 0, sizeof(*super));
1162 super->creating_imsm = creating_imsm;
1163 super->current_vol = -1;
1164 }
1165
1166 return super;
1167 }
1168
1169 #ifndef MDASSEMBLE
1170 static int load_super_imsm_all(struct supertype *st, int fd, void **sbp,
1171 char *devname, int keep_fd)
1172 {
1173 struct mdinfo *sra;
1174 struct intel_super *super;
1175 struct mdinfo *sd, *best = NULL;
1176 __u32 bestgen = 0;
1177 __u32 gen;
1178 char nm[20];
1179 int dfd;
1180 int rv;
1181
1182 /* check if this disk is a member of an active array */
1183 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
1184 if (!sra)
1185 return 1;
1186
1187 if (sra->array.major_version != -1 ||
1188 sra->array.minor_version != -2 ||
1189 strcmp(sra->text_version, "imsm") != 0)
1190 return 1;
1191
1192 super = alloc_super(0);
1193 if (!super)
1194 return 1;
1195
1196 /* find the most up to date disk in this array, skipping spares */
1197 for (sd = sra->devs; sd; sd = sd->next) {
1198 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
1199 dfd = dev_open(nm, keep_fd ? O_RDWR : O_RDONLY);
1200 if (!dfd) {
1201 free_imsm(super);
1202 return 2;
1203 }
1204 rv = load_imsm_mpb(dfd, super, NULL);
1205 if (!keep_fd)
1206 close(dfd);
1207 if (rv == 0) {
1208 if (super->anchor->num_raid_devs == 0)
1209 gen = 0;
1210 else
1211 gen = __le32_to_cpu(super->anchor->generation_num);
1212 if (!best || gen > bestgen) {
1213 bestgen = gen;
1214 best = sd;
1215 }
1216 } else {
1217 free_imsm(super);
1218 return 2;
1219 }
1220 }
1221
1222 if (!best) {
1223 free_imsm(super);
1224 return 1;
1225 }
1226
1227 /* load the most up to date anchor */
1228 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
1229 dfd = dev_open(nm, O_RDONLY);
1230 if (!dfd) {
1231 free_imsm(super);
1232 return 1;
1233 }
1234 rv = load_imsm_mpb(dfd, super, NULL);
1235 close(dfd);
1236 if (rv != 0) {
1237 free_imsm(super);
1238 return 2;
1239 }
1240
1241 /* re-parse the disk list with the current anchor */
1242 for (sd = sra->devs ; sd ; sd = sd->next) {
1243 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
1244 dfd = dev_open(nm, keep_fd? O_RDWR : O_RDONLY);
1245 if (!dfd) {
1246 free_imsm(super);
1247 return 2;
1248 }
1249 load_imsm_disk(dfd, super, NULL, keep_fd);
1250 if (!keep_fd)
1251 close(dfd);
1252 }
1253
1254 if (st->subarray[0]) {
1255 if (atoi(st->subarray) <= super->anchor->num_raid_devs)
1256 super->current_vol = atoi(st->subarray);
1257 else
1258 return 1;
1259 }
1260
1261 *sbp = super;
1262 if (st->ss == NULL) {
1263 st->ss = &super_imsm;
1264 st->minor_version = 0;
1265 st->max_devs = IMSM_MAX_DEVICES;
1266 st->container_dev = fd2devnum(fd);
1267 }
1268
1269 return 0;
1270 }
1271 #endif
1272
1273 static int load_super_imsm(struct supertype *st, int fd, char *devname)
1274 {
1275 struct intel_super *super;
1276 int rv;
1277
1278 #ifndef MDASSEMBLE
1279 if (load_super_imsm_all(st, fd, &st->sb, devname, 1) == 0)
1280 return 0;
1281 #endif
1282 if (st->subarray[0])
1283 return 1; /* FIXME */
1284
1285 super = alloc_super(0);
1286 if (!super) {
1287 fprintf(stderr,
1288 Name ": malloc of %zu failed.\n",
1289 sizeof(*super));
1290 return 1;
1291 }
1292
1293 rv = load_imsm_mpb(fd, super, devname);
1294
1295 if (rv) {
1296 if (devname)
1297 fprintf(stderr,
1298 Name ": Failed to load all information "
1299 "sections on %s\n", devname);
1300 free_imsm(super);
1301 return rv;
1302 }
1303
1304 st->sb = super;
1305 if (st->ss == NULL) {
1306 st->ss = &super_imsm;
1307 st->minor_version = 0;
1308 st->max_devs = IMSM_MAX_DEVICES;
1309 }
1310
1311 return 0;
1312 }
1313
1314 static __u16 info_to_blocks_per_strip(mdu_array_info_t *info)
1315 {
1316 if (info->level == 1)
1317 return 128;
1318 return info->chunk_size >> 9;
1319 }
1320
1321 static __u32 info_to_num_data_stripes(mdu_array_info_t *info)
1322 {
1323 __u32 num_stripes;
1324
1325 num_stripes = (info->size * 2) / info_to_blocks_per_strip(info);
1326 if (info->level == 1)
1327 num_stripes /= 2;
1328
1329 return num_stripes;
1330 }
1331
/* Member size in 512-byte blocks, rounded down to a strip boundary.
 * NOTE(review): the mask only works if blocks_per_strip is a power of
 * two — presumably guaranteed by the chunk-size validation; confirm.
 */
static __u32 info_to_blocks_per_member(mdu_array_info_t *info)
{
	return (info->size * 2) & ~(info_to_blocks_per_strip(info) - 1);
}
1336
1337 static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
1338 unsigned long long size, char *name,
1339 char *homehost, int *uuid)
1340 {
1341 /* We are creating a volume inside a pre-existing container.
1342 * so st->sb is already set.
1343 */
1344 struct intel_super *super = st->sb;
1345 struct imsm_super *mpb = super->anchor;
1346 struct imsm_dev *dev;
1347 struct imsm_vol *vol;
1348 struct imsm_map *map;
1349 int idx = mpb->num_raid_devs;
1350 int i;
1351 unsigned long long array_blocks;
1352 __u32 offset = 0;
1353 size_t size_old, size_new;
1354
1355 if (mpb->num_raid_devs >= 2) {
1356 fprintf(stderr, Name": This imsm-container already has the "
1357 "maximum of 2 volumes\n");
1358 return 0;
1359 }
1360
1361 /* ensure the mpb is large enough for the new data */
1362 size_old = __le32_to_cpu(mpb->mpb_size);
1363 size_new = disks_to_mpb_size(info->nr_disks);
1364 if (size_new > size_old) {
1365 void *mpb_new;
1366 size_t size_round = ROUND_UP(size_new, 512);
1367
1368 if (posix_memalign(&mpb_new, 512, size_round) != 0) {
1369 fprintf(stderr, Name": could not allocate new mpb\n");
1370 return 0;
1371 }
1372 memcpy(mpb_new, mpb, size_old);
1373 free(mpb);
1374 mpb = mpb_new;
1375 super->anchor = mpb_new;
1376 mpb->mpb_size = __cpu_to_le32(size_new);
1377 memset(mpb_new + size_old, 0, size_round - size_old);
1378 }
1379 super->current_vol = idx;
1380 /* when creating the first raid device in this container set num_disks
1381 * to zero, i.e. delete this spare and add raid member devices in
1382 * add_to_super_imsm_volume()
1383 */
1384 if (super->current_vol == 0)
1385 mpb->num_disks = 0;
1386 sprintf(st->subarray, "%d", idx);
1387 dev = malloc(sizeof(*dev) + sizeof(__u32) * (info->raid_disks - 1));
1388 if (!dev) {
1389 fprintf(stderr, Name": could not allocate raid device\n");
1390 return 0;
1391 }
1392 strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
1393 array_blocks = calc_array_size(info->level, info->raid_disks,
1394 info->layout, info->chunk_size,
1395 info->size*2);
1396 dev->size_low = __cpu_to_le32((__u32) array_blocks);
1397 dev->size_high = __cpu_to_le32((__u32) (array_blocks >> 32));
1398 dev->status = __cpu_to_le32(0);
1399 dev->reserved_blocks = __cpu_to_le32(0);
1400 vol = &dev->vol;
1401 vol->migr_state = 0;
1402 vol->migr_type = 0;
1403 vol->dirty = 0;
1404 for (i = 0; i < idx; i++) {
1405 struct imsm_dev *prev = get_imsm_dev(super, i);
1406 struct imsm_map *pmap = get_imsm_map(prev, 0);
1407
1408 offset += __le32_to_cpu(pmap->blocks_per_member);
1409 offset += IMSM_RESERVED_SECTORS;
1410 }
1411 map = get_imsm_map(dev, 0);
1412 map->pba_of_lba0 = __cpu_to_le32(offset);
1413 map->blocks_per_member = __cpu_to_le32(info_to_blocks_per_member(info));
1414 map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info));
1415 map->num_data_stripes = __cpu_to_le32(info_to_num_data_stripes(info));
1416 map->map_state = info->level ? IMSM_T_STATE_UNINITIALIZED :
1417 IMSM_T_STATE_NORMAL;
1418
1419 if (info->level == 1 && info->raid_disks > 2) {
1420 fprintf(stderr, Name": imsm does not support more than 2 disks"
1421 "in a raid1 volume\n");
1422 return 0;
1423 }
1424 if (info->level == 10)
1425 map->raid_level = 1;
1426 else
1427 map->raid_level = info->level;
1428
1429 map->num_members = info->raid_disks;
1430 for (i = 0; i < map->num_members; i++) {
1431 /* initialized in add_to_super */
1432 map->disk_ord_tbl[i] = __cpu_to_le32(0);
1433 }
1434 mpb->num_raid_devs++;
1435 super->dev_tbl[super->current_vol] = dev;
1436
1437 return 1;
1438 }
1439
1440 static int init_super_imsm(struct supertype *st, mdu_array_info_t *info,
1441 unsigned long long size, char *name,
1442 char *homehost, int *uuid)
1443 {
1444 /* This is primarily called by Create when creating a new array.
1445 * We will then get add_to_super called for each component, and then
1446 * write_init_super called to write it out to each device.
1447 * For IMSM, Create can create on fresh devices or on a pre-existing
1448 * array.
1449 * To create on a pre-existing array a different method will be called.
1450 * This one is just for fresh drives.
1451 */
1452 struct intel_super *super;
1453 struct imsm_super *mpb;
1454 size_t mpb_size;
1455
1456 if (!info) {
1457 st->sb = NULL;
1458 return 0;
1459 }
1460 if (st->sb)
1461 return init_super_imsm_volume(st, info, size, name, homehost,
1462 uuid);
1463
1464 super = alloc_super(1);
1465 if (!super)
1466 return 0;
1467 mpb_size = disks_to_mpb_size(info->nr_disks);
1468 if (posix_memalign(&super->buf, 512, mpb_size) != 0) {
1469 free(super);
1470 return 0;
1471 }
1472 mpb = super->buf;
1473 memset(mpb, 0, mpb_size);
1474
1475 memcpy(mpb->sig, MPB_SIGNATURE, strlen(MPB_SIGNATURE));
1476 memcpy(mpb->sig + strlen(MPB_SIGNATURE), MPB_VERSION_RAID5,
1477 strlen(MPB_VERSION_RAID5));
1478 mpb->mpb_size = mpb_size;
1479
1480 st->sb = super;
1481 return 1;
1482 }
1483
1484 static void add_to_super_imsm_volume(struct supertype *st, mdu_disk_info_t *dk,
1485 int fd, char *devname)
1486 {
1487 struct intel_super *super = st->sb;
1488 struct imsm_super *mpb = super->anchor;
1489 struct dl *dl;
1490 struct imsm_dev *dev;
1491 struct imsm_map *map;
1492 __u32 status;
1493
1494 dev = get_imsm_dev(super, super->current_vol);
1495 map = get_imsm_map(dev, 0);
1496
1497 for (dl = super->disks; dl ; dl = dl->next)
1498 if (dl->major == dk->major &&
1499 dl->minor == dk->minor)
1500 break;
1501
1502 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
1503 return;
1504
1505 /* add a pristine spare to the metadata */
1506 if (dl->index < 0) {
1507 dl->index = super->anchor->num_disks;
1508 super->anchor->num_disks++;
1509 }
1510 map->disk_ord_tbl[dk->number] = __cpu_to_le32(dl->index);
1511 status = CONFIGURED_DISK | USABLE_DISK;
1512 dl->disk.status = __cpu_to_le32(status);
1513
1514 /* if we are creating the first raid device update the family number */
1515 if (super->current_vol == 0) {
1516 __u32 sum;
1517 struct imsm_dev *_dev = __get_imsm_dev(mpb, 0);
1518 struct imsm_disk *_disk = __get_imsm_disk(mpb, dl->index);
1519
1520 *_dev = *dev;
1521 *_disk = dl->disk;
1522 sum = __gen_imsm_checksum(mpb);
1523 mpb->family_num = __cpu_to_le32(sum);
1524 }
1525 }
1526
/* Add a disk to the container (or, when a volume is currently being
 * defined, delegate to add_to_super_imsm_volume()).
 *
 * The new disk is recorded on the container list as a spare (index -1);
 * it becomes a raid member later.  On allocation or serial-number
 * failure this aborts, matching the Create path's error policy.
 */
static void add_to_super_imsm(struct supertype *st, mdu_disk_info_t *dk,
			      int fd, char *devname)
{
	struct intel_super *super = st->sb;
	struct dl *dd;
	unsigned long long size;
	__u32 status, id;
	int rv;
	struct stat stb;

	if (super->current_vol >= 0) {
		add_to_super_imsm_volume(st, dk, fd, devname);
		return;
	}

	/* NOTE(review): fstat/get_dev_size return values are unchecked —
	 * presumably fd is always a valid open block device here; confirm */
	fstat(fd, &stb);
	dd = malloc(sizeof(*dd));
	if (!dd) {
		fprintf(stderr,
			Name ": malloc failed %s:%d.\n", __func__, __LINE__);
		abort();
	}
	memset(dd, 0, sizeof(*dd));
	dd->major = major(stb.st_rdev);
	dd->minor = minor(stb.st_rdev);
	/* -1 marks a spare that is not yet a raid member */
	dd->index = -1;
	dd->devname = devname ? strdup(devname) : NULL;
	dd->next = super->disks;
	dd->fd = fd;
	rv = imsm_read_serial(fd, devname, dd->serial);
	if (rv) {
		fprintf(stderr,
			Name ": failed to retrieve scsi serial, aborting\n");
		free(dd);
		abort();
	}

	/* record the device size in 512-byte sectors */
	get_dev_size(fd, NULL, &size);
	size /= 512;
	status = USABLE_DISK | SPARE_DISK;
	/* NOTE(review): strcpy assumes dd->serial is NUL-terminated within
	 * MAX_RAID_SERIAL_LEN — verify against imsm_read_serial() */
	strcpy((char *) dd->disk.serial, (char *) dd->serial);
	dd->disk.total_blocks = __cpu_to_le32(size);
	dd->disk.status = __cpu_to_le32(status);
	if (sysfs_disk_to_scsi_id(fd, &id) == 0)
		dd->disk.scsi_id = __cpu_to_le32(id);
	else
		dd->disk.scsi_id = __cpu_to_le32(0);
	super->disks = dd;
}
1576
1577 static int store_imsm_mpb(int fd, struct intel_super *super);
1578
/* spare records have their own family number and do not have any defined raid
 * devices
 */
/* Write a stand-alone spare record to every unassigned disk.
 *
 * The live anchor is temporarily rewritten as a minimal one-disk mpb
 * (no raid devices, generation 1), stored to each spare, and restored
 * from the stack copy before returning.
 *
 * Returns 0 on success, 1 if storing to any spare fails (the anchor is
 * restored first either way).
 */
static int write_super_imsm_spares(struct intel_super *super, int doclose)
{
	struct imsm_super mpb_save;
	struct imsm_super *mpb = super->anchor;
	__u32 sum;
	struct dl *d;

	/* save the real anchor header so it can be restored afterwards */
	mpb_save = *mpb;
	mpb->num_raid_devs = 0;
	mpb->num_disks = 1;
	/* NOTE(review): raw assignment, while other writers use
	 * __cpu_to_le32 for mpb_size — verify on big-endian */
	mpb->mpb_size = sizeof(struct imsm_super);
	mpb->generation_num = __cpu_to_le32(1UL);

	for (d = super->disks; d; d = d->next) {
		/* index >= 0 means a raid member, not a spare */
		if (d->index >= 0)
			continue;

		mpb->disk[0] = d->disk;
		/* the first checksum seeds a per-spare family number; the
		 * second (taken after family_num is set) is the real
		 * on-disk check_sum */
		sum = __gen_imsm_checksum(mpb);
		mpb->family_num = __cpu_to_le32(sum);
		sum = __gen_imsm_checksum(mpb);
		mpb->check_sum = __cpu_to_le32(sum);

		if (store_imsm_mpb(d->fd, super)) {
			fprintf(stderr, "%s: failed for device %d:%d %s\n",
				__func__, d->major, d->minor, strerror(errno));
			*mpb = mpb_save;
			return 1;
		}
		if (doclose) {
			close(d->fd);
			d->fd = -1;
		}
	}

	*mpb = mpb_save;
	return 0;
}
1620
/* Write the container metadata to all raid member disks.
 *
 * Bumps the generation number, rebuilds the disk table and serialized
 * raid devices from the in-core state, recomputes the mpb size and
 * checksum, then stores the anchor on every member.  Spares are handled
 * afterwards by write_super_imsm_spares().
 *
 * Returns 0 on success, 1 on a disk-count mismatch or store failure.
 */
static int write_super_imsm(struct intel_super *super, int doclose)
{
	struct imsm_super *mpb = super->anchor;
	struct dl *d;
	__u32 generation;
	__u32 sum;
	int spares = 0;
	int raid_disks = 0;
	int i;
	/* start from the header size minus the one imsm_disk embedded in
	 * struct imsm_super; member disks are added back in the loop */
	__u32 mpb_size = sizeof(struct imsm_super) - sizeof(struct imsm_disk);

	/* 'generation' is incremented everytime the metadata is written */
	generation = __le32_to_cpu(mpb->generation_num);
	generation++;
	mpb->generation_num = __cpu_to_le32(generation);

	/* copy each member's record into the disk table; spares (index < 0)
	 * are only counted here and written separately below */
	for (d = super->disks; d; d = d->next) {
		if (d->index < 0)
			spares++;
		else {
			raid_disks++;
			mpb->disk[d->index] = d->disk;
			mpb_size += sizeof(struct imsm_disk);
		}
	}
	if (raid_disks != mpb->num_disks) {
		fprintf(stderr, "%s: expected %d disks only found %d\n",
			__func__, mpb->num_disks, raid_disks);
		return 1;
	}

	/* serialize the raid devices after the disk table */
	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct imsm_dev *dev = __get_imsm_dev(mpb, i);

		imsm_copy_dev(dev, super->dev_tbl[i]);
		mpb_size += sizeof_imsm_dev(dev, 0);
	}
	mpb_size += __le32_to_cpu(mpb->bbm_log_size);
	mpb->mpb_size = __cpu_to_le32(mpb_size);

	/* recalculate checksum */
	sum = __gen_imsm_checksum(mpb);
	mpb->check_sum = __cpu_to_le32(sum);

	/* write the mpb for disks that compose raid devices */
	for (d = super->disks; d ; d = d->next) {
		if (d->index < 0)
			continue;
		if (store_imsm_mpb(d->fd, super)) {
			fprintf(stderr, "%s: failed for device %d:%d %s\n",
				__func__, d->major, d->minor, strerror(errno));
			return 1;
		}
		if (doclose) {
			close(d->fd);
			d->fd = -1;
		}
	}

	/* spares get their own stand-alone records */
	if (spares)
		return write_super_imsm_spares(super, doclose);

	return 0;
}
1685
/* Commit newly created metadata.
 *
 * When mdmon is active (st->update_tail set) the creation is queued as
 * a metadata update for mdmon to apply and the member fds are handed
 * over; otherwise the superblock is written directly to all members.
 *
 * Returns 0 on success, 1 when the sub-array cannot be determined or
 * the update record cannot be allocated.
 */
static int write_init_super_imsm(struct supertype *st)
{
	if (st->update_tail) {
		/* queue the recently created array as a metadata update */
		size_t len;
		struct imsm_update_create_array *u;
		struct intel_super *super = st->sb;
		struct imsm_dev *dev;
		struct dl *d;

		if (super->current_vol < 0 ||
		    !(dev = get_imsm_dev(super, super->current_vol))) {
			fprintf(stderr, "%s: could not determine sub-array\n",
				__func__);
			return 1;
		}

		/* the update embeds a variable-size imsm_dev; subtract the
		 * placeholder dev already counted in sizeof(*u) */
		len = sizeof(*u) - sizeof(*dev) + sizeof_imsm_dev(dev, 0);
		u = malloc(len);
		if (!u) {
			fprintf(stderr, "%s: failed to allocate update buffer\n",
				__func__);
			return 1;
		}

		u->type = update_create_array;
		u->dev_idx = super->current_vol;
		imsm_copy_dev(&u->dev, dev);
		/* NOTE(review): presumably append_metadata_update takes
		 * ownership of 'u' — confirm before freeing it here */
		append_metadata_update(st, u, len);

		/* mdmon owns the member devices from here on */
		for (d = super->disks; d ; d = d->next) {
			close(d->fd);
			d->fd = -1;
		}

		return 0;
	} else
		return write_super_imsm(st->sb, 1);
}
1726
/* Erase the imsm anchor by writing one zeroed sector over its location
 * (the second-to-last sector of the device).
 *
 * Returns 0 on success, 1 on seek, allocation, or write failure.
 */
static int store_zero_imsm(struct supertype *st, int fd)
{
	unsigned long long dsize;
	void *buf;
	int rv;

	get_dev_size(fd, NULL, &dsize);

	/* first block is stored on second to last sector of the disk */
	if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
		return 1;

	if (posix_memalign(&buf, 512, 512) != 0)
		return 1;

	memset(buf, 0, 512);
	/* 'buf' was leaked on both the success and the failure path */
	rv = write(fd, buf, 512) != 512;
	free(buf);

	return rv ? 1 : 0;
}
1746
1747 static int validate_geometry_imsm_container(struct supertype *st, int level,
1748 int layout, int raiddisks, int chunk,
1749 unsigned long long size, char *dev,
1750 unsigned long long *freesize,
1751 int verbose)
1752 {
1753 int fd;
1754 unsigned long long ldsize;
1755
1756 if (level != LEVEL_CONTAINER)
1757 return 0;
1758 if (!dev)
1759 return 1;
1760
1761 fd = open(dev, O_RDONLY|O_EXCL, 0);
1762 if (fd < 0) {
1763 if (verbose)
1764 fprintf(stderr, Name ": imsm: Cannot open %s: %s\n",
1765 dev, strerror(errno));
1766 return 0;
1767 }
1768 if (!get_dev_size(fd, dev, &ldsize)) {
1769 close(fd);
1770 return 0;
1771 }
1772 close(fd);
1773
1774 *freesize = avail_size_imsm(st, ldsize >> 9);
1775
1776 return 1;
1777 }
1778
1779 /* validate_geometry_imsm_volume - lifted from validate_geometry_ddf_bvd
1780 * FIX ME add ahci details
1781 */
1782 static int validate_geometry_imsm_volume(struct supertype *st, int level,
1783 int layout, int raiddisks, int chunk,
1784 unsigned long long size, char *dev,
1785 unsigned long long *freesize,
1786 int verbose)
1787 {
1788 struct stat stb;
1789 struct intel_super *super = st->sb;
1790 struct dl *dl;
1791 unsigned long long pos = 0;
1792 unsigned long long maxsize;
1793 struct extent *e;
1794 int i;
1795
1796 if (level == LEVEL_CONTAINER)
1797 return 0;
1798
1799 if (level == 1 && raiddisks > 2) {
1800 if (verbose)
1801 fprintf(stderr, Name ": imsm does not support more "
1802 "than 2 in a raid1 configuration\n");
1803 return 0;
1804 }
1805
1806 /* We must have the container info already read in. */
1807 if (!super)
1808 return 0;
1809
1810 if (!dev) {
1811 /* General test: make sure there is space for
1812 * 'raiddisks' device extents of size 'size' at a given
1813 * offset
1814 */
1815 unsigned long long minsize = size*2 /* convert to blocks */;
1816 unsigned long long start_offset = ~0ULL;
1817 int dcnt = 0;
1818 if (minsize == 0)
1819 minsize = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
1820 for (dl = super->disks; dl ; dl = dl->next) {
1821 int found = 0;
1822
1823 pos = 0;
1824 i = 0;
1825 e = get_extents(super, dl);
1826 if (!e) continue;
1827 do {
1828 unsigned long long esize;
1829 esize = e[i].start - pos;
1830 if (esize >= minsize)
1831 found = 1;
1832 if (found && start_offset == ~0ULL) {
1833 start_offset = pos;
1834 break;
1835 } else if (found && pos != start_offset) {
1836 found = 0;
1837 break;
1838 }
1839 pos = e[i].start + e[i].size;
1840 i++;
1841 } while (e[i-1].size);
1842 if (found)
1843 dcnt++;
1844 free(e);
1845 }
1846 if (dcnt < raiddisks) {
1847 if (verbose)
1848 fprintf(stderr, Name ": imsm: Not enough "
1849 "devices with space for this array "
1850 "(%d < %d)\n",
1851 dcnt, raiddisks);
1852 return 0;
1853 }
1854 return 1;
1855 }
1856 /* This device must be a member of the set */
1857 if (stat(dev, &stb) < 0)
1858 return 0;
1859 if ((S_IFMT & stb.st_mode) != S_IFBLK)
1860 return 0;
1861 for (dl = super->disks ; dl ; dl = dl->next) {
1862 if (dl->major == major(stb.st_rdev) &&
1863 dl->minor == minor(stb.st_rdev))
1864 break;
1865 }
1866 if (!dl) {
1867 if (verbose)
1868 fprintf(stderr, Name ": %s is not in the "
1869 "same imsm set\n", dev);
1870 return 0;
1871 }
1872 e = get_extents(super, dl);
1873 maxsize = 0;
1874 i = 0;
1875 if (e) do {
1876 unsigned long long esize;
1877 esize = e[i].start - pos;
1878 if (esize >= maxsize)
1879 maxsize = esize;
1880 pos = e[i].start + e[i].size;
1881 i++;
1882 } while (e[i-1].size);
1883 *freesize = maxsize;
1884
1885 return 1;
1886 }
1887
/* Size in bytes of the bad block management log recorded in the mpb;
 * a non-zero value gates assembly in container_content_imsm().
 */
int imsm_bbm_log_size(struct imsm_super *mpb)
{
	return __le32_to_cpu(mpb->bbm_log_size);
}
1892
/* Top-level geometry validation dispatcher.
 *
 * - container level: defer to validate_geometry_imsm_container()
 * - creating inside an already-loaded container (st->sb set): defer to
 *   validate_geometry_imsm_volume()
 * - otherwise: check whether 'dev' is busy because it belongs to an
 *   imsm container; if so load that container and validate the volume.
 *
 * Returns 1 when the geometry is acceptable, 0 when it is not.
 */
static int validate_geometry_imsm(struct supertype *st, int level, int layout,
				  int raiddisks, int chunk, unsigned long long size,
				  char *dev, unsigned long long *freesize,
				  int verbose)
{
	int fd, cfd;
	struct mdinfo *sra;

	/* if given unused devices create a container
	 * if given given devices in a container create a member volume
	 */
	if (level == LEVEL_CONTAINER) {
		/* Must be a fresh device to add to a container */
		return validate_geometry_imsm_container(st, level, layout,
							raiddisks, chunk, size,
							dev, freesize,
							verbose);
	}

	if (st->sb) {
		/* creating in a given container */
		return validate_geometry_imsm_volume(st, level, layout,
						     raiddisks, chunk, size,
						     dev, freesize, verbose);
	}

	/* limit creation to the following levels */
	if (!dev)
		switch (level) {
		case 0:
		case 1:
		case 10:
		case 5:
			break;
		default:
			return 1;
		}

	/* This device needs to be a device in an 'imsm' container */
	fd = open(dev, O_RDONLY|O_EXCL, 0);
	if (fd >= 0) {
		/* exclusive open succeeded: the device is free, so it
		 * cannot already belong to a container */
		if (verbose)
			fprintf(stderr,
				Name ": Cannot create this array on device %s\n",
				dev);
		close(fd);
		return 0;
	}
	if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
		if (verbose)
			fprintf(stderr, Name ": Cannot open %s: %s\n",
				dev, strerror(errno));
		return 0;
	}
	/* Well, it is in use by someone, maybe an 'imsm' container. */
	cfd = open_container(fd);
	if (cfd < 0) {
		close(fd);
		if (verbose)
			fprintf(stderr, Name ": Cannot use %s: It is busy\n",
				dev);
		return 0;
	}
	sra = sysfs_read(cfd, 0, GET_VERSION);
	close(fd);
	/* NOTE(review): 'sra' is never freed on any path below — looks like
	 * a leak; confirm against sysfs_free() usage elsewhere */
	if (sra && sra->array.major_version == -1 &&
	    strcmp(sra->text_version, "imsm") == 0) {
		/* This is a member of a imsm container. Load the container
		 * and try to create a volume
		 */
		struct intel_super *super;

		if (load_super_imsm_all(st, cfd, (void **) &super, NULL, 1) == 0) {
			st->sb = super;
			st->container_dev = fd2devnum(cfd);
			close(cfd);
			return validate_geometry_imsm_volume(st, level, layout,
							     raiddisks, chunk,
							     size, dev,
							     freesize, verbose);
		}
		close(cfd);
	} else /* may belong to another container */
		return 0;

	return 1;
}
1980
/* Build the mdinfo tree describing every volume in a loaded container. */
static struct mdinfo *container_content_imsm(struct supertype *st)
{
	/* Given a container loaded by load_super_imsm_all,
	 * extract information about all the arrays into
	 * an mdinfo tree.
	 *
	 * For each imsm_dev create an mdinfo, fill it in,
	 * then look for matching devices in super->disks
	 * and create appropriate device mdinfo.
	 */
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	struct mdinfo *rest = NULL;
	int i;

	/* do not assemble arrays that might have bad blocks */
	if (imsm_bbm_log_size(super->anchor)) {
		fprintf(stderr, Name ": BBM log found in metadata. "
			"Cannot activate array(s).\n");
		return NULL;
	}

	for (i = 0; i < mpb->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);
		struct imsm_vol *vol = &dev->vol;
		struct imsm_map *map = get_imsm_map(dev, 0);
		struct mdinfo *this;
		int slot;

		/* NOTE(review): malloc result is used unchecked here */
		this = malloc(sizeof(*this));
		memset(this, 0, sizeof(*this));
		this->next = rest;

		this->array.level = get_imsm_raid_level(map);
		this->array.raid_disks = map->num_members;
		this->array.layout = imsm_level_to_layout(this->array.level);
		this->array.md_minor = -1;
		this->array.ctime = 0;
		this->array.utime = 0;
		this->array.chunk_size = __le16_to_cpu(map->blocks_per_strip) << 9;
		this->array.state = !vol->dirty;
		this->container_member = i;
		/* an uninitialized, dirty, or migrating volume resyncs from
		 * the start; otherwise it is considered fully synced */
		if (map->map_state == IMSM_T_STATE_UNINITIALIZED ||
		    dev->vol.dirty || dev->vol.migr_state)
			this->resync_start = 0;
		else
			this->resync_start = ~0ULL;

		strncpy(this->name, (char *) dev->volume, MAX_RAID_SERIAL_LEN);
		this->name[MAX_RAID_SERIAL_LEN] = 0;

		sprintf(this->text_version, "/%s/%d",
			devnum2devname(st->container_dev),
			this->container_member);

		memset(this->uuid, 0, sizeof(this->uuid));

		this->component_size = __le32_to_cpu(map->blocks_per_member);

		for (slot = 0 ; slot < map->num_members; slot++) {
			struct mdinfo *info_d;
			struct dl *d;
			int idx;
			int skip;
			__u32 s;
			__u32 ord;

			skip = 0;
			idx = get_imsm_disk_idx(map, slot);
			ord = get_imsm_ord_tbl_ent(dev, slot);
			for (d = super->disks; d ; d = d->next)
				if (d->index == idx)
					break;

			if (d == NULL)
				skip = 1;

			/* skip missing, failed, unusable, or rebuilding
			 * members — the array assembles without them */
			s = d ? __le32_to_cpu(d->disk.status) : 0;
			if (s & FAILED_DISK)
				skip = 1;
			if (!(s & USABLE_DISK))
				skip = 1;
			if (ord & IMSM_ORD_REBUILD)
				skip = 1;

			/*
			 * if we skip some disks the array will be assembled degraded;
			 * reset resync start to avoid a dirty-degraded situation
			 *
			 * FIXME handle dirty degraded
			 */
			if (skip && !dev->vol.dirty)
				this->resync_start = ~0ULL;
			if (skip)
				continue;

			info_d = malloc(sizeof(*info_d));
			if (!info_d) {
				fprintf(stderr, Name ": failed to allocate disk"
					" for volume %s\n", (char *) dev->volume);
				/* NOTE(review): frees 'this' but leaks any
				 * info_d already chained on this->devs and
				 * drops the half-built volume — confirm */
				free(this);
				this = rest;
				break;
			}
			memset(info_d, 0, sizeof(*info_d));
			info_d->next = this->devs;
			this->devs = info_d;

			info_d->disk.number = d->index;
			info_d->disk.major = d->major;
			info_d->disk.minor = d->minor;
			info_d->disk.raid_disk = slot;

			this->array.working_disks++;

			info_d->events = __le32_to_cpu(mpb->generation_num);
			info_d->data_offset = __le32_to_cpu(map->pba_of_lba0);
			info_d->component_size = __le32_to_cpu(map->blocks_per_member);
			if (d->devname)
				strcpy(info_d->name, d->devname);
		}
		rest = this;
	}

	return rest;
}
2107
2108
2109 static int imsm_open_new(struct supertype *c, struct active_array *a,
2110 char *inst)
2111 {
2112 struct intel_super *super = c->sb;
2113 struct imsm_super *mpb = super->anchor;
2114
2115 if (atoi(inst) >= mpb->num_raid_devs) {
2116 fprintf(stderr, "%s: subarry index %d, out of range\n",
2117 __func__, atoi(inst));
2118 return -ENODEV;
2119 }
2120
2121 dprintf("imsm: open_new %s\n", inst);
2122 a->info.container_member = atoi(inst);
2123 return 0;
2124 }
2125
2126 static __u8 imsm_check_degraded(struct intel_super *super, int n, int failed)
2127 {
2128 struct imsm_dev *dev = get_imsm_dev(super, n);
2129 struct imsm_map *map = get_imsm_map(dev, 0);
2130
2131 if (!failed)
2132 return map->map_state == IMSM_T_STATE_UNINITIALIZED ?
2133 IMSM_T_STATE_UNINITIALIZED : IMSM_T_STATE_NORMAL;
2134
2135 switch (get_imsm_raid_level(map)) {
2136 case 0:
2137 return IMSM_T_STATE_FAILED;
2138 break;
2139 case 1:
2140 if (failed < map->num_members)
2141 return IMSM_T_STATE_DEGRADED;
2142 else
2143 return IMSM_T_STATE_FAILED;
2144 break;
2145 case 10:
2146 {
2147 /**
2148 * check to see if any mirrors have failed,
2149 * otherwise we are degraded
2150 */
2151 int device_per_mirror = 2; /* FIXME is this always the case?
2152 * and are they always adjacent?
2153 */
2154 int failed = 0;
2155 int i;
2156
2157 for (i = 0; i < map->num_members; i++) {
2158 int idx = get_imsm_disk_idx(map, i);
2159 struct imsm_disk *disk = get_imsm_disk(super, idx);
2160
2161 if (__le32_to_cpu(disk->status) & FAILED_DISK)
2162 failed++;
2163
2164 if (failed >= device_per_mirror)
2165 return IMSM_T_STATE_FAILED;
2166
2167 /* reset 'failed' for next mirror set */
2168 if (!((i + 1) % device_per_mirror))
2169 failed = 0;
2170 }
2171
2172 return IMSM_T_STATE_DEGRADED;
2173 }
2174 case 5:
2175 if (failed < 2)
2176 return IMSM_T_STATE_DEGRADED;
2177 else
2178 return IMSM_T_STATE_FAILED;
2179 break;
2180 default:
2181 break;
2182 }
2183
2184 return map->map_state;
2185 }
2186
2187 static int imsm_count_failed(struct intel_super *super, struct imsm_map *map)
2188 {
2189 int i;
2190 int failed = 0;
2191 struct imsm_disk *disk;
2192
2193 for (i = 0; i < map->num_members; i++) {
2194 int idx = get_imsm_disk_idx(map, i);
2195
2196 disk = get_imsm_disk(super, idx);
2197 if (__le32_to_cpu(disk->status) & FAILED_DISK)
2198 failed++;
2199 else if (!(__le32_to_cpu(disk->status) & USABLE_DISK))
2200 failed++;
2201 }
2202
2203 return failed;
2204 }
2205
/* mdmon hook: reconcile the metadata's view of array state (map state,
 * migration flags, dirty bit) with the kernel's.
 *
 * consistent: non-zero when the kernel considers the array clean.
 * Increments super->updates_pending whenever the metadata changed and
 * needs to be written back.
 */
static void imsm_set_array_state(struct active_array *a, int consistent)
{
	int inst = a->info.container_member;
	struct intel_super *super = a->container->sb;
	struct imsm_dev *dev = get_imsm_dev(super, inst);
	struct imsm_map *map = get_imsm_map(dev, 0);
	int dirty = !consistent;
	int failed;
	__u8 map_state;

	failed = imsm_count_failed(super, map);
	map_state = imsm_check_degraded(super, inst, failed);

	/* clean array while metadata records a migration or degradation:
	 * force a resync from the beginning */
	if (consistent && !dev->vol.dirty &&
	    (dev->vol.migr_state || map_state != IMSM_T_STATE_NORMAL))
		a->resync_start = 0ULL;

	if (a->resync_start == ~0ULL) {
		/* complete recovery or initial resync */
		if (map->map_state != map_state) {
			dprintf("imsm: map_state %d: %d\n",
				inst, map_state);
			map->map_state = map_state;
			super->updates_pending++;
		}
		if (dev->vol.migr_state) {
			dprintf("imsm: mark resync complete\n");
			dev->vol.migr_state = 0;
			dev->vol.migr_type = 0;
			super->updates_pending++;
		}
	} else if (!dev->vol.migr_state) {
		dprintf("imsm: mark '%s' (%llu)\n",
			failed ? "rebuild" : "initializing", a->resync_start);
		/* mark that we are rebuilding */
		map->map_state = failed ? map_state : IMSM_T_STATE_NORMAL;
		dev->vol.migr_state = 1;
		/* NOTE(review): migr_type 1 presumably means rebuild and 0
		 * initialization, inferred from this use — confirm */
		dev->vol.migr_type = failed ? 1 : 0;
		dup_map(dev);
		a->check_degraded = 1;
		super->updates_pending++;
	}

	/* mark dirty / clean */
	if (dirty != dev->vol.dirty) {
		dprintf("imsm: mark '%s' (%llu)\n",
			dirty ? "dirty" : "clean", a->resync_start);
		dev->vol.dirty = dirty;
		super->updates_pending++;
	}
}
2257
2258 static void imsm_set_disk(struct active_array *a, int n, int state)
2259 {
2260 int inst = a->info.container_member;
2261 struct intel_super *super = a->container->sb;
2262 struct imsm_dev *dev = get_imsm_dev(super, inst);
2263 struct imsm_map *map = get_imsm_map(dev, 0);
2264 struct imsm_disk *disk;
2265 __u32 status;
2266 int failed = 0;
2267 int new_failure = 0;
2268
2269 if (n > map->num_members)
2270 fprintf(stderr, "imsm: set_disk %d out of range 0..%d\n",
2271 n, map->num_members - 1);
2272
2273 if (n < 0)
2274 return;
2275
2276 dprintf("imsm: set_disk %d:%x\n", n, state);
2277
2278 disk = get_imsm_disk(super, get_imsm_disk_idx(map, n));
2279
2280 /* check for new failures */
2281 status = __le32_to_cpu(disk->status);
2282 if ((state & DS_FAULTY) && !(status & FAILED_DISK)) {
2283 status |= FAILED_DISK;
2284 disk->status = __cpu_to_le32(status);
2285 new_failure = 1;
2286 super->updates_pending++;
2287 }
2288 /* check if in_sync */
2289 if ((state & DS_INSYNC) && !(status & USABLE_DISK)) {
2290 status |= USABLE_DISK;
2291 disk->status = __cpu_to_le32(status);
2292 super->updates_pending++;
2293 }
2294
2295 /* the number of failures have changed, count up 'failed' to determine
2296 * degraded / failed status
2297 */
2298 if (new_failure && map->map_state != IMSM_T_STATE_FAILED)
2299 failed = imsm_count_failed(super, map);
2300
2301 /* determine map_state based on failed or in_sync count */
2302 if (failed)
2303 map->map_state = imsm_check_degraded(super, inst, failed);
2304 else if (map->map_state == IMSM_T_STATE_DEGRADED) {
2305 struct mdinfo *d;
2306 int working = 0;
2307
2308 for (d = a->info.devs ; d ; d = d->next)
2309 if (d->curr_state & DS_INSYNC)
2310 working++;
2311
2312 if (working == a->info.array.raid_disks) {
2313 map->map_state = IMSM_T_STATE_NORMAL;
2314 dev->vol.migr_state = 0;
2315 dev->vol.migr_type = 0;
2316 super->updates_pending++;
2317 }
2318 }
2319 }
2320
2321 static int store_imsm_mpb(int fd, struct intel_super *super)
2322 {
2323 struct imsm_super *mpb = super->anchor;
2324 __u32 mpb_size = __le32_to_cpu(mpb->mpb_size);
2325 unsigned long long dsize;
2326 unsigned long long sectors;
2327
2328 get_dev_size(fd, NULL, &dsize);
2329
2330 if (mpb_size > 512) {
2331 /* -1 to account for anchor */
2332 sectors = mpb_sectors(mpb) - 1;
2333
2334 /* write the extended mpb to the sectors preceeding the anchor */
2335 if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0)
2336 return 1;
2337
2338 if (write(fd, super->buf + 512, 512 * sectors) != 512 * sectors)
2339 return 1;
2340 }
2341
2342 /* first block is stored on second to last sector of the disk */
2343 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
2344 return 1;
2345
2346 if (write(fd, super->buf, 512) != 512)
2347 return 1;
2348
2349 return 0;
2350 }
2351
2352 static void imsm_sync_metadata(struct supertype *container)
2353 {
2354 struct intel_super *super = container->sb;
2355
2356 if (!super->updates_pending)
2357 return;
2358
2359 write_super_imsm(super, 0);
2360
2361 super->updates_pending = 0;
2362 }
2363
2364 static struct dl *imsm_readd(struct intel_super *super, int idx, struct active_array *a)
2365 {
2366 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
2367 struct imsm_map *map = get_imsm_map(dev, 0);
2368 int i = get_imsm_disk_idx(map, idx);
2369 struct dl *dl;
2370
2371 for (dl = super->disks; dl; dl = dl->next)
2372 if (dl->index == i)
2373 break;
2374
2375 if (__le32_to_cpu(dl->disk.status) & FAILED_DISK)
2376 dl = NULL;
2377
2378 if (dl)
2379 dprintf("%s: found %x:%x\n", __func__, dl->major, dl->minor);
2380
2381 return dl;
2382 }
2383
2384 static struct dl *imsm_add_spare(struct intel_super *super, int idx, struct active_array *a)
2385 {
2386 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
2387 struct imsm_map *map = get_imsm_map(dev, 0);
2388 unsigned long long esize;
2389 unsigned long long pos;
2390 struct mdinfo *d;
2391 struct extent *ex;
2392 int j;
2393 int found;
2394 __u32 array_start;
2395 __u32 status;
2396 struct dl *dl;
2397
2398 for (dl = super->disks; dl; dl = dl->next) {
2399 /* If in this array, skip */
2400 for (d = a->info.devs ; d ; d = d->next)
2401 if (d->disk.major == dl->major &&
2402 d->disk.minor == dl->minor) {
2403 dprintf("%x:%x already in array\n", dl->major, dl->minor);
2404 break;
2405 }
2406 if (d)
2407 continue;
2408
2409 /* skip marked in use or failed drives */
2410 status = __le32_to_cpu(dl->disk.status);
2411 if (status & FAILED_DISK || status & CONFIGURED_DISK) {
2412 dprintf("%x:%x status ( %s%s)\n",
2413 dl->major, dl->minor,
2414 status & FAILED_DISK ? "failed " : "",
2415 status & CONFIGURED_DISK ? "configured " : "");
2416 continue;
2417 }
2418
2419 /* Does this unused device have the requisite free space?
2420 * We need a->info.component_size sectors
2421 */
2422 ex = get_extents(super, dl);
2423 if (!ex) {
2424 dprintf("cannot get extents\n");
2425 continue;
2426 }
2427 found = 0;
2428 j = 0;
2429 pos = 0;
2430 array_start = __le32_to_cpu(map->pba_of_lba0);
2431
2432 do {
2433 /* check that we can start at pba_of_lba0 with
2434 * a->info.component_size of space
2435 */
2436 esize = ex[j].start - pos;
2437 if (array_start >= pos &&
2438 array_start + a->info.component_size < ex[j].start) {
2439 found = 1;
2440 break;
2441 }
2442 pos = ex[j].start + ex[j].size;
2443 j++;
2444
2445 } while (ex[j-1].size);
2446
2447 free(ex);
2448 if (!found) {
2449 dprintf("%x:%x does not have %llu at %d\n",
2450 dl->major, dl->minor,
2451 a->info.component_size,
2452 __le32_to_cpu(map->pba_of_lba0));
2453 /* No room */
2454 continue;
2455 } else
2456 break;
2457 }
2458
2459 return dl;
2460 }
2461
static struct mdinfo *imsm_activate_spare(struct active_array *a,
					  struct metadata_update **updates)
{
	/**
	 * Find a device with unused free space and use it to replace a
	 * failed/vacant region in an array. We replace failed regions one
	 * array at a time. The result is that a new spare disk will be added
	 * to the first failed array and after the monitor has finished
	 * propagating failures the remainder will be consumed.
	 *
	 * Returns a list of mdinfo entries describing the chosen spare(s)
	 * and queues an imsm_update_activate_spare record on *updates for
	 * the manager thread to apply; returns NULL when nothing can be
	 * activated yet.
	 *
	 * FIXME add a capability for mdmon to request spares from another
	 * container.
	 */

	struct intel_super *super = a->container->sb;
	int inst = a->info.container_member;
	struct imsm_dev *dev = get_imsm_dev(super, inst);
	struct imsm_map *map = get_imsm_map(dev, 0);
	/* start from "all slots missing" and subtract every member that
	 * still has an open state_fd (i.e. is present)
	 */
	int failed = a->info.array.raid_disks;
	struct mdinfo *rv = NULL;
	struct mdinfo *d;
	struct mdinfo *di;
	struct metadata_update *mu;
	struct dl *dl;
	struct imsm_update_activate_spare *u;
	int num_spares = 0;
	int i;

	for (d = a->info.devs ; d ; d = d->next) {
		if ((d->curr_state & DS_FAULTY) &&
			d->state_fd >= 0)
			/* wait for Removal to happen */
			return NULL;
		if (d->state_fd >= 0)
			failed--;
	}

	dprintf("imsm: activate spare: inst=%d failed=%d (%d) level=%d\n",
		inst, failed, a->info.array.raid_disks, a->info.array.level);
	/* only a degraded (not normal, not fully failed) array can absorb
	 * a spare
	 */
	if (imsm_check_degraded(super, inst, failed) != IMSM_T_STATE_DEGRADED)
		return NULL;

	/* For each slot, if it is not working, find a spare */
	for (i = 0; i < a->info.array.raid_disks; i++) {
		for (d = a->info.devs ; d ; d = d->next)
			if (d->disk.raid_disk == i)
				break;
		dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
		if (d && (d->state_fd >= 0))
			continue;

		/*
		 * OK, this device needs recovery. Try to re-add the previous
		 * occupant of this slot, if this fails add a new spare
		 */
		dl = imsm_readd(super, i, a);
		if (!dl)
			dl = imsm_add_spare(super, i, a);
		if (!dl)
			continue;

		/* found a usable disk with enough space */
		/* NOTE(review): malloc results here and below are not
		 * checked; an allocation failure would crash mdmon
		 */
		di = malloc(sizeof(*di));
		memset(di, 0, sizeof(*di));

		/* dl->index will be -1 in the case we are activating a
		 * pristine spare. imsm_process_update() will create a
		 * new index in this case. Once a disk is found to be
		 * failed in all member arrays it is kicked from the
		 * metadata
		 */
		di->disk.number = dl->index;

		/* (ab)use di->devs to store a pointer to the device
		 * we chose
		 */
		di->devs = (struct mdinfo *) dl;

		di->disk.raid_disk = i;
		di->disk.major = dl->major;
		di->disk.minor = dl->minor;
		di->disk.state = 0;
		di->data_offset = __le32_to_cpu(map->pba_of_lba0);
		di->component_size = a->info.component_size;
		di->container_member = inst;
		di->next = rv;
		rv = di;
		num_spares++;
		dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
			i, di->data_offset);

		/* one slot per invocation; the monitor will call back in
		 * for any remaining vacancies
		 */
		break;
	}

	if (!rv)
		/* No spares found */
		return rv;
	/* Now 'rv' has a list of devices to return.
	 * Create a metadata_update record to update the
	 * disk_ord_tbl for the array
	 */
	mu = malloc(sizeof(*mu));
	mu->buf = malloc(sizeof(struct imsm_update_activate_spare) * num_spares);
	mu->space = NULL;
	mu->len = sizeof(struct imsm_update_activate_spare) * num_spares;
	mu->next = *updates;
	u = (struct imsm_update_activate_spare *) mu->buf;

	/* one update record per chosen spare, chained via u->next */
	for (di = rv ; di ; di = di->next) {
		u->type = update_activate_spare;
		u->dl = (struct dl *) di->devs;
		di->devs = NULL;
		u->slot = di->disk.raid_disk;
		u->array = inst;
		u->next = u + 1;
		u++;
	}
	(u-1)->next = NULL;
	*updates = mu;

	return rv;
}
2584
2585 static int disks_overlap(struct imsm_map *m1, struct imsm_map *m2)
2586 {
2587 int i;
2588 int j;
2589 int idx;
2590
2591 for (i = 0; i < m1->num_members; i++) {
2592 idx = get_imsm_disk_idx(m1, i);
2593 for (j = 0; j < m2->num_members; j++)
2594 if (idx == get_imsm_disk_idx(m2, j))
2595 return 1;
2596 }
2597
2598 return 0;
2599 }
2600
2601 static void imsm_process_update(struct supertype *st,
2602 struct metadata_update *update)
2603 {
2604 /**
2605 * crack open the metadata_update envelope to find the update record
2606 * update can be one of:
2607 * update_activate_spare - a spare device has replaced a failed
2608 * device in an array, update the disk_ord_tbl. If this disk is
2609 * present in all member arrays then also clear the SPARE_DISK
2610 * flag
2611 */
2612 struct intel_super *super = st->sb;
2613 struct imsm_super *mpb = super->anchor;
2614 enum imsm_update_type type = *(enum imsm_update_type *) update->buf;
2615
2616 switch (type) {
2617 case update_activate_spare: {
2618 struct imsm_update_activate_spare *u = (void *) update->buf;
2619 struct imsm_dev *dev = get_imsm_dev(super, u->array);
2620 struct imsm_map *map = get_imsm_map(dev, 0);
2621 struct active_array *a;
2622 struct imsm_disk *disk;
2623 __u32 status;
2624 struct dl *dl;
2625 unsigned int found;
2626 int victim;
2627 int i;
2628
2629 for (dl = super->disks; dl; dl = dl->next)
2630 if (dl == u->dl)
2631 break;
2632
2633 if (!dl) {
2634 fprintf(stderr, "error: imsm_activate_spare passed "
2635 "an unknown disk (index: %d serial: %s)\n",
2636 u->dl->index, u->dl->serial);
2637 return;
2638 }
2639
2640 super->updates_pending++;
2641
2642 /* adding a pristine spare, assign a new index */
2643 if (dl->index < 0) {
2644 dl->index = super->anchor->num_disks;
2645 super->anchor->num_disks++;
2646 }
2647 victim = get_imsm_disk_idx(map, u->slot);
2648 map->disk_ord_tbl[u->slot] = __cpu_to_le32(dl->index);
2649 disk = &dl->disk;
2650 status = __le32_to_cpu(disk->status);
2651 status |= CONFIGURED_DISK;
2652 status &= ~(SPARE_DISK | USABLE_DISK);
2653 disk->status = __cpu_to_le32(status);
2654
2655 /* count arrays using the victim in the metadata */
2656 found = 0;
2657 for (a = st->arrays; a ; a = a->next) {
2658 dev = get_imsm_dev(super, a->info.container_member);
2659 map = get_imsm_map(dev, 0);
2660 for (i = 0; i < map->num_members; i++)
2661 if (victim == get_imsm_disk_idx(map, i))
2662 found++;
2663 }
2664
2665 /* clear some flags if the victim is no longer being
2666 * utilized anywhere
2667 */
2668 if (!found) {
2669 disk = get_imsm_disk(super, victim);
2670 status = __le32_to_cpu(disk->status);
2671 status &= ~(CONFIGURED_DISK | USABLE_DISK);
2672 disk->status = __cpu_to_le32(status);
2673 /* at this point the disk can be removed from the
2674 * metadata, however we need to guarantee that we do
2675 * not race with any manager thread routine that relies
2676 * on dl->index or map->disk_ord_tbl
2677 */
2678 }
2679 break;
2680 }
2681 case update_create_array: {
2682 /* someone wants to create a new array, we need to be aware of
2683 * a few races/collisions:
2684 * 1/ 'Create' called by two separate instances of mdadm
2685 * 2/ 'Create' versus 'activate_spare': mdadm has chosen
2686 * devices that have since been assimilated via
2687 * activate_spare.
2688 * In the event this update can not be carried out mdadm will
2689 * (FIX ME) notice that its update did not take hold.
2690 */
2691 struct imsm_update_create_array *u = (void *) update->buf;
2692 struct imsm_dev *dev;
2693 struct imsm_map *map, *new_map;
2694 unsigned long long start, end;
2695 unsigned long long new_start, new_end;
2696 int i;
2697 int overlap = 0;
2698
2699 /* handle racing creates: first come first serve */
2700 if (u->dev_idx < mpb->num_raid_devs) {
2701 dprintf("%s: subarray %d already defined\n",
2702 __func__, u->dev_idx);
2703 return;
2704 }
2705
2706 /* check update is next in sequence */
2707 if (u->dev_idx != mpb->num_raid_devs) {
2708 dprintf("%s: can not create array %d expected index %d\n",
2709 __func__, u->dev_idx, mpb->num_raid_devs);
2710 return;
2711 }
2712
2713 new_map = get_imsm_map(&u->dev, 0);
2714 new_start = __le32_to_cpu(new_map->pba_of_lba0);
2715 new_end = new_start + __le32_to_cpu(new_map->blocks_per_member);
2716
2717 /* handle activate_spare versus create race:
2718 * check to make sure that overlapping arrays do not include
2719 * overalpping disks
2720 */
2721 for (i = 0; i < mpb->num_raid_devs; i++) {
2722 dev = get_imsm_dev(super, i);
2723 map = get_imsm_map(dev, 0);
2724 start = __le32_to_cpu(map->pba_of_lba0);
2725 end = start + __le32_to_cpu(map->blocks_per_member);
2726 if ((new_start >= start && new_start <= end) ||
2727 (start >= new_start && start <= new_end))
2728 overlap = 1;
2729 if (overlap && disks_overlap(map, new_map)) {
2730 dprintf("%s: arrays overlap\n", __func__);
2731 return;
2732 }
2733 }
2734 /* check num_members sanity */
2735 if (new_map->num_members > mpb->num_disks) {
2736 dprintf("%s: num_disks out of range\n", __func__);
2737 return;
2738 }
2739
2740 /* check that prepare update was successful */
2741 if (!update->space) {
2742 dprintf("%s: prepare update failed\n", __func__);
2743 return;
2744 }
2745
2746 super->updates_pending++;
2747 dev = update->space;
2748 update->space = NULL;
2749 imsm_copy_dev(dev, &u->dev);
2750 super->dev_tbl[u->dev_idx] = dev;
2751 mpb->num_raid_devs++;
2752
2753 /* fix up flags, if arrays overlap then the drives can not be
2754 * spares
2755 */
2756 for (i = 0; i < map->num_members; i++) {
2757 struct imsm_disk *disk;
2758 __u32 status;
2759
2760 disk = get_imsm_disk(super, get_imsm_disk_idx(map, i));
2761 status = __le32_to_cpu(disk->status);
2762 status |= CONFIGURED_DISK;
2763 if (overlap)
2764 status &= ~SPARE_DISK;
2765 disk->status = __cpu_to_le32(status);
2766 }
2767 break;
2768 }
2769 }
2770 }
2771
2772 static void imsm_prepare_update(struct supertype *st,
2773 struct metadata_update *update)
2774 {
2775 /**
2776 * Allocate space to hold new disk entries, raid-device entries or a
2777 * new mpb if necessary. We currently maintain an mpb large enough to
2778 * hold 2 subarrays for the given number of disks. This may not be
2779 * sufficient when reshaping.
2780 *
2781 * FIX ME handle the reshape case.
2782 *
2783 * The monitor will be able to safely change super->mpb by arranging
2784 * for it to be freed in check_update_queue(). I.e. the monitor thread
2785 * will start using the new pointer and the manager can continue to use
2786 * the old value until check_update_queue() runs.
2787 */
2788 enum imsm_update_type type = *(enum imsm_update_type *) update->buf;
2789
2790 switch (type) {
2791 case update_create_array: {
2792 struct imsm_update_create_array *u = (void *) update->buf;
2793 size_t len = sizeof_imsm_dev(&u->dev, 1);
2794
2795 update->space = malloc(len);
2796 break;
2797 default:
2798 break;
2799 }
2800 }
2801
2802 return;
2803 }
2804
2805 struct superswitch super_imsm = {
2806 #ifndef MDASSEMBLE
2807 .examine_super = examine_super_imsm,
2808 .brief_examine_super = brief_examine_super_imsm,
2809 .detail_super = detail_super_imsm,
2810 .brief_detail_super = brief_detail_super_imsm,
2811 .write_init_super = write_init_super_imsm,
2812 #endif
2813 .match_home = match_home_imsm,
2814 .uuid_from_super= uuid_from_super_imsm,
2815 .getinfo_super = getinfo_super_imsm,
2816 .update_super = update_super_imsm,
2817
2818 .avail_size = avail_size_imsm,
2819
2820 .compare_super = compare_super_imsm,
2821
2822 .load_super = load_super_imsm,
2823 .init_super = init_super_imsm,
2824 .add_to_super = add_to_super_imsm,
2825 .store_super = store_zero_imsm,
2826 .free_super = free_super_imsm,
2827 .match_metadata_desc = match_metadata_desc_imsm,
2828 .container_content = container_content_imsm,
2829
2830 .validate_geometry = validate_geometry_imsm,
2831 .external = 1,
2832
2833 /* for mdmon */
2834 .open_new = imsm_open_new,
2835 .load_super = load_super_imsm,
2836 .set_array_state= imsm_set_array_state,
2837 .set_disk = imsm_set_disk,
2838 .sync_metadata = imsm_sync_metadata,
2839 .activate_spare = imsm_activate_spare,
2840 .process_update = imsm_process_update,
2841 .prepare_update = imsm_prepare_update,
2842 };