1/*
2 * mdadm - Intel(R) Matrix Storage Manager Support
3 *
4 * Copyright (C) 2002-2007 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include "mdadm.h"
21#include "mdmon.h"
22#include <values.h>
23#include <scsi/sg.h>
24#include <ctype.h>
25
26/* MPB == Metadata Parameter Block */
27#define MPB_SIGNATURE "Intel Raid ISM Cfg Sig. "
28#define MPB_SIG_LEN (strlen(MPB_SIGNATURE))
29#define MPB_VERSION_RAID0 "1.0.00"
30#define MPB_VERSION_RAID1 "1.1.00"
31#define MPB_VERSION_RAID5 "1.2.02"
32#define MAX_SIGNATURE_LENGTH 32
33#define MAX_RAID_SERIAL_LEN 16
34#define MPB_SECTOR_CNT 418
35#define IMSM_RESERVED_SECTORS 4096
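/* On-disk placement, as implemented by load_imsm_mpb() and store_zero_imsm()
 * below: the 512-byte anchor block lives in the second-to-last sector of each
 * member disk, and the remaining mpb_sectors() - 1 sectors of an extended MPB
 * sit immediately before it.  The final MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS
 * sectors of a disk are therefore kept out of any member data area
 * (see avail_size_imsm() and get_extents()).
 */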
36
37/* Disk configuration info. */
38#define IMSM_MAX_DEVICES 255
39struct imsm_disk {
40 __u8 serial[MAX_RAID_SERIAL_LEN];/* 0xD8 - 0xE7 ascii serial number */
41 __u32 total_blocks; /* 0xE8 - 0xEB total blocks */
42 __u32 scsi_id; /* 0xEC - 0xEF scsi ID */
43 __u32 status; /* 0xF0 - 0xF3 */
44#define SPARE_DISK 0x01 /* Spare */
45#define CONFIGURED_DISK 0x02 /* Member of some RaidDev */
46#define FAILED_DISK 0x04 /* Permanent failure */
47#define USABLE_DISK 0x08 /* Fully usable unless FAILED_DISK is set */
48
49#define IMSM_DISK_FILLERS 5
50 __u32 filler[IMSM_DISK_FILLERS]; /* 0xF4 - 0x107 MPB_DISK_FILLERS for future expansion */
51};
52
53/* RAID map configuration info. */
54struct imsm_map {
55 __u32 pba_of_lba0; /* start address of partition */
56 __u32 blocks_per_member;/* blocks per member */
57 __u32 num_data_stripes; /* number of data stripes */
58 __u16 blocks_per_strip;
59 __u8 map_state; /* Normal, Uninitialized, Degraded, Failed */
60#define IMSM_T_STATE_NORMAL 0
61#define IMSM_T_STATE_UNINITIALIZED 1
62#define IMSM_T_STATE_DEGRADED 2 /* FIXME: is this correct? */
63#define IMSM_T_STATE_FAILED 3 /* FIXME: is this correct? */
64 __u8 raid_level;
65#define IMSM_T_RAID0 0
66#define IMSM_T_RAID1 1
67#define IMSM_T_RAID5 5 /* since metadata version 1.2.02 ? */
68 __u8 num_members; /* number of member disks */
69 __u8 reserved[3];
70 __u32 filler[7]; /* expansion area */
71#define IMSM_ORD_REBUILD (1 << 24)
72 __u32 disk_ord_tbl[1]; /* disk_ord_tbl[num_members],
73 * top byte contains some flags
74 */
75} __attribute__ ((packed));
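/* Note: disk_ord_tbl[] is a flexible tail; a map for N members really
 * occupies sizeof(struct imsm_map) + (N - 1) * sizeof(__u32) bytes, which is
 * what sizeof_imsm_map() below computes.  Ordinal entries carry the disk
 * index in the low 24 bits and flags such as IMSM_ORD_REBUILD in the top
 * byte (see get_imsm_disk_idx()).
 */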
76
77struct imsm_vol {
78 __u32 reserved[2];
79 __u8 migr_state; /* Normal or Migrating */
80 __u8 migr_type; /* Initializing, Rebuilding, ... */
81 __u8 dirty;
82 __u8 fill[1];
83 __u32 filler[5];
84 struct imsm_map map[1];
85 /* here comes another one if migr_state */
86} __attribute__ ((packed));
87
88struct imsm_dev {
89 __u8 volume[MAX_RAID_SERIAL_LEN];
90 __u32 size_low;
91 __u32 size_high;
92 __u32 status; /* Persistent RaidDev status */
93 __u32 reserved_blocks; /* Reserved blocks at beginning of volume */
94#define IMSM_DEV_FILLERS 12
95 __u32 filler[IMSM_DEV_FILLERS];
96 struct imsm_vol vol;
97} __attribute__ ((packed));
98
99struct imsm_super {
100 __u8 sig[MAX_SIGNATURE_LENGTH]; /* 0x00 - 0x1F */
101 __u32 check_sum; /* 0x20 - 0x23 MPB Checksum */
102 __u32 mpb_size; /* 0x24 - 0x27 Size of MPB */
103 __u32 family_num; /* 0x28 - 0x2B Checksum from first time this config was written */
104 __u32 generation_num; /* 0x2C - 0x2F Incremented each time this array's MPB is written */
105 __u32 error_log_size; /* 0x30 - 0x33 in bytes */
106 __u32 attributes; /* 0x34 - 0x37 */
107 __u8 num_disks; /* 0x38 Number of configured disks */
108 __u8 num_raid_devs; /* 0x39 Number of configured volumes */
109 __u8 error_log_pos; /* 0x3A */
110 __u8 fill[1]; /* 0x3B */
111 __u32 cache_size; /* 0x3c - 0x40 in mb */
112 __u32 orig_family_num; /* 0x40 - 0x43 original family num */
113 __u32 pwr_cycle_count; /* 0x44 - 0x47 simulated power cycle count for array */
114 __u32 bbm_log_size; /* 0x48 - 0x4B - size of bad Block Mgmt Log in bytes */
115#define IMSM_FILLERS 35
116 __u32 filler[IMSM_FILLERS]; /* 0x4C - 0xD7 RAID_MPB_FILLERS */
117 struct imsm_disk disk[1]; /* 0xD8 diskTbl[numDisks] */
118 /* here comes imsm_dev[num_raid_devs] */
119 /* here comes BBM logs */
120} __attribute__ ((packed));
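/* Overall MPB layout implied by the structures above and the accessors
 * below (__get_imsm_dev(), __get_imsm_bbm_log()):
 *
 *   struct imsm_super                anchor header (first imsm_disk at 0xD8)
 *   struct imsm_disk[num_disks]      rest of the disk table
 *   struct imsm_dev[num_raid_devs]   variable-sized, one or two maps each
 *   struct bbm_log                   last bbm_log_size bytes of the MPB
 */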
121
122#define BBM_LOG_MAX_ENTRIES 254
123
124struct bbm_log_entry {
125 __u64 defective_block_start;
126#define UNREADABLE 0xFFFFFFFF
127 __u32 spare_block_offset;
128 __u16 remapped_marked_count;
129 __u16 disk_ordinal;
130} __attribute__ ((__packed__));
131
132struct bbm_log {
133 __u32 signature; /* 0xABADB10C */
134 __u32 entry_count;
135 __u32 reserved_spare_block_count; /* 0 */
136 __u32 reserved; /* 0xFFFF */
137 __u64 first_spare_lba;
138 struct bbm_log_entry mapped_block_entries[BBM_LOG_MAX_ENTRIES];
139} __attribute__ ((__packed__));
140
141
142#ifndef MDASSEMBLE
143static char *map_state_str[] = { "normal", "uninitialized", "degraded", "failed" };
144#endif
145
146static unsigned int sector_count(__u32 bytes)
147{
148 return ((bytes + (512-1)) & (~(512-1))) / 512;
149}
150
151static unsigned int mpb_sectors(struct imsm_super *mpb)
152{
153 return sector_count(__le32_to_cpu(mpb->mpb_size));
154}
155
156/* internal representation of IMSM metadata */
157struct intel_super {
158 union {
159 void *buf; /* O_DIRECT buffer for reading/writing metadata */
160 struct imsm_super *anchor; /* immovable parameters */
161 };
162 size_t len; /* size of the 'buf' allocation */
163 int updates_pending; /* count of pending updates for mdmon */
164 int creating_imsm; /* flag to indicate container creation */
165 int current_vol; /* index of raid device undergoing creation */
166 #define IMSM_MAX_RAID_DEVS 2
167 struct imsm_dev *dev_tbl[IMSM_MAX_RAID_DEVS];
168 struct dl {
169 struct dl *next;
170 int index;
171 __u8 serial[MAX_RAID_SERIAL_LEN];
172 int major, minor;
173 char *devname;
174 struct imsm_disk disk;
175 int fd;
176 } *disks;
177 struct bbm_log *bbm_log;
178};
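/* 'buf' and 'anchor' alias the same O_DIRECT allocation: the raw on-disk MPB
 * is read into buf and interpreted through anchor.  dev_tbl[] holds heap
 * copies of each imsm_dev made by parse_raid_devices(); each copy is sized
 * with sizeof_imsm_dev(dev, 1) so a second (migration) map can be added
 * without reallocating.
 */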
179
180struct extent {
181 unsigned long long start, size;
182};
183
184/* definition of messages passed to imsm_process_update */
185enum imsm_update_type {
186 update_activate_spare,
187 update_create_array,
188};
189
190struct imsm_update_activate_spare {
191 enum imsm_update_type type;
192 struct dl *dl;
193 int slot;
194 int array;
195 struct imsm_update_activate_spare *next;
196};
197
198struct imsm_update_create_array {
199 enum imsm_update_type type;
200 int dev_idx;
201 struct imsm_dev dev;
202};
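/* These update records travel from mdadm to mdmon via append_metadata_update()
 * (see write_init_super_imsm() below for update_create_array); mdmon then
 * applies them against its copy of the metadata in imsm_process_update().
 */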
203
204static int imsm_env_devname_as_serial(void)
205{
206 char *val = getenv("IMSM_DEVNAME_AS_SERIAL");
207
208 if (val && atoi(val) == 1)
209 return 1;
210
211 return 0;
212}
213
214
215static struct supertype *match_metadata_desc_imsm(char *arg)
216{
217 struct supertype *st;
218
219 if (strcmp(arg, "imsm") != 0 &&
220 strcmp(arg, "default") != 0
221 )
222 return NULL;
223
224 st = malloc(sizeof(*st));
225 memset(st, 0, sizeof(*st));
226 st->ss = &super_imsm;
227 st->max_devs = IMSM_MAX_DEVICES;
228 st->minor_version = 0;
229 st->sb = NULL;
230 return st;
231}
232
233static __u8 *get_imsm_version(struct imsm_super *mpb)
234{
235 return &mpb->sig[MPB_SIG_LEN];
236}
237
238/* retrieve a disk directly from the anchor when the anchor is known to be
239 * up-to-date, currently only at load time
240 */
241static struct imsm_disk *__get_imsm_disk(struct imsm_super *mpb, __u8 index)
242{
243 if (index >= mpb->num_disks)
244 return NULL;
245 return &mpb->disk[index];
246}
247
248/* retrieve a disk from the parsed metadata */
249static struct imsm_disk *get_imsm_disk(struct intel_super *super, __u8 index)
250{
251 struct dl *d;
252
253 for (d = super->disks; d; d = d->next)
254 if (d->index == index)
255 return &d->disk;
256
257 return NULL;
258}
259
260/* generate a checksum directly from the anchor when the anchor is known to be
261 * up-to-date, currently only at load or write_super after coalescing
262 */
263static __u32 __gen_imsm_checksum(struct imsm_super *mpb)
264{
265 __u32 end = mpb->mpb_size / sizeof(end);
266 __u32 *p = (__u32 *) mpb;
267 __u32 sum = 0;
268
269 while (end--)
270 sum += __le32_to_cpu(*p++);
271
272 return sum - __le32_to_cpu(mpb->check_sum);
273}
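/* The checksum is the 32-bit sum of every little-endian word in the MPB with
 * the stored check_sum field backed out, so a writer refreshes it with the
 * pattern used in write_super_imsm():
 *
 *	sum = __gen_imsm_checksum(mpb);
 *	mpb->check_sum = __cpu_to_le32(sum);
 *
 * and a reader validates by comparing __gen_imsm_checksum(mpb) against
 * __le32_to_cpu(mpb->check_sum).
 */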
274
275static size_t sizeof_imsm_map(struct imsm_map *map)
276{
277 return sizeof(struct imsm_map) + sizeof(__u32) * (map->num_members - 1);
278}
279
280struct imsm_map *get_imsm_map(struct imsm_dev *dev, int second_map)
281{
282 struct imsm_map *map = &dev->vol.map[0];
283
284 if (second_map && !dev->vol.migr_state)
285 return NULL;
286 else if (second_map) {
287 void *ptr = map;
288
289 return ptr + sizeof_imsm_map(map);
290 } else
291 return map;
292
293}
294
295/* return the size of the device.
296 * migr_state increases the returned size if map[0] were to be duplicated
297 */
298static size_t sizeof_imsm_dev(struct imsm_dev *dev, int migr_state)
299{
300 size_t size = sizeof(*dev) - sizeof(struct imsm_map) +
301 sizeof_imsm_map(get_imsm_map(dev, 0));
302
303 /* migrating means an additional map */
304 if (dev->vol.migr_state)
305 size += sizeof_imsm_map(get_imsm_map(dev, 1));
306 else if (migr_state)
307 size += sizeof_imsm_map(get_imsm_map(dev, 0));
308
309 return size;
310}
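/* Example: a non-migrating volume with num_members == 2 occupies
 * sizeof(struct imsm_dev) + 1 * sizeof(__u32) bytes in the MPB; passing
 * migr_state != 0 additionally reserves space for a duplicate of map[0],
 * which is how parse_raid_devices() sizes the dev_tbl copies.
 */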
311
312static struct imsm_dev *__get_imsm_dev(struct imsm_super *mpb, __u8 index)
313{
314 int offset;
315 int i;
316 void *_mpb = mpb;
317
318 if (index >= mpb->num_raid_devs)
319 return NULL;
320
321 /* devices start after all disks */
322 offset = ((void *) &mpb->disk[mpb->num_disks]) - _mpb;
323
324 for (i = 0; i <= index; i++)
325 if (i == index)
326 return _mpb + offset;
327 else
328 offset += sizeof_imsm_dev(_mpb + offset, 0);
329
330 return NULL;
331}
332
333static struct imsm_dev *get_imsm_dev(struct intel_super *super, __u8 index)
334{
335 if (index >= super->anchor->num_raid_devs)
336 return NULL;
337 return super->dev_tbl[index];
338}
339
340static __u32 get_imsm_disk_idx(struct imsm_map *map, int slot)
341{
342 __u32 *ord_tbl = &map->disk_ord_tbl[slot];
343
344 /* top byte identifies disk under rebuild
345 * why not just use the USABLE bit... oh well.
346 */
347 return __le32_to_cpu(*ord_tbl & ~(0xff << 24));
348}
349
350static __u32 get_imsm_ord_tbl_ent(struct imsm_dev *dev, int slot)
351{
352 struct imsm_map *map;
353
354 if (dev->vol.migr_state)
355 map = get_imsm_map(dev, 1);
356 else
357 map = get_imsm_map(dev, 0);
358
359 return map->disk_ord_tbl[slot];
360}
361
362static int get_imsm_raid_level(struct imsm_map *map)
363{
364 if (map->raid_level == 1) {
365 if (map->num_members == 2)
366 return 1;
367 else
368 return 10;
369 }
370
371 return map->raid_level;
372}
373
374static int cmp_extent(const void *av, const void *bv)
375{
376 const struct extent *a = av;
377 const struct extent *b = bv;
378 if (a->start < b->start)
379 return -1;
380 if (a->start > b->start)
381 return 1;
382 return 0;
383}
384
385static struct extent *get_extents(struct intel_super *super, struct dl *dl)
386{
387 /* find a list of used extents on the given physical device */
388 struct extent *rv, *e;
389 int i, j;
390 int memberships = 0;
391
392 for (i = 0; i < super->anchor->num_raid_devs; i++) {
393 struct imsm_dev *dev = get_imsm_dev(super, i);
394 struct imsm_map *map = get_imsm_map(dev, 0);
395
396 for (j = 0; j < map->num_members; j++) {
397 __u32 index = get_imsm_disk_idx(map, j);
398
399 if (index == dl->index)
400 memberships++;
401 }
402 }
403 rv = malloc(sizeof(struct extent) * (memberships + 1));
404 if (!rv)
405 return NULL;
406 e = rv;
407
408 for (i = 0; i < super->anchor->num_raid_devs; i++) {
409 struct imsm_dev *dev = get_imsm_dev(super, i);
410 struct imsm_map *map = get_imsm_map(dev, 0);
411
412 for (j = 0; j < map->num_members; j++) {
413 __u32 index = get_imsm_disk_idx(map, j);
414
415 if (index == dl->index) {
416 e->start = __le32_to_cpu(map->pba_of_lba0);
417 e->size = __le32_to_cpu(map->blocks_per_member);
418 e++;
419 }
420 }
421 }
422 qsort(rv, memberships, sizeof(*rv), cmp_extent);
423
424 e->start = __le32_to_cpu(dl->disk.total_blocks) -
425 (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
426 e->size = 0;
427 return rv;
428}
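/* The returned array is terminated by a zero-size sentinel that starts at the
 * reserved metadata area, so callers such as validate_geometry_imsm_volume()
 * can walk the gaps between extents with a simple do/while over e[i].size.
 */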
429
430#ifndef MDASSEMBLE
431static void print_imsm_dev(struct imsm_dev *dev, int index)
432{
433 __u64 sz;
434 int slot;
435 struct imsm_map *map = get_imsm_map(dev, 0);
436
437 printf("\n");
438 printf("[%s]:\n", dev->volume);
439 printf(" RAID Level : %d\n", get_imsm_raid_level(map));
440 printf(" Members : %d\n", map->num_members);
441 for (slot = 0; slot < map->num_members; slot++)
442 if (index == get_imsm_disk_idx(map, slot))
443 break;
444 if (slot < map->num_members)
445 printf(" This Slot : %d\n", slot);
446 else
447 printf(" This Slot : ?\n");
448 sz = __le32_to_cpu(dev->size_high);
449 sz <<= 32;
450 sz += __le32_to_cpu(dev->size_low);
451 printf(" Array Size : %llu%s\n", (unsigned long long)sz,
452 human_size(sz * 512));
453 sz = __le32_to_cpu(map->blocks_per_member);
454 printf(" Per Dev Size : %llu%s\n", (unsigned long long)sz,
455 human_size(sz * 512));
456 printf(" Sector Offset : %u\n",
457 __le32_to_cpu(map->pba_of_lba0));
458 printf(" Num Stripes : %u\n",
459 __le32_to_cpu(map->num_data_stripes));
460 printf(" Chunk Size : %u KiB\n",
461 __le16_to_cpu(map->blocks_per_strip) / 2);
462 printf(" Reserved : %d\n", __le32_to_cpu(dev->reserved_blocks));
463 printf(" Migrate State : %s", dev->vol.migr_state ? "migrating" : "idle");
464 if (dev->vol.migr_state)
465 printf(": %s", dev->vol.migr_type ? "rebuilding" : "initializing");
466 printf("\n");
467 printf(" Map State : %s", map_state_str[map->map_state]);
468 if (dev->vol.migr_state) {
469 struct imsm_map *map = get_imsm_map(dev, 1);
470 printf(", %s", map_state_str[map->map_state]);
471 }
472 printf("\n");
473 printf(" Dirty State : %s\n", dev->vol.dirty ? "dirty" : "clean");
474}
475
476static void print_imsm_disk(struct imsm_super *mpb, int index)
477{
478 struct imsm_disk *disk = __get_imsm_disk(mpb, index);
479 char str[MAX_RAID_SERIAL_LEN];
480 __u32 s;
481 __u64 sz;
482
483 if (index < 0)
484 return;
485
486 printf("\n");
487 snprintf(str, MAX_RAID_SERIAL_LEN, "%s", disk->serial);
488 printf(" Disk%02d Serial : %s\n", index, str);
489 s = __le32_to_cpu(disk->status);
490 printf(" State :%s%s%s%s\n", s&SPARE_DISK ? " spare" : "",
491 s&CONFIGURED_DISK ? " active" : "",
492 s&FAILED_DISK ? " failed" : "",
493 s&USABLE_DISK ? " usable" : "");
494 printf(" Id : %08x\n", __le32_to_cpu(disk->scsi_id));
495 sz = __le32_to_cpu(disk->total_blocks) -
496 (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS * mpb->num_raid_devs);
497 printf(" Usable Size : %llu%s\n", (unsigned long long)sz,
498 human_size(sz * 512));
499}
500
501static void examine_super_imsm(struct supertype *st, char *homehost)
502{
503 struct intel_super *super = st->sb;
504 struct imsm_super *mpb = super->anchor;
505 char str[MAX_SIGNATURE_LENGTH];
506 int i;
507 __u32 sum;
508
509 snprintf(str, MPB_SIG_LEN, "%s", mpb->sig);
510 printf(" Magic : %s\n", str);
511 snprintf(str, strlen(MPB_VERSION_RAID0), "%s", get_imsm_version(mpb));
512 printf(" Version : %s\n", get_imsm_version(mpb));
513 printf(" Family : %08x\n", __le32_to_cpu(mpb->family_num));
514 printf(" Generation : %08x\n", __le32_to_cpu(mpb->generation_num));
515 sum = __le32_to_cpu(mpb->check_sum);
516 printf(" Checksum : %08x %s\n", sum,
517 __gen_imsm_checksum(mpb) == sum ? "correct" : "incorrect");
518 printf(" MPB Sectors : %d\n", mpb_sectors(mpb));
519 printf(" Disks : %d\n", mpb->num_disks);
520 printf(" RAID Devices : %d\n", mpb->num_raid_devs);
521 print_imsm_disk(mpb, super->disks->index);
522 if (super->bbm_log) {
523 struct bbm_log *log = super->bbm_log;
524
525 printf("\n");
526 printf("Bad Block Management Log:\n");
527 printf(" Log Size : %d\n", __le32_to_cpu(mpb->bbm_log_size));
528 printf(" Signature : %x\n", __le32_to_cpu(log->signature));
529 printf(" Entry Count : %d\n", __le32_to_cpu(log->entry_count));
530 printf(" Spare Blocks : %d\n", __le32_to_cpu(log->reserved_spare_block_count));
531 printf(" First Spare : %llx\n", __le64_to_cpu(log->first_spare_lba));
532 }
533 for (i = 0; i < mpb->num_raid_devs; i++)
534 print_imsm_dev(__get_imsm_dev(mpb, i), super->disks->index);
535 for (i = 0; i < mpb->num_disks; i++) {
536 if (i == super->disks->index)
537 continue;
538 print_imsm_disk(mpb, i);
539 }
540}
541
542static void brief_examine_super_imsm(struct supertype *st)
543{
544 printf("ARRAY /dev/imsm metadata=imsm\n");
545}
546
547static void detail_super_imsm(struct supertype *st, char *homehost)
548{
549 printf("%s\n", __FUNCTION__);
550}
551
552static void brief_detail_super_imsm(struct supertype *st)
553{
554 printf("%s\n", __FUNCTION__);
555}
556#endif
557
558static int match_home_imsm(struct supertype *st, char *homehost)
559{
560 printf("%s\n", __FUNCTION__);
561
562 return 0;
563}
564
565static void uuid_from_super_imsm(struct supertype *st, int uuid[4])
566{
567 printf("%s\n", __FUNCTION__);
568}
569
570#if 0
571static void
572get_imsm_numerical_version(struct imsm_super *mpb, int *m, int *p)
573{
574 __u8 *v = get_imsm_version(mpb);
575 __u8 *end = mpb->sig + MAX_SIGNATURE_LENGTH;
576 char major[] = { 0, 0, 0 };
577 char minor[] = { 0 ,0, 0 };
578 char patch[] = { 0, 0, 0 };
579 char *ver_parse[] = { major, minor, patch };
580 int i, j;
581
582 i = j = 0;
583 while (*v != '\0' && v < end) {
584 if (*v != '.' && j < 2)
585 ver_parse[i][j++] = *v;
586 else {
587 i++;
588 j = 0;
589 }
590 v++;
591 }
592
593 *m = strtol(minor, NULL, 0);
594 *p = strtol(patch, NULL, 0);
595}
596#endif
597
598static int imsm_level_to_layout(int level)
599{
600 switch (level) {
601 case 0:
602 case 1:
603 return 0;
604 case 5:
605 case 6:
606 return ALGORITHM_LEFT_ASYMMETRIC;
607 case 10:
608 return 0x102; //FIXME is this correct?
609 }
610 return -1;
611}
612
613static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info)
614{
615 struct intel_super *super = st->sb;
616 struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
617 struct imsm_map *map = get_imsm_map(dev, 0);
618
619 info->container_member = super->current_vol;
620 info->array.raid_disks = map->num_members;
621 info->array.level = get_imsm_raid_level(map);
622 info->array.layout = imsm_level_to_layout(info->array.level);
623 info->array.md_minor = -1;
624 info->array.ctime = 0;
625 info->array.utime = 0;
626 info->array.chunk_size = __le16_to_cpu(map->blocks_per_strip * 512);
627
628 info->data_offset = __le32_to_cpu(map->pba_of_lba0);
629 info->component_size = __le32_to_cpu(map->blocks_per_member);
630
631 info->disk.major = 0;
632 info->disk.minor = 0;
633
634 sprintf(info->text_version, "/%s/%d",
635 devnum2devname(st->container_dev),
636 info->container_member);
637}
638
639
640static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info)
641{
642 struct intel_super *super = st->sb;
643 struct imsm_disk *disk;
644 __u32 s;
645
646 if (super->current_vol >= 0) {
647 getinfo_super_imsm_volume(st, info);
648 return;
649 }
650
651 /* Set raid_disks to zero so that Assemble will always pull in valid
652 * spares
653 */
654 info->array.raid_disks = 0;
655 info->array.level = LEVEL_CONTAINER;
656 info->array.layout = 0;
657 info->array.md_minor = -1;
658 info->array.ctime = 0; /* N/A for imsm */
659 info->array.utime = 0;
660 info->array.chunk_size = 0;
661
662 info->disk.major = 0;
663 info->disk.minor = 0;
664 info->disk.raid_disk = -1;
665 info->reshape_active = 0;
666 strcpy(info->text_version, "imsm");
667 info->disk.number = -1;
668 info->disk.state = 0;
669
670 if (super->disks) {
671 disk = &super->disks->disk;
672 info->disk.number = super->disks->index;
673 info->disk.raid_disk = super->disks->index;
674 info->data_offset = __le32_to_cpu(disk->total_blocks) -
675 (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
676 info->component_size = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
677 s = __le32_to_cpu(disk->status);
678 info->disk.state = s & CONFIGURED_DISK ? (1 << MD_DISK_ACTIVE) : 0;
679 info->disk.state |= s & FAILED_DISK ? (1 << MD_DISK_FAULTY) : 0;
680 info->disk.state |= s & USABLE_DISK ? (1 << MD_DISK_SYNC) : 0;
681 }
682}
683
684static int update_super_imsm(struct supertype *st, struct mdinfo *info,
685 char *update, char *devname, int verbose,
686 int uuid_set, char *homehost)
687{
688 /* FIXME */
689
690 /* For 'assemble' and 'force' we need to return non-zero if any
691 * change was made. For others, the return value is ignored.
692 * Update options are:
693 * force-one : This device looks a bit old but needs to be included,
694 * update age info appropriately.
695 * assemble: clear any 'faulty' flag to allow this device to
696 * be assembled.
697 * force-array: Array is degraded but being forced, mark it clean
698 * if that will be needed to assemble it.
699 *
700 * newdev: not used ????
701 * grow: Array has gained a new device - this is currently for
702 * linear only
703 * resync: mark as dirty so a resync will happen.
704 * name: update the name - preserving the homehost
705 *
706 * Following are not relevant for this imsm:
707 * sparc2.2 : update from old dodgy metadata
708 * super-minor: change the preferred_minor number
709 * summaries: update redundant counters.
710 * uuid: Change the uuid of the array to match what is given
711 * homehost: update the recorded homehost
712 * _reshape_progress: record new reshape_progress position.
713 */
714 int rv = 0;
715 //struct intel_super *super = st->sb;
716 //struct imsm_super *mpb = super->mpb;
717
718 if (strcmp(update, "grow") == 0) {
719 }
720 if (strcmp(update, "resync") == 0) {
721 /* dev->vol.dirty = 1; */
722 }
723
724 /* IMSM has no concept of UUID or homehost */
725
726 return rv;
727}
728
729static size_t disks_to_mpb_size(int disks)
730{
731 size_t size;
732
733 size = sizeof(struct imsm_super);
734 size += (disks - 1) * sizeof(struct imsm_disk);
735 size += 2 * sizeof(struct imsm_dev);
736 /* up to 2 maps per raid device (-2 for the imsm_maps already counted in imsm_dev) */
737 size += (4 - 2) * sizeof(struct imsm_map);
738 /* 4 possible disk_ord_tbl's */
739 size += 4 * (disks - 1) * sizeof(__u32);
740
741 return size;
742}
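/* Worked example for disks == 4: the estimate is
 *   sizeof(struct imsm_super)       (embeds the first imsm_disk)
 * + 3 * sizeof(struct imsm_disk)    (rest of the disk table)
 * + 2 * sizeof(struct imsm_dev)     (two volumes, one map each)
 * + 2 * sizeof(struct imsm_map)     (room for a second map per volume)
 * + 4 * 3 * sizeof(__u32)           (disk_ord_tbl slots beyond the one
 *                                     built into each imsm_map)
 */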
743
744static __u64 avail_size_imsm(struct supertype *st, __u64 devsize)
745{
746 if (devsize < (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS))
747 return 0;
748
749 return devsize - (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
750}
751
752static int compare_super_imsm(struct supertype *st, struct supertype *tst)
753{
754 /*
755 * return:
756 * 0 same, or first was empty, and second was copied
757 * 1 second had wrong number
758 * 2 wrong uuid
759 * 3 wrong other info
760 */
761 struct intel_super *first = st->sb;
762 struct intel_super *sec = tst->sb;
763
764 if (!first) {
765 st->sb = tst->sb;
766 tst->sb = NULL;
767 return 0;
768 }
769
770 if (memcmp(first->anchor->sig, sec->anchor->sig, MAX_SIGNATURE_LENGTH) != 0)
771 return 3;
772
773 /* if an anchor does not have num_raid_devs set then it is a free
774 * floating spare
775 */
776 if (first->anchor->num_raid_devs > 0 &&
777 sec->anchor->num_raid_devs > 0) {
778 if (first->anchor->family_num != sec->anchor->family_num)
779 return 3;
780 if (first->anchor->mpb_size != sec->anchor->mpb_size)
781 return 3;
782 if (first->anchor->check_sum != sec->anchor->check_sum)
783 return 3;
784 }
785
786 return 0;
787}
788
789static void fd2devname(int fd, char *name)
790{
791 struct stat st;
792 char path[256];
793 char dname[100];
794 char *nm;
795 int rv;
796
797 name[0] = '\0';
798 if (fstat(fd, &st) != 0)
799 return;
800 sprintf(path, "/sys/dev/block/%d:%d",
801 major(st.st_rdev), minor(st.st_rdev));
802
803 rv = readlink(path, dname, sizeof(dname));
804 if (rv <= 0)
805 return;
806
807 dname[rv] = '\0';
808 nm = strrchr(dname, '/');
809 nm++;
810 snprintf(name, MAX_RAID_SERIAL_LEN, "/dev/%s", nm);
811}
812
813
814extern int scsi_get_serial(int fd, void *buf, size_t buf_len);
815
816static int imsm_read_serial(int fd, char *devname,
817 __u8 serial[MAX_RAID_SERIAL_LEN])
818{
819 unsigned char scsi_serial[255];
820 int rv;
821 int rsp_len;
822 int i, cnt;
823
824 memset(scsi_serial, 0, sizeof(scsi_serial));
825
826 if (imsm_env_devname_as_serial()) {
827 char name[MAX_RAID_SERIAL_LEN];
828
829 fd2devname(fd, name);
830 strcpy((char *) serial, name);
831 return 0;
832 }
833
834 rv = scsi_get_serial(fd, scsi_serial, sizeof(scsi_serial));
835
836 if (rv != 0) {
837 if (devname)
838 fprintf(stderr,
839 Name ": Failed to retrieve serial for %s\n",
840 devname);
841 return rv;
842 }
843
844 rsp_len = scsi_serial[3];
845 for (i = 0, cnt = 0; i < rsp_len; i++) {
846 if (!isspace(scsi_serial[4 + i]))
847 serial[cnt++] = scsi_serial[4 + i];
848 if (cnt == MAX_RAID_SERIAL_LEN)
849 break;
850 }
851
852 serial[MAX_RAID_SERIAL_LEN - 1] = '\0';
853
854 return 0;
855}
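/* Setting IMSM_DEVNAME_AS_SERIAL=1 in the environment (checked by
 * imsm_env_devname_as_serial() above) substitutes the /dev name from
 * fd2devname() for the SCSI serial, e.g. for test devices that do not
 * report one.
 */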
856
857static int
858load_imsm_disk(int fd, struct intel_super *super, char *devname, int keep_fd)
859{
860 struct dl *dl;
861 struct stat stb;
862 int rv;
863 int i;
864 int alloc = 1;
865 __u8 serial[MAX_RAID_SERIAL_LEN];
866
867 rv = imsm_read_serial(fd, devname, serial);
868
869 if (rv != 0)
870 return 2;
871
872 /* check if this is a disk we have seen before. it may be a spare in
873 * super->disks while the current anchor believes it is a raid member,
874 * check if we need to update dl->index
875 */
876 for (dl = super->disks; dl; dl = dl->next)
877 if (memcmp(dl->serial, serial, MAX_RAID_SERIAL_LEN) == 0)
878 break;
879
880 if (!dl)
881 dl = malloc(sizeof(*dl));
882 else
883 alloc = 0;
884
885 if (!dl) {
886 if (devname)
887 fprintf(stderr,
888 Name ": failed to allocate disk buffer for %s\n",
889 devname);
890 return 2;
891 }
892
893 if (alloc) {
894 fstat(fd, &stb);
895 dl->major = major(stb.st_rdev);
896 dl->minor = minor(stb.st_rdev);
897 dl->next = super->disks;
898 dl->fd = keep_fd ? fd : -1;
899 dl->devname = devname ? strdup(devname) : NULL;
900 strncpy((char *) dl->serial, (char *) serial, MAX_RAID_SERIAL_LEN);
901 } else if (keep_fd) {
902 close(dl->fd);
903 dl->fd = fd;
904 }
905
906 /* look up this disk's index in the current anchor */
907 for (i = 0; i < super->anchor->num_disks; i++) {
908 struct imsm_disk *disk_iter;
909
910 disk_iter = __get_imsm_disk(super->anchor, i);
911
912 if (memcmp(disk_iter->serial, dl->serial,
913 MAX_RAID_SERIAL_LEN) == 0) {
914 __u32 status;
915
916 dl->disk = *disk_iter;
917 status = __le32_to_cpu(dl->disk.status);
918 /* only set index on disks that are a member of a
919 * populated container, i.e. one with raid_devs
920 */
921 if (status & SPARE_DISK)
922 dl->index = -1;
923 else
924 dl->index = i;
925 break;
926 }
927 }
928
929 if (i == super->anchor->num_disks && alloc) {
930 if (devname)
931 fprintf(stderr,
932 Name ": failed to load disk with serial \'%s\' for %s\n",
933 dl->serial, devname);
934 free(dl);
935 return 1;
936 }
937 if (i == super->anchor->num_disks && dl->index >= 0) {
938 if (devname)
939 fprintf(stderr,
940 Name ": confused... disk %d with serial \'%s\' "
941 "is not listed in the current anchor\n",
942 dl->index, dl->serial);
943 return 1;
944 }
945
946 if (alloc)
947 super->disks = dl;
948
949 return 0;
950}
951
952static void imsm_copy_dev(struct imsm_dev *dest, struct imsm_dev *src)
953{
954 memcpy(dest, src, sizeof_imsm_dev(src, 0));
955}
956
957static void dup_map(struct imsm_dev *dev)
958{
959 struct imsm_map *dest = get_imsm_map(dev, 1);
960 struct imsm_map *src = get_imsm_map(dev, 0);
961
962 memcpy(dest, src, sizeof_imsm_map(src));
963}
964
965static int parse_raid_devices(struct intel_super *super)
966{
967 int i;
968 struct imsm_dev *dev_new;
969 size_t len;
970
971 for (i = 0; i < super->anchor->num_raid_devs; i++) {
972 struct imsm_dev *dev_iter = __get_imsm_dev(super->anchor, i);
973
974 len = sizeof_imsm_dev(dev_iter, 1);
975 dev_new = malloc(len);
976 if (!dev_new)
977 return 1;
978 imsm_copy_dev(dev_new, dev_iter);
979 super->dev_tbl[i] = dev_new;
980 }
981
982 return 0;
983}
984
985/* retrieve a pointer to the bbm log which starts after all raid devices */
986struct bbm_log *__get_imsm_bbm_log(struct imsm_super *mpb)
987{
988 void *ptr = NULL;
989
990 if (__le32_to_cpu(mpb->bbm_log_size)) {
991 ptr = mpb;
992 ptr += mpb->mpb_size - __le32_to_cpu(mpb->bbm_log_size);
993 }
994
995 return ptr;
996}
997
998static void __free_imsm(struct intel_super *super, int free_disks);
999
1000/* load_imsm_mpb - read matrix metadata
1001 * allocates super->buf to be freed by free_super
1002 */
1003static int load_imsm_mpb(int fd, struct intel_super *super, char *devname)
1004{
1005 unsigned long long dsize;
1006 unsigned long long sectors;
1007 struct stat;
1008 struct imsm_super *anchor;
1009 __u32 check_sum;
1010 int rc;
1011
1012 get_dev_size(fd, NULL, &dsize);
1013
1014 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0) {
1015 if (devname)
1016 fprintf(stderr,
1017 Name ": Cannot seek to anchor block on %s: %s\n",
1018 devname, strerror(errno));
1019 return 1;
1020 }
1021
1022 if (posix_memalign((void**)&anchor, 512, 512) != 0) {
1023 if (devname)
1024 fprintf(stderr,
1025 Name ": Failed to allocate imsm anchor buffer"
1026 " on %s\n", devname);
1027 return 1;
1028 }
1029 if (read(fd, anchor, 512) != 512) {
1030 if (devname)
1031 fprintf(stderr,
1032 Name ": Cannot read anchor block on %s: %s\n",
1033 devname, strerror(errno));
1034 free(anchor);
1035 return 1;
1036 }
1037
1038 if (strncmp((char *) anchor->sig, MPB_SIGNATURE, MPB_SIG_LEN) != 0) {
1039 if (devname)
1040 fprintf(stderr,
1041 Name ": no IMSM anchor on %s\n", devname);
1042 free(anchor);
1043 return 2;
1044 }
1045
1046 __free_imsm(super, 0);
1047 super->len = __le32_to_cpu(anchor->mpb_size);
1048 super->len = ROUND_UP(super->len, 512);
1049 if (posix_memalign(&super->buf, 512, super->len) != 0) {
1050 if (devname)
1051 fprintf(stderr,
1052 Name ": unable to allocate %zu byte mpb buffer\n",
1053 super->len);
1054 free(anchor);
1055 return 2;
1056 }
1057 memcpy(super->buf, anchor, 512);
1058
1059 sectors = mpb_sectors(anchor) - 1;
1060 free(anchor);
1061 if (!sectors) {
1062 rc = load_imsm_disk(fd, super, devname, 0);
1063 if (rc == 0)
1064 rc = parse_raid_devices(super);
1065 return rc;
1066 }
1067
1068 /* read the extended mpb */
1069 if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0) {
1070 if (devname)
1071 fprintf(stderr,
1072 Name ": Cannot seek to extended mpb on %s: %s\n",
1073 devname, strerror(errno));
1074 return 1;
1075 }
1076
1077 if (read(fd, super->buf + 512, super->len - 512) != super->len - 512) {
1078 if (devname)
1079 fprintf(stderr,
1080 Name ": Cannot read extended mpb on %s: %s\n",
1081 devname, strerror(errno));
1082 return 2;
1083 }
1084
1085 check_sum = __gen_imsm_checksum(super->anchor);
1086 if (check_sum != __le32_to_cpu(super->anchor->check_sum)) {
1087 if (devname)
1088 fprintf(stderr,
1089 Name ": IMSM checksum %x != %x on %s\n",
1090 check_sum, __le32_to_cpu(super->anchor->check_sum),
1091 devname);
1092 return 2;
1093 }
1094
1095 /* FIXME the BBM log is disk specific so we cannot use this global
1096 * buffer for all disks. Ok for now since we only look at the global
1097 * bbm_log_size parameter to gate assembly
1098 */
1099 super->bbm_log = __get_imsm_bbm_log(super->anchor);
1100
1101 rc = load_imsm_disk(fd, super, devname, 0);
1102 if (rc == 0)
1103 rc = parse_raid_devices(super);
1104 return rc;
1105}
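/* Summary of the load sequence above: read the 512-byte anchor from the
 * second-to-last sector, check the MPB signature, allocate a sector-rounded
 * buffer of mpb_size bytes, pull in the remaining mpb_sectors() - 1 sectors,
 * verify the checksum, then hand off to load_imsm_disk() and
 * parse_raid_devices() to build the struct dl list and dev_tbl[].
 */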
1106
1107static void __free_imsm_disk(struct dl *d)
1108{
1109 if (d->fd >= 0)
1110 close(d->fd);
1111 if (d->devname)
1112 free(d->devname);
1113 free(d);
1114
1115}
1116static void free_imsm_disks(struct intel_super *super)
1117{
1118 while (super->disks) {
1119 struct dl *d = super->disks;
1120
1121 super->disks = d->next;
1122 __free_imsm_disk(d);
1123 }
1124}
1125
1126/* free all the pieces hanging off of a super pointer */
1127static void __free_imsm(struct intel_super *super, int free_disks)
1128{
1129 int i;
1130
1131 if (super->buf) {
1132 free(super->buf);
1133 super->buf = NULL;
1134 }
1135 if (free_disks)
1136 free_imsm_disks(super);
1137 for (i = 0; i < IMSM_MAX_RAID_DEVS; i++)
1138 if (super->dev_tbl[i]) {
1139 free(super->dev_tbl[i]);
1140 super->dev_tbl[i] = NULL;
1141 }
1142}
1143
1144static void free_imsm(struct intel_super *super)
1145{
1146 __free_imsm(super, 1);
1147 free(super);
1148}
1149
1150static void free_super_imsm(struct supertype *st)
1151{
1152 struct intel_super *super = st->sb;
1153
1154 if (!super)
1155 return;
1156
1157 free_imsm(super);
1158 st->sb = NULL;
1159}
1160
1161static struct intel_super *alloc_super(int creating_imsm)
1162{
1163 struct intel_super *super = malloc(sizeof(*super));
1164
1165 if (super) {
1166 memset(super, 0, sizeof(*super));
1167 super->creating_imsm = creating_imsm;
1168 super->current_vol = -1;
1169 }
1170
1171 return super;
1172}
1173
1174#ifndef MDASSEMBLE
1175static int load_super_imsm_all(struct supertype *st, int fd, void **sbp,
1176 char *devname, int keep_fd)
1177{
1178 struct mdinfo *sra;
1179 struct intel_super *super;
1180 struct mdinfo *sd, *best = NULL;
1181 __u32 bestgen = 0;
1182 __u32 gen;
1183 char nm[20];
1184 int dfd;
1185 int rv;
1186
1187 /* check if this disk is a member of an active array */
1188 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
1189 if (!sra)
1190 return 1;
1191
1192 if (sra->array.major_version != -1 ||
1193 sra->array.minor_version != -2 ||
1194 strcmp(sra->text_version, "imsm") != 0)
1195 return 1;
1196
1197 super = alloc_super(0);
1198 if (!super)
1199 return 1;
1200
1201 /* find the most up to date disk in this array, skipping spares */
1202 for (sd = sra->devs; sd; sd = sd->next) {
1203 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
1204 dfd = dev_open(nm, keep_fd ? O_RDWR : O_RDONLY);
1205 if (!dfd) {
1206 free_imsm(super);
1207 return 2;
1208 }
1209 rv = load_imsm_mpb(dfd, super, NULL);
1210 if (!keep_fd)
1211 close(dfd);
1212 if (rv == 0) {
1213 if (super->anchor->num_raid_devs == 0)
1214 gen = 0;
1215 else
1216 gen = __le32_to_cpu(super->anchor->generation_num);
1217 if (!best || gen > bestgen) {
1218 bestgen = gen;
1219 best = sd;
1220 }
1221 } else {
1222 free_imsm(super);
1223 return 2;
1224 }
1225 }
1226
1227 if (!best) {
1228 free_imsm(super);
1229 return 1;
1230 }
1231
1232 /* load the most up to date anchor */
1233 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
1234 dfd = dev_open(nm, O_RDONLY);
1235 if (!dfd) {
1236 free_imsm(super);
1237 return 1;
1238 }
1239 rv = load_imsm_mpb(dfd, super, NULL);
1240 close(dfd);
1241 if (rv != 0) {
1242 free_imsm(super);
1243 return 2;
1244 }
1245
1246 /* re-parse the disk list with the current anchor */
1247 for (sd = sra->devs ; sd ; sd = sd->next) {
1248 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
1249 dfd = dev_open(nm, keep_fd? O_RDWR : O_RDONLY);
1250 if (!dfd) {
1251 free_imsm(super);
1252 return 2;
1253 }
1254 load_imsm_disk(dfd, super, NULL, keep_fd);
1255 if (!keep_fd)
1256 close(dfd);
1257 }
1258
1259 if (st->subarray[0]) {
1260 if (atoi(st->subarray) < super->anchor->num_raid_devs)
1261 super->current_vol = atoi(st->subarray);
1262 else
1263 return 1;
1264 }
1265
1266 *sbp = super;
1267 if (st->ss == NULL) {
1268 st->ss = &super_imsm;
1269 st->minor_version = 0;
1270 st->max_devs = IMSM_MAX_DEVICES;
1271 st->container_dev = fd2devnum(fd);
1272 }
1273
1274 return 0;
1275}
1276#endif
1277
1278static int load_super_imsm(struct supertype *st, int fd, char *devname)
1279{
1280 struct intel_super *super;
1281 int rv;
1282
1283#ifndef MDASSEMBLE
1284 if (load_super_imsm_all(st, fd, &st->sb, devname, 1) == 0)
1285 return 0;
1286#endif
1287 if (st->subarray[0])
1288 return 1; /* FIXME */
1289
1290 super = alloc_super(0);
1291 if (!super) {
1292 fprintf(stderr,
1293 Name ": malloc of %zu failed.\n",
1294 sizeof(*super));
1295 return 1;
1296 }
1297
1298 rv = load_imsm_mpb(fd, super, devname);
1299
1300 if (rv) {
1301 if (devname)
1302 fprintf(stderr,
1303 Name ": Failed to load all information "
1304 "sections on %s\n", devname);
1305 free_imsm(super);
1306 return rv;
1307 }
1308
1309 st->sb = super;
1310 if (st->ss == NULL) {
1311 st->ss = &super_imsm;
1312 st->minor_version = 0;
1313 st->max_devs = IMSM_MAX_DEVICES;
1314 }
1315
1316 return 0;
1317}
1318
1319static __u16 info_to_blocks_per_strip(mdu_array_info_t *info)
1320{
1321 if (info->level == 1)
1322 return 128;
1323 return info->chunk_size >> 9;
1324}
1325
1326static __u32 info_to_num_data_stripes(mdu_array_info_t *info)
1327{
1328 __u32 num_stripes;
1329
1330 num_stripes = (info->size * 2) / info_to_blocks_per_strip(info);
1331 if (info->level == 1)
1332 num_stripes /= 2;
1333
1334 return num_stripes;
1335}
1336
1337static __u32 info_to_blocks_per_member(mdu_array_info_t *info)
1338{
1339 return (info->size * 2) & ~(info_to_blocks_per_strip(info) - 1);
1340}
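/* Example, assuming info->size is in KiB as elsewhere in mdadm: a 64 MiB
 * member with a 64 KiB chunk gives blocks_per_strip = 65536 >> 9 = 128,
 * blocks_per_member = 131072 (already strip aligned), and
 * num_data_stripes = 131072 / 128 = 1024 (halved for raid1, where the
 * strip size is fixed at 128 blocks).
 */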
1341
1342static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
1343 unsigned long long size, char *name,
1344 char *homehost, int *uuid)
1345{
1346 /* We are creating a volume inside a pre-existing container.
1347 * so st->sb is already set.
1348 */
1349 struct intel_super *super = st->sb;
1350 struct imsm_super *mpb = super->anchor;
1351 struct imsm_dev *dev;
1352 struct imsm_vol *vol;
1353 struct imsm_map *map;
1354 int idx = mpb->num_raid_devs;
1355 int i;
1356 unsigned long long array_blocks;
1357 __u32 offset = 0;
1358 size_t size_old, size_new;
1359
1360 if (mpb->num_raid_devs >= 2) {
1361 fprintf(stderr, Name": This imsm-container already has the "
1362 "maximum of 2 volumes\n");
1363 return 0;
1364 }
1365
1366 /* ensure the mpb is large enough for the new data */
1367 size_old = __le32_to_cpu(mpb->mpb_size);
1368 size_new = disks_to_mpb_size(info->nr_disks);
1369 if (size_new > size_old) {
1370 void *mpb_new;
1371 size_t size_round = ROUND_UP(size_new, 512);
1372
1373 if (posix_memalign(&mpb_new, 512, size_round) != 0) {
1374 fprintf(stderr, Name": could not allocate new mpb\n");
1375 return 0;
1376 }
1377 memcpy(mpb_new, mpb, size_old);
1378 free(mpb);
1379 mpb = mpb_new;
1380 super->anchor = mpb_new;
1381 mpb->mpb_size = __cpu_to_le32(size_new);
1382 memset(mpb_new + size_old, 0, size_round - size_old);
1383 }
1384 super->current_vol = idx;
1385 /* when creating the first raid device in this container set num_disks
1386 * to zero, i.e. delete this spare and add raid member devices in
1387 * add_to_super_imsm_volume()
1388 */
1389 if (super->current_vol == 0)
1390 mpb->num_disks = 0;
1391 sprintf(st->subarray, "%d", idx);
1392 dev = malloc(sizeof(*dev) + sizeof(__u32) * (info->raid_disks - 1));
1393 if (!dev) {
1394 fprintf(stderr, Name": could not allocate raid device\n");
1395 return 0;
1396 }
1397 strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
1398 array_blocks = calc_array_size(info->level, info->raid_disks,
1399 info->layout, info->chunk_size,
1400 info->size*2);
1401 dev->size_low = __cpu_to_le32((__u32) array_blocks);
1402 dev->size_high = __cpu_to_le32((__u32) (array_blocks >> 32));
1403 dev->status = __cpu_to_le32(0);
1404 dev->reserved_blocks = __cpu_to_le32(0);
1405 vol = &dev->vol;
1406 vol->migr_state = 0;
1407 vol->migr_type = 0;
1408 vol->dirty = 0;
1409 for (i = 0; i < idx; i++) {
1410 struct imsm_dev *prev = get_imsm_dev(super, i);
1411 struct imsm_map *pmap = get_imsm_map(prev, 0);
1412
1413 offset += __le32_to_cpu(pmap->blocks_per_member);
1414 offset += IMSM_RESERVED_SECTORS;
1415 }
1416 map = get_imsm_map(dev, 0);
1417 map->pba_of_lba0 = __cpu_to_le32(offset);
1418 map->blocks_per_member = __cpu_to_le32(info_to_blocks_per_member(info));
1419 map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info));
1420 map->num_data_stripes = __cpu_to_le32(info_to_num_data_stripes(info));
1421 map->map_state = info->level ? IMSM_T_STATE_UNINITIALIZED :
1422 IMSM_T_STATE_NORMAL;
1423
1424 if (info->level == 1 && info->raid_disks > 2) {
1425 fprintf(stderr, Name": imsm does not support more than 2 disks"
1426 "in a raid1 volume\n");
1427 return 0;
1428 }
1429 if (info->level == 10)
1430 map->raid_level = 1;
1431 else
1432 map->raid_level = info->level;
1433
1434 map->num_members = info->raid_disks;
1435 for (i = 0; i < map->num_members; i++) {
1436 /* initialized in add_to_super */
1437 map->disk_ord_tbl[i] = __cpu_to_le32(0);
1438 }
1439 mpb->num_raid_devs++;
1440 super->dev_tbl[super->current_vol] = dev;
1441
1442 return 1;
1443}
1444
1445static int init_super_imsm(struct supertype *st, mdu_array_info_t *info,
1446 unsigned long long size, char *name,
1447 char *homehost, int *uuid)
1448{
1449 /* This is primarily called by Create when creating a new array.
1450 * We will then get add_to_super called for each component, and then
1451 * write_init_super called to write it out to each device.
1452 * For IMSM, Create can create on fresh devices or on a pre-existing
1453 * array.
1454 * To create on a pre-existing array a different method will be called.
1455 * This one is just for fresh drives.
1456 */
1457 struct intel_super *super;
1458 struct imsm_super *mpb;
1459 size_t mpb_size;
1460
1461 if (!info) {
1462 st->sb = NULL;
1463 return 0;
1464 }
1465 if (st->sb)
1466 return init_super_imsm_volume(st, info, size, name, homehost,
1467 uuid);
1468
1469 super = alloc_super(1);
1470 if (!super)
1471 return 0;
1472 mpb_size = disks_to_mpb_size(info->nr_disks);
1473 if (posix_memalign(&super->buf, 512, mpb_size) != 0) {
1474 free(super);
1475 return 0;
1476 }
1477 mpb = super->buf;
1478 memset(mpb, 0, mpb_size);
1479
1480 memcpy(mpb->sig, MPB_SIGNATURE, strlen(MPB_SIGNATURE));
1481 memcpy(mpb->sig + strlen(MPB_SIGNATURE), MPB_VERSION_RAID5,
1482 strlen(MPB_VERSION_RAID5));
1483 mpb->mpb_size = mpb_size;
1484
1485 st->sb = super;
1486 return 1;
1487}
1488
1489static void add_to_super_imsm_volume(struct supertype *st, mdu_disk_info_t *dk,
1490 int fd, char *devname)
1491{
1492 struct intel_super *super = st->sb;
1493 struct imsm_super *mpb = super->anchor;
1494 struct dl *dl;
1495 struct imsm_dev *dev;
1496 struct imsm_map *map;
1497 __u32 status;
1498
1499 dev = get_imsm_dev(super, super->current_vol);
1500 map = get_imsm_map(dev, 0);
1501
1502 for (dl = super->disks; dl ; dl = dl->next)
1503 if (dl->major == dk->major &&
1504 dl->minor == dk->minor)
1505 break;
1506
1507 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
1508 return;
1509
1510 /* add a pristine spare to the metadata */
1511 if (dl->index < 0) {
1512 dl->index = super->anchor->num_disks;
1513 super->anchor->num_disks++;
1514 }
1515 map->disk_ord_tbl[dk->number] = __cpu_to_le32(dl->index);
1516 status = CONFIGURED_DISK | USABLE_DISK;
1517 dl->disk.status = __cpu_to_le32(status);
1518
1519 /* if we are creating the first raid device update the family number */
1520 if (super->current_vol == 0) {
1521 __u32 sum;
1522 struct imsm_dev *_dev = __get_imsm_dev(mpb, 0);
1523 struct imsm_disk *_disk = __get_imsm_disk(mpb, dl->index);
1524
1525 *_dev = *dev;
1526 *_disk = dl->disk;
1527 sum = __gen_imsm_checksum(mpb);
1528 mpb->family_num = __cpu_to_le32(sum);
1529 }
1530}
1531
1532static void add_to_super_imsm(struct supertype *st, mdu_disk_info_t *dk,
1533 int fd, char *devname)
1534{
1535 struct intel_super *super = st->sb;
1536 struct dl *dd;
1537 unsigned long long size;
1538 __u32 status, id;
1539 int rv;
1540 struct stat stb;
1541
1542 if (super->current_vol >= 0) {
1543 add_to_super_imsm_volume(st, dk, fd, devname);
1544 return;
1545 }
1546
1547 fstat(fd, &stb);
1548 dd = malloc(sizeof(*dd));
1549 if (!dd) {
1550 fprintf(stderr,
1551 Name ": malloc failed %s:%d.\n", __func__, __LINE__);
1552 abort();
1553 }
1554 memset(dd, 0, sizeof(*dd));
1555 dd->major = major(stb.st_rdev);
1556 dd->minor = minor(stb.st_rdev);
1557 dd->index = -1;
1558 dd->devname = devname ? strdup(devname) : NULL;
1559 dd->next = super->disks;
1560 dd->fd = fd;
1561 rv = imsm_read_serial(fd, devname, dd->serial);
1562 if (rv) {
1563 fprintf(stderr,
1564 Name ": failed to retrieve scsi serial, aborting\n");
1565 free(dd);
1566 abort();
1567 }
1568
1569 get_dev_size(fd, NULL, &size);
1570 size /= 512;
1571 status = USABLE_DISK | SPARE_DISK;
1572 strcpy((char *) dd->disk.serial, (char *) dd->serial);
1573 dd->disk.total_blocks = __cpu_to_le32(size);
1574 dd->disk.status = __cpu_to_le32(status);
1575 if (sysfs_disk_to_scsi_id(fd, &id) == 0)
1576 dd->disk.scsi_id = __cpu_to_le32(id);
1577 else
1578 dd->disk.scsi_id = __cpu_to_le32(0);
1579 super->disks = dd;
1580}
1581
1582static int store_imsm_mpb(int fd, struct intel_super *super);
1583
1584/* spare records have their own family number and do not have any defined raid
1585 * devices
1586 */
1587static int write_super_imsm_spares(struct intel_super *super, int doclose)
1588{
1589 struct imsm_super mpb_save;
1590 struct imsm_super *mpb = super->anchor;
1591 __u32 sum;
1592 struct dl *d;
1593
1594 mpb_save = *mpb;
1595 mpb->num_raid_devs = 0;
1596 mpb->num_disks = 1;
1597 mpb->mpb_size = sizeof(struct imsm_super);
1598 mpb->generation_num = __cpu_to_le32(1UL);
1599
1600 for (d = super->disks; d; d = d->next) {
1601 if (d->index >= 0)
1602 continue;
1603
1604 mpb->disk[0] = d->disk;
1605 sum = __gen_imsm_checksum(mpb);
1606 mpb->family_num = __cpu_to_le32(sum);
1607 sum = __gen_imsm_checksum(mpb);
1608 mpb->check_sum = __cpu_to_le32(sum);
1609
1610 if (store_imsm_mpb(d->fd, super)) {
1611 fprintf(stderr, "%s: failed for device %d:%d %s\n",
1612 __func__, d->major, d->minor, strerror(errno));
1613 *mpb = mpb_save;
1614 return 1;
1615 }
1616 if (doclose) {
1617 close(d->fd);
1618 d->fd = -1;
1619 }
1620 }
1621
1622 *mpb = mpb_save;
1623 return 0;
1624}
1625
1626static int write_super_imsm(struct intel_super *super, int doclose)
1627{
1628 struct imsm_super *mpb = super->anchor;
1629 struct dl *d;
1630 __u32 generation;
1631 __u32 sum;
1632 int spares = 0;
1633 int raid_disks = 0;
1634 int i;
1635 __u32 mpb_size = sizeof(struct imsm_super) - sizeof(struct imsm_disk);
1636
1637 /* 'generation' is incremented every time the metadata is written */
1638 generation = __le32_to_cpu(mpb->generation_num);
1639 generation++;
1640 mpb->generation_num = __cpu_to_le32(generation);
1641
1642 for (d = super->disks; d; d = d->next) {
1643 if (d->index < 0)
1644 spares++;
1645 else {
1646 raid_disks++;
1647 mpb->disk[d->index] = d->disk;
1648 mpb_size += sizeof(struct imsm_disk);
1649 }
1650 }
1651 if (raid_disks != mpb->num_disks) {
1652 fprintf(stderr, "%s: expected %d disks only found %d\n",
1653 __func__, mpb->num_disks, raid_disks);
1654 return 1;
1655 }
1656
1657 for (i = 0; i < mpb->num_raid_devs; i++) {
1658 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
1659
1660 imsm_copy_dev(dev, super->dev_tbl[i]);
1661 mpb_size += sizeof_imsm_dev(dev, 0);
1662 }
1663 mpb_size += __le32_to_cpu(mpb->bbm_log_size);
1664 mpb->mpb_size = __cpu_to_le32(mpb_size);
1665
1666 /* recalculate checksum */
1667 sum = __gen_imsm_checksum(mpb);
1668 mpb->check_sum = __cpu_to_le32(sum);
1669
1670 /* write the mpb for disks that compose raid devices */
1671 for (d = super->disks; d ; d = d->next) {
1672 if (d->index < 0)
1673 continue;
1674 if (store_imsm_mpb(d->fd, super)) {
1675 fprintf(stderr, "%s: failed for device %d:%d %s\n",
1676 __func__, d->major, d->minor, strerror(errno));
1677 return 1;
1678 }
1679 if (doclose) {
1680 close(d->fd);
1681 d->fd = -1;
1682 }
1683 }
1684
1685 if (spares)
1686 return write_super_imsm_spares(super, doclose);
1687
1688 return 0;
1689}
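/* write_super_imsm() rebuilds mpb_size from the live disk and device tables,
 * bumps generation_num, refreshes check_sum, and stores the result on every
 * raid member; free-floating spares are then rewritten separately by
 * write_super_imsm_spares() with their own single-disk, zero-volume MPB and
 * family number.
 */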
1690
1691static int write_init_super_imsm(struct supertype *st)
1692{
1693 if (st->update_tail) {
1694 /* queue the recently created array as a metadata update */
1695 size_t len;
1696 struct imsm_update_create_array *u;
1697 struct intel_super *super = st->sb;
1698 struct imsm_dev *dev;
1699 struct dl *d;
1700
1701 if (super->current_vol < 0 ||
1702 !(dev = get_imsm_dev(super, super->current_vol))) {
1703 fprintf(stderr, "%s: could not determine sub-array\n",
1704 __func__);
1705 return 1;
1706 }
1707
1708
1709 len = sizeof(*u) - sizeof(*dev) + sizeof_imsm_dev(dev, 0);
1710 u = malloc(len);
1711 if (!u) {
1712 fprintf(stderr, "%s: failed to allocate update buffer\n",
1713 __func__);
1714 return 1;
1715 }
1716
1717 u->type = update_create_array;
1718 u->dev_idx = super->current_vol;
1719 imsm_copy_dev(&u->dev, dev);
1720 append_metadata_update(st, u, len);
1721
1722 for (d = super->disks; d ; d = d->next) {
1723 close(d->fd);
1724 d->fd = -1;
1725 }
1726
1727 return 0;
1728 } else
1729 return write_super_imsm(st->sb, 1);
1730}
1731
1732static int store_zero_imsm(struct supertype *st, int fd)
1733{
1734 unsigned long long dsize;
1735 void *buf;
1736
1737 get_dev_size(fd, NULL, &dsize);
1738
1739 /* first block is stored on second to last sector of the disk */
1740 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
1741 return 1;
1742
1743 if (posix_memalign(&buf, 512, 512) != 0)
1744 return 1;
1745
1746 memset(buf, 0, 512);
1747 if (write(fd, buf, 512) != 512)
1748 return 1;
1749 return 0;
1750}
1751
1752static int validate_geometry_imsm_container(struct supertype *st, int level,
1753 int layout, int raiddisks, int chunk,
1754 unsigned long long size, char *dev,
1755 unsigned long long *freesize,
1756 int verbose)
1757{
1758 int fd;
1759 unsigned long long ldsize;
1760
1761 if (level != LEVEL_CONTAINER)
1762 return 0;
1763 if (!dev)
1764 return 1;
1765
1766 fd = open(dev, O_RDONLY|O_EXCL, 0);
1767 if (fd < 0) {
1768 if (verbose)
1769 fprintf(stderr, Name ": imsm: Cannot open %s: %s\n",
1770 dev, strerror(errno));
1771 return 0;
1772 }
1773 if (!get_dev_size(fd, dev, &ldsize)) {
1774 close(fd);
1775 return 0;
1776 }
1777 close(fd);
1778
1779 *freesize = avail_size_imsm(st, ldsize >> 9);
1780
1781 return 1;
1782}
1783
1784/* validate_geometry_imsm_volume - lifted from validate_geometry_ddf_bvd
1785 * FIX ME add ahci details
1786 */
1787static int validate_geometry_imsm_volume(struct supertype *st, int level,
1788 int layout, int raiddisks, int chunk,
1789 unsigned long long size, char *dev,
1790 unsigned long long *freesize,
1791 int verbose)
1792{
1793 struct stat stb;
1794 struct intel_super *super = st->sb;
1795 struct dl *dl;
1796 unsigned long long pos = 0;
1797 unsigned long long maxsize;
1798 struct extent *e;
1799 int i;
1800
1801 if (level == LEVEL_CONTAINER)
1802 return 0;
1803
1804 if (level == 1 && raiddisks > 2) {
1805 if (verbose)
1806 fprintf(stderr, Name ": imsm does not support more "
1807 "than 2 in a raid1 configuration\n");
1808 return 0;
1809 }
1810
1811 /* We must have the container info already read in. */
1812 if (!super)
1813 return 0;
1814
1815 if (!dev) {
1816 /* General test: make sure there is space for
1817 * 'raiddisks' device extents of size 'size' at a given
1818 * offset
1819 */
1820 unsigned long long minsize = size*2 /* convert to blocks */;
1821 unsigned long long start_offset = ~0ULL;
1822 int dcnt = 0;
1823 if (minsize == 0)
1824 minsize = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
1825 for (dl = super->disks; dl ; dl = dl->next) {
1826 int found = 0;
1827
1828 pos = 0;
1829 i = 0;
1830 e = get_extents(super, dl);
1831 if (!e) continue;
1832 do {
1833 unsigned long long esize;
1834 esize = e[i].start - pos;
1835 if (esize >= minsize)
1836 found = 1;
1837 if (found && start_offset == ~0ULL) {
1838 start_offset = pos;
1839 break;
1840 } else if (found && pos != start_offset) {
1841 found = 0;
1842 break;
1843 }
1844 pos = e[i].start + e[i].size;
1845 i++;
1846 } while (e[i-1].size);
1847 if (found)
1848 dcnt++;
1849 free(e);
1850 }
1851 if (dcnt < raiddisks) {
1852 if (verbose)
1853 fprintf(stderr, Name ": imsm: Not enough "
1854 "devices with space for this array "
1855 "(%d < %d)\n",
1856 dcnt, raiddisks);
1857 return 0;
1858 }
1859 return 1;
1860 }
1861 /* This device must be a member of the set */
1862 if (stat(dev, &stb) < 0)
1863 return 0;
1864 if ((S_IFMT & stb.st_mode) != S_IFBLK)
1865 return 0;
1866 for (dl = super->disks ; dl ; dl = dl->next) {
1867 if (dl->major == major(stb.st_rdev) &&
1868 dl->minor == minor(stb.st_rdev))
1869 break;
1870 }
1871 if (!dl) {
1872 if (verbose)
1873 fprintf(stderr, Name ": %s is not in the "
1874 "same imsm set\n", dev);
1875 return 0;
1876 }
1877 e = get_extents(super, dl);
1878 maxsize = 0;
1879 i = 0;
1880 if (e) do {
1881 unsigned long long esize;
1882 esize = e[i].start - pos;
1883 if (esize >= maxsize)
1884 maxsize = esize;
1885 pos = e[i].start + e[i].size;
1886 i++;
1887 } while (e[i-1].size);
1888 *freesize = maxsize;
1889
1890 return 1;
1891}
1892
1893int imsm_bbm_log_size(struct imsm_super *mpb)
1894{
1895 return __le32_to_cpu(mpb->bbm_log_size);
1896}
1897
1898static int validate_geometry_imsm(struct supertype *st, int level, int layout,
1899 int raiddisks, int chunk, unsigned long long size,
1900 char *dev, unsigned long long *freesize,
1901 int verbose)
1902{
1903 int fd, cfd;
1904 struct mdinfo *sra;
1905
1906 /* if given unused devices create a container
1908 * if given devices in a container create a member volume
1908 */
1909 if (level == LEVEL_CONTAINER) {
1910 /* Must be a fresh device to add to a container */
1911 return validate_geometry_imsm_container(st, level, layout,
1912 raiddisks, chunk, size,
1913 dev, freesize,
1914 verbose);
1915 }
1916
1917 if (st->sb) {
1918 /* creating in a given container */
1919 return validate_geometry_imsm_volume(st, level, layout,
1920 raiddisks, chunk, size,
1921 dev, freesize, verbose);
1922 }
1923
1924 /* limit creation to the following levels */
1925 if (!dev)
1926 switch (level) {
1927 case 0:
1928 case 1:
1929 case 10:
1930 case 5:
1931 break;
1932 default:
1933 return 1;
1934 }
1935
1936 /* This device needs to be a device in an 'imsm' container */
1937 fd = open(dev, O_RDONLY|O_EXCL, 0);
1938 if (fd >= 0) {
1939 if (verbose)
1940 fprintf(stderr,
1941 Name ": Cannot create this array on device %s\n",
1942 dev);
1943 close(fd);
1944 return 0;
1945 }
1946 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
1947 if (verbose)
1948 fprintf(stderr, Name ": Cannot open %s: %s\n",
1949 dev, strerror(errno));
1950 return 0;
1951 }
1952 /* Well, it is in use by someone, maybe an 'imsm' container. */
1953 cfd = open_container(fd);
1954 if (cfd < 0) {
1955 close(fd);
1956 if (verbose)
1957 fprintf(stderr, Name ": Cannot use %s: It is busy\n",
1958 dev);
1959 return 0;
1960 }
1961 sra = sysfs_read(cfd, 0, GET_VERSION);
1962 close(fd);
1963 if (sra && sra->array.major_version == -1 &&
1964 strcmp(sra->text_version, "imsm") == 0) {
1965 /* This is a member of an imsm container. Load the container
1966 * and try to create a volume
1967 */
1968 struct intel_super *super;
1969
1970 if (load_super_imsm_all(st, cfd, (void **) &super, NULL, 1) == 0) {
1971 st->sb = super;
1972 st->container_dev = fd2devnum(cfd);
1973 close(cfd);
1974 return validate_geometry_imsm_volume(st, level, layout,
1975 raiddisks, chunk,
1976 size, dev,
1977 freesize, verbose);
1978 }
1979 close(cfd);
1980 } else /* may belong to another container */
1981 return 0;
1982
1983 return 1;
1984}
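/* Decision tree above: LEVEL_CONTAINER requests go to
 * validate_geometry_imsm_container(); if a container is already loaded
 * (st->sb set) the volume variant runs directly; otherwise the named device
 * must turn out to be busy inside an existing imsm container, which is then
 * loaded with load_super_imsm_all() before retrying the volume check.
 */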
1985
1986static struct mdinfo *container_content_imsm(struct supertype *st)
1987{
1988 /* Given a container loaded by load_super_imsm_all,
1989 * extract information about all the arrays into
1990 * an mdinfo tree.
1991 *
1992 * For each imsm_dev create an mdinfo, fill it in,
1993 * then look for matching devices in super->disks
1994 * and create appropriate device mdinfo.
1995 */
1996 struct intel_super *super = st->sb;
1997 struct imsm_super *mpb = super->anchor;
1998 struct mdinfo *rest = NULL;
1999 int i;
2000
2001 /* do not assemble arrays that might have bad blocks */
2002 if (imsm_bbm_log_size(super->anchor)) {
2003 fprintf(stderr, Name ": BBM log found in metadata. "
2004 "Cannot activate array(s).\n");
2005 return NULL;
2006 }
2007
2008 for (i = 0; i < mpb->num_raid_devs; i++) {
2009 struct imsm_dev *dev = get_imsm_dev(super, i);
2010 struct imsm_vol *vol = &dev->vol;
2011 struct imsm_map *map = get_imsm_map(dev, 0);
2012 struct mdinfo *this;
2013 int slot;
2014
2015 this = malloc(sizeof(*this));
2016 memset(this, 0, sizeof(*this));
2017 this->next = rest;
2018
2019 this->array.level = get_imsm_raid_level(map);
2020 this->array.raid_disks = map->num_members;
2021 this->array.layout = imsm_level_to_layout(this->array.level);
2022 this->array.md_minor = -1;
2023 this->array.ctime = 0;
2024 this->array.utime = 0;
2025 this->array.chunk_size = __le16_to_cpu(map->blocks_per_strip) << 9;
2026 this->array.state = !vol->dirty;
2027 this->container_member = i;
2028 if (map->map_state == IMSM_T_STATE_UNINITIALIZED ||
2029 dev->vol.dirty || dev->vol.migr_state)
2030 this->resync_start = 0;
2031 else
2032 this->resync_start = ~0ULL;
2033
2034 strncpy(this->name, (char *) dev->volume, MAX_RAID_SERIAL_LEN);
2035 this->name[MAX_RAID_SERIAL_LEN] = 0;
2036
2037 sprintf(this->text_version, "/%s/%d",
2038 devnum2devname(st->container_dev),
2039 this->container_member);
2040
2041 memset(this->uuid, 0, sizeof(this->uuid));
2042
2043 this->component_size = __le32_to_cpu(map->blocks_per_member);
2044
2045 for (slot = 0 ; slot < map->num_members; slot++) {
2046 struct mdinfo *info_d;
2047 struct dl *d;
2048 int idx;
2049 int skip;
2050 __u32 s;
2051 __u32 ord;
2052
2053 skip = 0;
2054 idx = get_imsm_disk_idx(map, slot);
2055 ord = get_imsm_ord_tbl_ent(dev, slot);
2056 for (d = super->disks; d ; d = d->next)
2057 if (d->index == idx)
2058 break;
2059
2060 if (d == NULL)
2061 skip = 1;
2062
2063 s = d ? __le32_to_cpu(d->disk.status) : 0;
2064 if (s & FAILED_DISK)
2065 skip = 1;
2066 if (!(s & USABLE_DISK))
2067 skip = 1;
2068 if (ord & IMSM_ORD_REBUILD)
2069 skip = 1;
2070
2071 /*
2072 * if we skip some disks the array will be assembled degraded;
2073 * reset resync start to avoid a dirty-degraded situation
2074 *
2075 * FIXME handle dirty degraded
2076 */
2077 if (skip && !dev->vol.dirty)
2078 this->resync_start = ~0ULL;
2079 if (skip)
2080 continue;
2081
2082 info_d = malloc(sizeof(*info_d));
2083 if (!info_d) {
2084 fprintf(stderr, Name ": failed to allocate disk"
2085 " for volume %s\n", (char *) dev->volume);
2086 free(this);
2087 this = rest;
2088 break;
2089 }
2090 memset(info_d, 0, sizeof(*info_d));
2091 info_d->next = this->devs;
2092 this->devs = info_d;
2093
2094 info_d->disk.number = d->index;
2095 info_d->disk.major = d->major;
2096 info_d->disk.minor = d->minor;
2097 info_d->disk.raid_disk = slot;
2098
2099 this->array.working_disks++;
2100
2101 info_d->events = __le32_to_cpu(mpb->generation_num);
2102 info_d->data_offset = __le32_to_cpu(map->pba_of_lba0);
2103 info_d->component_size = __le32_to_cpu(map->blocks_per_member);
2104 if (d->devname)
2105 strcpy(info_d->name, d->devname);
2106 }
2107 rest = this;
2108 }
2109
2110 return rest;
2111}
2112
2113
2114static int imsm_open_new(struct supertype *c, struct active_array *a,
2115 char *inst)
2116{
2117 struct intel_super *super = c->sb;
2118 struct imsm_super *mpb = super->anchor;
2119
2120 if (atoi(inst) >= mpb->num_raid_devs) {
2121 fprintf(stderr, "%s: subarray index %d out of range\n",
2122 __func__, atoi(inst));
2123 return -ENODEV;
2124 }
2125
2126 dprintf("imsm: open_new %s\n", inst);
2127 a->info.container_member = atoi(inst);
2128 return 0;
2129}
2130
2131static __u8 imsm_check_degraded(struct intel_super *super, int n, int failed)
2132{
2133 struct imsm_dev *dev = get_imsm_dev(super, n);
2134 struct imsm_map *map = get_imsm_map(dev, 0);
2135
2136 if (!failed)
2137 return map->map_state == IMSM_T_STATE_UNINITIALIZED ?
2138 IMSM_T_STATE_UNINITIALIZED : IMSM_T_STATE_NORMAL;
2139
2140 switch (get_imsm_raid_level(map)) {
2141 case 0:
2142 return IMSM_T_STATE_FAILED;
2143 break;
2144 case 1:
2145 if (failed < map->num_members)
2146 return IMSM_T_STATE_DEGRADED;
2147 else
2148 return IMSM_T_STATE_FAILED;
2149 break;
2150 case 10:
2151 {
2152 /**
2153 * check whether any mirror set has failed entirely;
2154 * otherwise the array is at most degraded
2155 */
2156 int device_per_mirror = 2; /* FIXME is this always the case?
2157 * and are they always adjacent?
2158 */
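/* Example, assuming 2-way mirror sets laid out adjacently as the FIXME
 * above suggests: with members 0,1 | 2,3, losing disks 0 and 2 leaves
 * each set with one working copy (degraded), while losing 0 and 1
 * takes out a whole set (failed).
 */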
2159 int failed = 0;
2160 int i;
2161
2162 for (i = 0; i < map->num_members; i++) {
2163 int idx = get_imsm_disk_idx(map, i);
2164 struct imsm_disk *disk = get_imsm_disk(super, idx);
2165
2166 if (__le32_to_cpu(disk->status) & FAILED_DISK)
2167 failed++;
2168
2169 if (failed >= device_per_mirror)
2170 return IMSM_T_STATE_FAILED;
2171
2172 /* reset 'failed' for next mirror set */
2173 if (!((i + 1) % device_per_mirror))
2174 failed = 0;
2175 }
2176
2177 return IMSM_T_STATE_DEGRADED;
2178 }
2179 case 5:
2180 if (failed < 2)
2181 return IMSM_T_STATE_DEGRADED;
2182 else
2183 return IMSM_T_STATE_FAILED;
2184 break;
2185 default:
2186 break;
2187 }
2188
2189 return map->map_state;
2190}
2191
2192static int imsm_count_failed(struct intel_super *super, struct imsm_map *map)
2193{
2194 int i;
2195 int failed = 0;
2196 struct imsm_disk *disk;
2197
2198 for (i = 0; i < map->num_members; i++) {
2199 int idx = get_imsm_disk_idx(map, i);
2200
2201 disk = get_imsm_disk(super, idx);
2202 if (__le32_to_cpu(disk->status) & FAILED_DISK)
2203 failed++;
2204 else if (!(__le32_to_cpu(disk->status) & USABLE_DISK))
2205 failed++;
2206 }
2207
2208 return failed;
2209}
2210
2211static int imsm_set_array_state(struct active_array *a, int consistent)
2212{
2213 int inst = a->info.container_member;
2214 struct intel_super *super = a->container->sb;
2215 struct imsm_dev *dev = get_imsm_dev(super, inst);
2216 struct imsm_map *map = get_imsm_map(dev, 0);
2217 int dirty = !consistent;
2218 int failed;
2219 __u8 map_state;
2220
2221 failed = imsm_count_failed(super, map);
2222 map_state = imsm_check_degraded(super, inst, failed);
2223
2224 if (consistent && !dev->vol.dirty &&
2225 (dev->vol.migr_state || map_state != IMSM_T_STATE_NORMAL))
2226 a->resync_start = 0ULL;
2227 if (consistent == 2 && a->resync_start != ~0ULL)
2228 consistent = 0;
2229
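/* Convention used here: a->resync_start == ~0ULL means no resync or
 * recovery is outstanding, while 0 means a full resync is still needed.
 */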
2230 if (a->resync_start == ~0ULL) {
2231 /* complete recovery or initial resync */
2232 if (map->map_state != map_state) {
2233 dprintf("imsm: map_state %d: %d\n",
2234 inst, map_state);
2235 map->map_state = map_state;
2236 super->updates_pending++;
2237 }
2238 if (dev->vol.migr_state) {
2239 dprintf("imsm: mark resync complete\n");
2240 dev->vol.migr_state = 0;
2241 dev->vol.migr_type = 0;
2242 super->updates_pending++;
2243 }
2244 } else if (!dev->vol.migr_state) {
2245 dprintf("imsm: mark '%s' (%llu)\n",
2246 failed ? "rebuild" : "initializing", a->resync_start);
2247 /* mark that we are rebuilding */
2248 map->map_state = failed ? map_state : IMSM_T_STATE_NORMAL;
2249 dev->vol.migr_state = 1;
2250 dev->vol.migr_type = failed ? 1 : 0;
2251 dup_map(dev);
2252 a->check_degraded = 1;
2253 super->updates_pending++;
2254 }
2255
2256 /* mark dirty / clean */
2257 if (dirty != dev->vol.dirty) {
2258 dprintf("imsm: mark '%s' (%llu)\n",
2259 dirty ? "dirty" : "clean", a->resync_start);
2260 dev->vol.dirty = dirty;
2261 super->updates_pending++;
2262 }
2263 return consistent;
2264}
2265
2266static void imsm_set_disk(struct active_array *a, int n, int state)
2267{
2268 int inst = a->info.container_member;
2269 struct intel_super *super = a->container->sb;
2270 struct imsm_dev *dev = get_imsm_dev(super, inst);
2271 struct imsm_map *map = get_imsm_map(dev, 0);
2272 struct imsm_disk *disk;
2273 __u32 status;
2274 int failed = 0;
2275 int new_failure = 0;
2276
2277 if (n < 0 || n >= map->num_members) {
2278 fprintf(stderr, "imsm: set_disk %d out of range 0..%d\n",
2279 n, map->num_members - 1);
2280 return;
2281 }
2283
2284 dprintf("imsm: set_disk %d:%x\n", n, state);
2285
2286 disk = get_imsm_disk(super, get_imsm_disk_idx(map, n));
2287
2288 /* check for new failures */
2289 status = __le32_to_cpu(disk->status);
2290 if ((state & DS_FAULTY) && !(status & FAILED_DISK)) {
2291 status |= FAILED_DISK;
2292 disk->status = __cpu_to_le32(status);
2293 new_failure = 1;
2294 super->updates_pending++;
2295 }
2296 /* check if in_sync */
2297 if ((state & DS_INSYNC) && !(status & USABLE_DISK)) {
2298 status |= USABLE_DISK;
2299 disk->status = __cpu_to_le32(status);
2300 super->updates_pending++;
2301 }
2302
2303 /* the number of failures has changed, count the failed disks to
2304 * determine the degraded / failed state
2305 */
2306 if (new_failure && map->map_state != IMSM_T_STATE_FAILED)
2307 failed = imsm_count_failed(super, map);
2308
2309 /* determine map_state based on failed or in_sync count */
2310 if (failed)
2311 map->map_state = imsm_check_degraded(super, inst, failed);
2312 else if (map->map_state == IMSM_T_STATE_DEGRADED) {
2313 struct mdinfo *d;
2314 int working = 0;
2315
2316 for (d = a->info.devs ; d ; d = d->next)
2317 if (d->curr_state & DS_INSYNC)
2318 working++;
2319
2320 if (working == a->info.array.raid_disks) {
2321 map->map_state = IMSM_T_STATE_NORMAL;
2322 dev->vol.migr_state = 0;
2323 dev->vol.migr_type = 0;
2324 super->updates_pending++;
2325 }
2326 }
2327}
2328
2329static int store_imsm_mpb(int fd, struct intel_super *super)
2330{
2331 struct imsm_super *mpb = super->anchor;
2332 __u32 mpb_size = __le32_to_cpu(mpb->mpb_size);
2333 unsigned long long dsize;
2334 unsigned long long sectors;
2335
2336 get_dev_size(fd, NULL, &dsize);
2337
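/* Layout written below: the anchor (first 512 bytes of super->buf) goes
 * in the second-to-last sector, any extended mpb data goes in the
 * sectors immediately before it, and the last sector is left alone:
 *
 *   [ ...free... | extended mpb | anchor | last sector ]
 */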
2338 if (mpb_size > 512) {
2339 /* -1 to account for anchor */
2340 sectors = mpb_sectors(mpb) - 1;
2341
2342 /* write the extended mpb to the sectors preceding the anchor */
2343 if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0)
2344 return 1;
2345
2346 if (write(fd, super->buf + 512, 512 * sectors) != 512 * sectors)
2347 return 1;
2348 }
2349
2350 /* the anchor block is stored on the second-to-last sector of the disk */
2351 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
2352 return 1;
2353
2354 if (write(fd, super->buf, 512) != 512)
2355 return 1;
2356
2357 return 0;
2358}
2359
2360static void imsm_sync_metadata(struct supertype *container)
2361{
2362 struct intel_super *super = container->sb;
2363
2364 if (!super->updates_pending)
2365 return;
2366
2367 write_super_imsm(super, 0);
2368
2369 super->updates_pending = 0;
2370}
2371
2372static struct dl *imsm_readd(struct intel_super *super, int idx, struct active_array *a)
2373{
2374 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
2375 struct imsm_map *map = get_imsm_map(dev, 0);
2376 int i = get_imsm_disk_idx(map, idx);
2377 struct dl *dl;
2378
2379 for (dl = super->disks; dl; dl = dl->next)
2380 if (dl->index == i)
2381 break;
2382
2383 if (dl && (__le32_to_cpu(dl->disk.status) & FAILED_DISK))
2384 dl = NULL;
2385
2386 if (dl)
2387 dprintf("%s: found %x:%x\n", __func__, dl->major, dl->minor);
2388
2389 return dl;
2390}
2391
2392static struct dl *imsm_add_spare(struct intel_super *super, int idx, struct active_array *a)
2393{
2394 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
2395 struct imsm_map *map = get_imsm_map(dev, 0);
2397 unsigned long long pos;
2398 struct mdinfo *d;
2399 struct extent *ex;
2400 int j;
2401 int found;
2402 __u32 array_start;
2403 __u32 status;
2404 struct dl *dl;
2405
2406 for (dl = super->disks; dl; dl = dl->next) {
2407 /* If in this array, skip */
2408 for (d = a->info.devs ; d ; d = d->next)
2409 if (d->disk.major == dl->major &&
2410 d->disk.minor == dl->minor) {
2411 dprintf("%x:%x already in array\n", dl->major, dl->minor);
2412 break;
2413 }
2414 if (d)
2415 continue;
2416
2417 /* skip marked in use or failed drives */
2418 status = __le32_to_cpu(dl->disk.status);
2419 if (status & FAILED_DISK || status & CONFIGURED_DISK) {
2420 dprintf("%x:%x status ( %s%s)\n",
2421 dl->major, dl->minor,
2422 status & FAILED_DISK ? "failed " : "",
2423 status & CONFIGURED_DISK ? "configured " : "");
2424 continue;
2425 }
2426
2427 /* Does this unused device have the requisite free space?
2428 * We need a->info.component_size sectors
2429 */
2430 ex = get_extents(super, dl);
2431 if (!ex) {
2432 dprintf("cannot get extents\n");
2433 continue;
2434 }
2435 found = 0;
2436 j = 0;
2437 pos = 0;
2438 array_start = __le32_to_cpu(map->pba_of_lba0);
2439
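/* The loop below assumes the extent list is sorted by start and ends
 * with a zero-size terminator (hence the while (ex[j-1].size) test),
 * so the free gap before extent j is [pos, ex[j].start).  E.g. with
 * array_start = 0, component_size = 1000 and the first used extent at
 * sector 2000, the gap [0, 2000) is large enough and this disk is used.
 */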
2440 do {
2441 /* check that we can start at pba_of_lba0 with
2442 * a->info.component_size of space
2443 */
2445 if (array_start >= pos &&
2446 array_start + a->info.component_size < ex[j].start) {
2447 found = 1;
2448 break;
2449 }
2450 pos = ex[j].start + ex[j].size;
2451 j++;
2452
2453 } while (ex[j-1].size);
2454
2455 free(ex);
2456 if (!found) {
2457 dprintf("%x:%x does not have %llu at %d\n",
2458 dl->major, dl->minor,
2459 a->info.component_size,
2460 __le32_to_cpu(map->pba_of_lba0));
2461 /* No room */
2462 continue;
2463 } else
2464 break;
2465 }
2466
2467 return dl;
2468}
2469
2470static struct mdinfo *imsm_activate_spare(struct active_array *a,
2471 struct metadata_update **updates)
2472{
2473 /**
2474 * Find a device with unused free space and use it to replace a
2475 * failed/vacant region in an array. We replace failed regions one
2476 * array at a time. The result is that a new spare disk will be added
2477 * to the first failed array and after the monitor has finished
2478 * propagating failures the remainder will be consumed.
2479 *
2480 * FIXME add a capability for mdmon to request spares from another
2481 * container.
2482 */
2483
2484 struct intel_super *super = a->container->sb;
2485 int inst = a->info.container_member;
2486 struct imsm_dev *dev = get_imsm_dev(super, inst);
2487 struct imsm_map *map = get_imsm_map(dev, 0);
2488 int failed = a->info.array.raid_disks;
2489 struct mdinfo *rv = NULL;
2490 struct mdinfo *d;
2491 struct mdinfo *di;
2492 struct metadata_update *mu;
2493 struct dl *dl;
2494 struct imsm_update_activate_spare *u;
2495 int num_spares = 0;
2496 int i;
2497
2498 for (d = a->info.devs ; d ; d = d->next) {
2499 if ((d->curr_state & DS_FAULTY) &&
2500 d->state_fd >= 0)
2501 /* wait for Removal to happen */
2502 return NULL;
2503 if (d->state_fd >= 0)
2504 failed--;
2505 }
2506
2507 dprintf("imsm: activate spare: inst=%d failed=%d (%d) level=%d\n",
2508 inst, failed, a->info.array.raid_disks, a->info.array.level);
2509 if (imsm_check_degraded(super, inst, failed) != IMSM_T_STATE_DEGRADED)
2510 return NULL;
2511
2512 /* For each slot, if it is not working, find a spare */
2513 for (i = 0; i < a->info.array.raid_disks; i++) {
2514 for (d = a->info.devs ; d ; d = d->next)
2515 if (d->disk.raid_disk == i)
2516 break;
2517 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
2518 if (d && (d->state_fd >= 0))
2519 continue;
2520
2521 /*
2522 * OK, this device needs recovery. Try to re-add the previous
2523 * occupant of this slot; if this fails, add a new spare
2524 */
2525 dl = imsm_readd(super, i, a);
2526 if (!dl)
2527 dl = imsm_add_spare(super, i, a);
2528 if (!dl)
2529 continue;
2530
2531 /* found a usable disk with enough space */
2532 di = malloc(sizeof(*di));
2533 memset(di, 0, sizeof(*di));
2534
2535 /* dl->index will be -1 in the case we are activating a
2536 * pristine spare. imsm_process_update() will create a
2537 * new index in this case. Once a disk is found to be
2538 * failed in all member arrays it is kicked from the
2539 * metadata
2540 */
2541 di->disk.number = dl->index;
2542
2543 /* (ab)use di->devs to store a pointer to the device
2544 * we chose
2545 */
2546 di->devs = (struct mdinfo *) dl;
2547
2548 di->disk.raid_disk = i;
2549 di->disk.major = dl->major;
2550 di->disk.minor = dl->minor;
2551 di->disk.state = 0;
2552 di->data_offset = __le32_to_cpu(map->pba_of_lba0);
2553 di->component_size = a->info.component_size;
2554 di->container_member = inst;
2555 di->next = rv;
2556 rv = di;
2557 num_spares++;
2558 dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
2559 i, di->data_offset);
2560
2561 break;
2562 }
2563
2564 if (!rv)
2565 /* No spares found */
2566 return rv;
2567 /* Now 'rv' has a list of devices to return.
2568 * Create a metadata_update record to update the
2569 * disk_ord_tbl for the array
2570 */
2571 mu = malloc(sizeof(*mu));
2572 mu->buf = malloc(sizeof(struct imsm_update_activate_spare) * num_spares);
2573 mu->space = NULL;
2574 mu->len = sizeof(struct imsm_update_activate_spare) * num_spares;
2575 mu->next = *updates;
2576 u = (struct imsm_update_activate_spare *) mu->buf;
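/* The activate_spare records are packed back to back in mu->buf and
 * chained through ->next, so the whole batch lives in one allocation
 * yet can still be walked as a list by the update handler.
 */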
2577
2578 for (di = rv ; di ; di = di->next) {
2579 u->type = update_activate_spare;
2580 u->dl = (struct dl *) di->devs;
2581 di->devs = NULL;
2582 u->slot = di->disk.raid_disk;
2583 u->array = inst;
2584 u->next = u + 1;
2585 u++;
2586 }
2587 (u-1)->next = NULL;
2588 *updates = mu;
2589
2590 return rv;
2591}
2592
2593static int disks_overlap(struct imsm_map *m1, struct imsm_map *m2)
2594{
2595 int i;
2596 int j;
2597 int idx;
2598
2599 for (i = 0; i < m1->num_members; i++) {
2600 idx = get_imsm_disk_idx(m1, i);
2601 for (j = 0; j < m2->num_members; j++)
2602 if (idx == get_imsm_disk_idx(m2, j))
2603 return 1;
2604 }
2605
2606 return 0;
2607}
2608
2609static void imsm_delete(struct intel_super *super, struct dl **dlp);
2610
2611static void imsm_process_update(struct supertype *st,
2612 struct metadata_update *update)
2613{
2614 /**
2615 * crack open the metadata_update envelope to find the update record;
2616 * the update can be one of:
2617 * update_activate_spare - a spare device has replaced a failed
2618 * device in an array, update the disk_ord_tbl. If this disk is
2619 * present in all member arrays then also clear the SPARE_DISK
2620 * flag
2621 */
2622 struct intel_super *super = st->sb;
2623 struct imsm_super *mpb = super->anchor;
2624 enum imsm_update_type type = *(enum imsm_update_type *) update->buf;
2625
2626 switch (type) {
2627 case update_activate_spare: {
2628 struct imsm_update_activate_spare *u = (void *) update->buf;
2629 struct imsm_dev *dev = get_imsm_dev(super, u->array);
2630 struct imsm_map *map = get_imsm_map(dev, 0);
2631 struct active_array *a;
2632 struct imsm_disk *disk;
2633 __u32 status;
2634 struct dl *dl;
2635 unsigned int found;
2636 int victim;
2637 int i;
2638
2639 for (dl = super->disks; dl; dl = dl->next)
2640 if (dl == u->dl)
2641 break;
2642
2643 if (!dl) {
2644 fprintf(stderr, "error: imsm_activate_spare passed "
2645 "an unknown disk (index: %d serial: %s)\n",
2646 u->dl->index, u->dl->serial);
2647 return;
2648 }
2649
2650 super->updates_pending++;
2651
2652 /* adding a pristine spare, assign a new index */
2653 if (dl->index < 0) {
2654 dl->index = super->anchor->num_disks;
2655 super->anchor->num_disks++;
2656 }
2657 victim = get_imsm_disk_idx(map, u->slot);
2658 map->disk_ord_tbl[u->slot] = __cpu_to_le32(dl->index);
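/* disk_ord_tbl maps a volume slot to a container disk index, with flag
 * bits (e.g. IMSM_ORD_REBUILD) kept in the top byte; storing the bare
 * index here also clears any stale ordinal flags for this slot.
 */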
2659 disk = &dl->disk;
2660 status = __le32_to_cpu(disk->status);
2661 status |= CONFIGURED_DISK;
2662 status &= ~(SPARE_DISK | USABLE_DISK);
2663 disk->status = __cpu_to_le32(status);
2664
2665 /* count arrays using the victim in the metadata */
2666 found = 0;
2667 for (a = st->arrays; a ; a = a->next) {
2668 dev = get_imsm_dev(super, a->info.container_member);
2669 map = get_imsm_map(dev, 0);
2670 for (i = 0; i < map->num_members; i++)
2671 if (victim == get_imsm_disk_idx(map, i))
2672 found++;
2673 }
2674
2675 /* clear some flags if the victim is no longer being
2676 * utilized anywhere
2677 */
2678 if (!found) {
2679 struct dl **dlp;
2680 for (dlp = &super->disks; *dlp; dlp = &(*dlp)->next)
2681 if ((*dlp)->index == victim)
2682 break;
2683 disk = &(*dlp)->disk;
2684 status = __le32_to_cpu(disk->status);
2685 status &= ~(CONFIGURED_DISK | USABLE_DISK);
2686 disk->status = __cpu_to_le32(status);
2687 /* We know that 'manager' isn't touching anything,
2688 * so it is safe to:
2689 */
2690 imsm_delete(super, dlp);
2691 }
2692 break;
2693 }
2694 case update_create_array: {
2695 /* someone wants to create a new array, we need to be aware of
2696 * a few races/collisions:
2697 * 1/ 'Create' called by two separate instances of mdadm
2698 * 2/ 'Create' versus 'activate_spare': mdadm has chosen
2699 * devices that have since been assimilated via
2700 * activate_spare.
2701 * In the event this update can not be carried out mdadm will
2702 * (FIX ME) notice that its update did not take hold.
2703 */
2704 struct imsm_update_create_array *u = (void *) update->buf;
2705 struct imsm_dev *dev;
2706 struct imsm_map *map, *new_map;
2707 unsigned long long start, end;
2708 unsigned long long new_start, new_end;
2709 int i;
2710 int overlap = 0;
2711
2712 /* handle racing creates: first come first serve */
2713 if (u->dev_idx < mpb->num_raid_devs) {
2714 dprintf("%s: subarray %d already defined\n",
2715 __func__, u->dev_idx);
2716 return;
2717 }
2718
2719 /* check update is next in sequence */
2720 if (u->dev_idx != mpb->num_raid_devs) {
2721 dprintf("%s: can not create array %d expected index %d\n",
2722 __func__, u->dev_idx, mpb->num_raid_devs);
2723 return;
2724 }
2725
2726 new_map = get_imsm_map(&u->dev, 0);
2727 new_start = __le32_to_cpu(new_map->pba_of_lba0);
2728 new_end = new_start + __le32_to_cpu(new_map->blocks_per_member);
2729
2730 /* handle activate_spare versus create race:
2731 * check to make sure that overlapping arrays do not include
2732 * overlapping disks
2733 */
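/* Two regions [start, end) and [new_start, new_end) are considered
 * overlapping when either one's start falls inside the other, e.g.
 * start=0, end=1000 versus new_start=500; only such overlapping arrays
 * are additionally required not to share member disks.
 */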
2734 for (i = 0; i < mpb->num_raid_devs; i++) {
2735 dev = get_imsm_dev(super, i);
2736 map = get_imsm_map(dev, 0);
2737 start = __le32_to_cpu(map->pba_of_lba0);
2738 end = start + __le32_to_cpu(map->blocks_per_member);
2739 if ((new_start >= start && new_start <= end) ||
2740 (start >= new_start && start <= new_end))
2741 overlap = 1;
2742 if (overlap && disks_overlap(map, new_map)) {
2743 dprintf("%s: arrays overlap\n", __func__);
2744 return;
2745 }
2746 }
2747 /* check num_members sanity */
2748 if (new_map->num_members > mpb->num_disks) {
2749 dprintf("%s: num_disks out of range\n", __func__);
2750 return;
2751 }
2752
2753 /* check that prepare update was successful */
2754 if (!update->space) {
2755 dprintf("%s: prepare update failed\n", __func__);
2756 return;
2757 }
2758
2759 super->updates_pending++;
2760 dev = update->space;
2761 update->space = NULL;
2762 imsm_copy_dev(dev, &u->dev);
2763 super->dev_tbl[u->dev_idx] = dev;
2764 mpb->num_raid_devs++;
2765
2766 /* fix up flags, if arrays overlap then the drives can not be
2767 * spares
2768 */
2769 for (i = 0; i < new_map->num_members; i++) {
2770 struct imsm_disk *disk;
2771 __u32 status;
2772
2773 disk = get_imsm_disk(super, get_imsm_disk_idx(new_map, i));
2774 status = __le32_to_cpu(disk->status);
2775 status |= CONFIGURED_DISK;
2776 if (overlap)
2777 status &= ~SPARE_DISK;
2778 disk->status = __cpu_to_le32(status);
2779 }
2780 break;
2781 }
2782 }
2783}
2784
2785static void imsm_prepare_update(struct supertype *st,
2786 struct metadata_update *update)
2787{
2788 /**
2789 * Allocate space to hold new disk entries, raid-device entries or a
2790 * new mpb if necessary. We currently maintain an mpb large enough to
2791 * hold 2 subarrays for the given number of disks. This may not be
2792 * sufficient when reshaping.
2793 *
2794 * FIX ME handle the reshape case.
2795 *
2796 * The monitor will be able to safely change super->mpb by arranging
2797 * for it to be freed in check_update_queue(). I.e. the monitor thread
2798 * will start using the new pointer and the manager can continue to use
2799 * the old value until check_update_queue() runs.
2800 */
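/* Allocating here, in the manager's context, is deliberate: by the time
 * process_update() runs in the monitor thread it only has to consume
 * update->space, presumably so the monitor never needs to call malloc
 * while it is the one keeping the array making forward progress.
 */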
2801 enum imsm_update_type type = *(enum imsm_update_type *) update->buf;
2802
2803 switch (type) {
2804 case update_create_array: {
2805 struct imsm_update_create_array *u = (void *) update->buf;
2806 size_t len = sizeof_imsm_dev(&u->dev, 1);
2807 
2808 update->space = malloc(len);
2809 break;
2810 }
2811 default:
2812 break;
2813 }
2814
2815 return;
2816}
2817
2818/* must be called while manager is quiesced */
2819static void imsm_delete(struct intel_super *super, struct dl **dlp)
2820{
2821 struct imsm_super *mpb = super->anchor;
2822 struct dl *dl = *dlp;
2823 struct dl *iter;
2824 struct imsm_dev *dev;
2825 struct imsm_map *map;
2826 int i, j;
2827
2828 dprintf("%s: deleting device %x:%x from imsm_super\n",
2829 __func__, dl->major, dl->minor);
2830
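/* Example: with disks indexed 0..3, deleting index 1 leaves 0,2,3 and
 * the loops below renumber 2 -> 1 and 3 -> 2, both in the dl list and
 * in every volume's disk_ord_tbl, so the indexes stay dense.
 */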
2831 /* shift all indexes down one */
2832 for (iter = super->disks; iter; iter = iter->next)
2833 if (iter->index > dl->index)
2834 iter->index--;
2835
2836 for (i = 0; i < mpb->num_raid_devs; i++) {
2837 dev = get_imsm_dev(super, i);
2838 map = get_imsm_map(dev, 0);
2839
2840 for (j = 0; j < map->num_members; j++) {
2841 int idx = get_imsm_disk_idx(map, j);
2842
2843 if (idx > dl->index)
2844 map->disk_ord_tbl[j] = __cpu_to_le32(idx - 1);
2845 }
2846 }
2847
2848 mpb->num_disks--;
2849 super->updates_pending++;
2850 *dlp = (*dlp)->next;
2851 __free_imsm_disk(dl);
2852}
2853
2854struct superswitch super_imsm = {
2855#ifndef MDASSEMBLE
2856 .examine_super = examine_super_imsm,
2857 .brief_examine_super = brief_examine_super_imsm,
2858 .detail_super = detail_super_imsm,
2859 .brief_detail_super = brief_detail_super_imsm,
2860 .write_init_super = write_init_super_imsm,
2861#endif
2862 .match_home = match_home_imsm,
2863 .uuid_from_super= uuid_from_super_imsm,
2864 .getinfo_super = getinfo_super_imsm,
2865 .update_super = update_super_imsm,
2866
2867 .avail_size = avail_size_imsm,
2868
2869 .compare_super = compare_super_imsm,
2870
2871 .load_super = load_super_imsm,
2872 .init_super = init_super_imsm,
2873 .add_to_super = add_to_super_imsm,
2874 .store_super = store_zero_imsm,
2875 .free_super = free_super_imsm,
2876 .match_metadata_desc = match_metadata_desc_imsm,
2877 .container_content = container_content_imsm,
2878
2879 .validate_geometry = validate_geometry_imsm,
2880 .external = 1,
2881
2882/* for mdmon */
2883 .open_new = imsm_open_new,
2885 .set_array_state= imsm_set_array_state,
2886 .set_disk = imsm_set_disk,
2887 .sync_metadata = imsm_sync_metadata,
2888 .activate_spare = imsm_activate_spare,
2889 .process_update = imsm_process_update,
2890 .prepare_update = imsm_prepare_update,
2891};