1/*
2 * mdadm - Intel(R) Matrix Storage Manager Support
3 *
4 * Copyright (C) 2002-2007 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include "mdadm.h"
21#include "mdmon.h"
22#include <values.h>
23#include <scsi/sg.h>
24#include <ctype.h>
25
26/* MPB == Metadata Parameter Block */
27#define MPB_SIGNATURE "Intel Raid ISM Cfg Sig. "
28#define MPB_SIG_LEN (strlen(MPB_SIGNATURE))
29#define MPB_VERSION_RAID0 "1.0.00"
30#define MPB_VERSION_RAID1 "1.1.00"
31#define MPB_VERSION_RAID5 "1.2.02"
32#define MAX_SIGNATURE_LENGTH 32
33#define MAX_RAID_SERIAL_LEN 16
34#define MPB_SECTOR_CNT 418
35#define IMSM_RESERVED_SECTORS 4096
36
37/* Disk configuration info. */
38#define IMSM_MAX_DEVICES 255
39struct imsm_disk {
40 __u8 serial[MAX_RAID_SERIAL_LEN];/* 0xD8 - 0xE7 ascii serial number */
41 __u32 total_blocks; /* 0xE8 - 0xEB total blocks */
42 __u32 scsi_id; /* 0xEC - 0xEF scsi ID */
43 __u32 status; /* 0xF0 - 0xF3 */
44#define SPARE_DISK 0x01 /* Spare */
45#define CONFIGURED_DISK 0x02 /* Member of some RaidDev */
46#define FAILED_DISK 0x04 /* Permanent failure */
47#define USABLE_DISK 0x08 /* Fully usable unless FAILED_DISK is set */
48
49#define IMSM_DISK_FILLERS 5
50 __u32 filler[IMSM_DISK_FILLERS]; /* 0xF4 - 0x107 MPB_DISK_FILLERS for future expansion */
51};
52
53/* RAID map configuration infos. */
54struct imsm_map {
55 __u32 pba_of_lba0; /* start address of partition */
56 __u32 blocks_per_member;/* blocks per member */
57 __u32 num_data_stripes; /* number of data stripes */
58 __u16 blocks_per_strip;
59 __u8 map_state; /* Normal, Uninitialized, Degraded, Failed */
60#define IMSM_T_STATE_NORMAL 0
61#define IMSM_T_STATE_UNINITIALIZED 1
62#define IMSM_T_STATE_DEGRADED 2 /* FIXME: is this correct? */
63#define IMSM_T_STATE_FAILED 3 /* FIXME: is this correct? */
64 __u8 raid_level;
65#define IMSM_T_RAID0 0
66#define IMSM_T_RAID1 1
67#define IMSM_T_RAID5 5 /* since metadata version 1.2.02 ? */
68 __u8 num_members; /* number of member disks */
69 __u8 reserved[3];
70 __u32 filler[7]; /* expansion area */
71#define IMSM_ORD_REBUILD (1 << 24)
72 __u32 disk_ord_tbl[1]; /* disk_ord_tbl[num_members],
73 * top byte contains some flags
74 */
75} __attribute__ ((packed));
76
77struct imsm_vol {
78 __u32 reserved[2];
79 __u8 migr_state; /* Normal or Migrating */
80 __u8 migr_type; /* Initializing, Rebuilding, ... */
81 __u8 dirty;
82 __u8 fill[1];
83 __u32 filler[5];
84 struct imsm_map map[1];
85 /* here comes another one if migr_state */
86} __attribute__ ((packed));
87
88struct imsm_dev {
89 __u8 volume[MAX_RAID_SERIAL_LEN];
90 __u32 size_low;
91 __u32 size_high;
92 __u32 status; /* Persistent RaidDev status */
93 __u32 reserved_blocks; /* Reserved blocks at beginning of volume */
94#define IMSM_DEV_FILLERS 12
95 __u32 filler[IMSM_DEV_FILLERS];
96 struct imsm_vol vol;
97} __attribute__ ((packed));
98
99struct imsm_super {
100 __u8 sig[MAX_SIGNATURE_LENGTH]; /* 0x00 - 0x1F */
101 __u32 check_sum; /* 0x20 - 0x23 MPB Checksum */
102 __u32 mpb_size; /* 0x24 - 0x27 Size of MPB */
103 __u32 family_num; /* 0x28 - 0x2B Checksum from first time this config was written */
104 __u32 generation_num; /* 0x2C - 0x2F Incremented each time this array's MPB is written */
105 __u32 error_log_size; /* 0x30 - 0x33 in bytes */
106 __u32 attributes; /* 0x34 - 0x37 */
107 __u8 num_disks; /* 0x38 Number of configured disks */
108 __u8 num_raid_devs; /* 0x39 Number of configured volumes */
109 __u8 error_log_pos; /* 0x3A */
110 __u8 fill[1]; /* 0x3B */
111 __u32 cache_size; /* 0x3c - 0x40 in mb */
112 __u32 orig_family_num; /* 0x40 - 0x43 original family num */
113 __u32 pwr_cycle_count; /* 0x44 - 0x47 simulated power cycle count for array */
114 __u32 bbm_log_size; /* 0x48 - 0x4B - size of bad Block Mgmt Log in bytes */
115#define IMSM_FILLERS 35
116 __u32 filler[IMSM_FILLERS]; /* 0x4C - 0xD7 RAID_MPB_FILLERS */
117 struct imsm_disk disk[1]; /* 0xD8 diskTbl[numDisks] */
118 /* here comes imsm_dev[num_raid_devs] */
119 /* here comes BBM logs */
120} __attribute__ ((packed));
121
122#define BBM_LOG_MAX_ENTRIES 254
123
124struct bbm_log_entry {
125 __u64 defective_block_start;
126#define UNREADABLE 0xFFFFFFFF
127 __u32 spare_block_offset;
128 __u16 remapped_marked_count;
129 __u16 disk_ordinal;
130} __attribute__ ((__packed__));
131
132struct bbm_log {
133 __u32 signature; /* 0xABADB10C */
134 __u32 entry_count;
135 __u32 reserved_spare_block_count; /* 0 */
136 __u32 reserved; /* 0xFFFF */
137 __u64 first_spare_lba;
138 struct bbm_log_entry mapped_block_entries[BBM_LOG_MAX_ENTRIES];
139} __attribute__ ((__packed__));
140
141
142#ifndef MDASSEMBLE
143static char *map_state_str[] = { "normal", "uninitialized", "degraded", "failed" };
144#endif
145
146static unsigned int sector_count(__u32 bytes)
147{
148 return ((bytes + (512-1)) & (~(512-1))) / 512;
149}
150
151static unsigned int mpb_sectors(struct imsm_super *mpb)
152{
153 return sector_count(__le32_to_cpu(mpb->mpb_size));
154}
155
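/* Editorial sketch (not part of the original file): a quick illustration of
 * the rounding done by sector_count() above, which rounds a byte count up to
 * whole 512-byte sectors.  Kept out of the build:
 */
#if 0
static void example_sector_rounding(void)
{
	/* one byte, exactly one sector, and one sector plus a byte */
	printf("%u %u %u\n", sector_count(1), sector_count(512),
	       sector_count(513));	/* prints "1 1 2" */
}
#endif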
156/* internal representation of IMSM metadata */
157struct intel_super {
158 union {
159 void *buf; /* O_DIRECT buffer for reading/writing metadata */
160 struct imsm_super *anchor; /* immovable parameters */
161 };
162 size_t len; /* size of the 'buf' allocation */
163 void *next_buf; /* for realloc'ing buf from the manager */
164 size_t next_len;
165 int updates_pending; /* count of pending updates for mdmon */
166 int creating_imsm; /* flag to indicate container creation */
167 int current_vol; /* index of raid device undergoing creation */
168 #define IMSM_MAX_RAID_DEVS 2
169 struct imsm_dev *dev_tbl[IMSM_MAX_RAID_DEVS];
170 struct dl {
171 struct dl *next;
172 int index;
173 __u8 serial[MAX_RAID_SERIAL_LEN];
174 int major, minor;
175 char *devname;
176 struct imsm_disk disk;
177 int fd;
178 } *disks;
179 struct dl *add; /* list of disks to add while mdmon active */
180 struct bbm_log *bbm_log;
181};
182
183struct extent {
184 unsigned long long start, size;
185};
186
187/* definition of messages passed to imsm_process_update */
188enum imsm_update_type {
189 update_activate_spare,
190 update_create_array,
191 update_add_disk,
192};
193
194struct imsm_update_activate_spare {
195 enum imsm_update_type type;
196 struct dl *dl;
197 int slot;
198 int array;
199 struct imsm_update_activate_spare *next;
200};
201
202struct imsm_update_create_array {
203 enum imsm_update_type type;
204 int dev_idx;
205 struct imsm_dev dev;
206};
207
208struct imsm_update_add_disk {
209 enum imsm_update_type type;
210};
211
212static int imsm_env_devname_as_serial(void)
213{
214 char *val = getenv("IMSM_DEVNAME_AS_SERIAL");
215
216 if (val && atoi(val) == 1)
217 return 1;
218
219 return 0;
220}
221
222
223static struct supertype *match_metadata_desc_imsm(char *arg)
224{
225 struct supertype *st;
226
227 if (strcmp(arg, "imsm") != 0 &&
228 strcmp(arg, "default") != 0
229 )
230 return NULL;
231
232 st = malloc(sizeof(*st));
233 memset(st, 0, sizeof(*st));
234 st->ss = &super_imsm;
235 st->max_devs = IMSM_MAX_DEVICES;
236 st->minor_version = 0;
237 st->sb = NULL;
238 return st;
239}
240
241static __u8 *get_imsm_version(struct imsm_super *mpb)
242{
243 return &mpb->sig[MPB_SIG_LEN];
244}
245
246/* retrieve a disk directly from the anchor when the anchor is known to be
247 * up-to-date, currently only at load time
248 */
249static struct imsm_disk *__get_imsm_disk(struct imsm_super *mpb, __u8 index)
250{
251 if (index >= mpb->num_disks)
252 return NULL;
253 return &mpb->disk[index];
254}
255
256/* retrieve a disk from the parsed metadata */
257static struct imsm_disk *get_imsm_disk(struct intel_super *super, __u8 index)
258{
259 struct dl *d;
260
261 for (d = super->disks; d; d = d->next)
262 if (d->index == index)
263 return &d->disk;
264
265 return NULL;
266}
267
268/* generate a checksum directly from the anchor when the anchor is known to be
269 * up-to-date, currently only at load or write_super after coalescing
270 */
271static __u32 __gen_imsm_checksum(struct imsm_super *mpb)
272{
273 __u32 end = mpb->mpb_size / sizeof(end);
274 __u32 *p = (__u32 *) mpb;
275 __u32 sum = 0;
276
277 while (end--)
278 sum += __le32_to_cpu(*p++);
279
280 return sum - __le32_to_cpu(mpb->check_sum);
281}
282
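/* Editorial sketch (not part of the original file): __gen_imsm_checksum()
 * sums the anchor as 32-bit little-endian words and subtracts the stored
 * check_sum field, so the same helper serves both verification and
 * regeneration.  Illustration only, kept out of the build:
 */
#if 0
static void example_checksum_roundtrip(struct imsm_super *mpb)
{
	/* verify: the stored value must equal the recomputed sum */
	if (__gen_imsm_checksum(mpb) != __le32_to_cpu(mpb->check_sum))
		fprintf(stderr, "anchor checksum mismatch\n");

	/* regenerate: after editing the mpb, store the new sum */
	mpb->check_sum = __cpu_to_le32(__gen_imsm_checksum(mpb));
}
#endif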
283static size_t sizeof_imsm_map(struct imsm_map *map)
284{
285 return sizeof(struct imsm_map) + sizeof(__u32) * (map->num_members - 1);
286}
287
288struct imsm_map *get_imsm_map(struct imsm_dev *dev, int second_map)
289{
290 struct imsm_map *map = &dev->vol.map[0];
291
292 if (second_map && !dev->vol.migr_state)
293 return NULL;
294 else if (second_map) {
295 void *ptr = map;
296
297 return ptr + sizeof_imsm_map(map);
298 } else
299 return map;
300
301}
302
303/* return the size of the device.
304 * migr_state increases the returned size if map[0] were to be duplicated
305 */
306static size_t sizeof_imsm_dev(struct imsm_dev *dev, int migr_state)
307{
308 size_t size = sizeof(*dev) - sizeof(struct imsm_map) +
309 sizeof_imsm_map(get_imsm_map(dev, 0));
310
311 /* migrating means an additional map */
312 if (dev->vol.migr_state)
313 size += sizeof_imsm_map(get_imsm_map(dev, 1));
314 else if (migr_state)
315 size += sizeof_imsm_map(get_imsm_map(dev, 0));
316
317 return size;
318}
319
320static struct imsm_dev *__get_imsm_dev(struct imsm_super *mpb, __u8 index)
321{
322 int offset;
323 int i;
324 void *_mpb = mpb;
325
326 if (index >= mpb->num_raid_devs)
327 return NULL;
328
329 /* devices start after all disks */
330 offset = ((void *) &mpb->disk[mpb->num_disks]) - _mpb;
331
332 for (i = 0; i <= index; i++)
333 if (i == index)
334 return _mpb + offset;
335 else
336 offset += sizeof_imsm_dev(_mpb + offset, 0);
337
338 return NULL;
339}
340
341static struct imsm_dev *get_imsm_dev(struct intel_super *super, __u8 index)
342{
343 if (index >= super->anchor->num_raid_devs)
344 return NULL;
345 return super->dev_tbl[index];
346}
347
348static __u32 get_imsm_ord_tbl_ent(struct imsm_dev *dev, int slot)
349{
350 struct imsm_map *map;
351
352 if (dev->vol.migr_state)
353 map = get_imsm_map(dev, 1);
354 else
355 map = get_imsm_map(dev, 0);
356
357 /* top byte identifies disk under rebuild */
358 return __le32_to_cpu(map->disk_ord_tbl[slot]);
359}
360
361#define ord_to_idx(ord) (((ord) << 8) >> 8)
362static __u32 get_imsm_disk_idx(struct imsm_dev *dev, int slot)
363{
364 __u32 ord = get_imsm_ord_tbl_ent(dev, slot);
365
366 return ord_to_idx(ord);
367}
368
369static void set_imsm_ord_tbl_ent(struct imsm_map *map, int slot, __u32 ord)
370{
371 map->disk_ord_tbl[slot] = __cpu_to_le32(ord);
372}
373
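/* Editorial sketch (hypothetical helper, not part of the original file): the
 * low bits of a disk_ord_tbl entry carry the disk index while the top byte
 * carries flags such as IMSM_ORD_REBUILD; ord_to_idx() strips the flag byte
 * again.  Kept out of the build:
 */
#if 0
static void example_mark_slot_rebuilding(struct imsm_map *map, int slot)
{
	__u32 ord = __le32_to_cpu(map->disk_ord_tbl[slot]);

	/* keep the index, set the rebuild flag in the top byte */
	set_imsm_ord_tbl_ent(map, slot, ord_to_idx(ord) | IMSM_ORD_REBUILD);
}
#endif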
374static int get_imsm_raid_level(struct imsm_map *map)
375{
376 if (map->raid_level == 1) {
377 if (map->num_members == 2)
378 return 1;
379 else
380 return 10;
381 }
382
383 return map->raid_level;
384}
385
386static int cmp_extent(const void *av, const void *bv)
387{
388 const struct extent *a = av;
389 const struct extent *b = bv;
390 if (a->start < b->start)
391 return -1;
392 if (a->start > b->start)
393 return 1;
394 return 0;
395}
396
397static struct extent *get_extents(struct intel_super *super, struct dl *dl)
398{
399 /* find a list of used extents on the given physical device */
400 struct extent *rv, *e;
401 int i, j;
402 int memberships = 0;
403
404 for (i = 0; i < super->anchor->num_raid_devs; i++) {
405 struct imsm_dev *dev = get_imsm_dev(super, i);
406 struct imsm_map *map = get_imsm_map(dev, 0);
407
408 for (j = 0; j < map->num_members; j++) {
409 __u32 index = get_imsm_disk_idx(dev, j);
410
411 if (index == dl->index)
412 memberships++;
413 }
414 }
415 rv = malloc(sizeof(struct extent) * (memberships + 1));
416 if (!rv)
417 return NULL;
418 e = rv;
419
420 for (i = 0; i < super->anchor->num_raid_devs; i++) {
421 struct imsm_dev *dev = get_imsm_dev(super, i);
422 struct imsm_map *map = get_imsm_map(dev, 0);
423
424 for (j = 0; j < map->num_members; j++) {
425 __u32 index = get_imsm_disk_idx(dev, j);
426
427 if (index == dl->index) {
428 e->start = __le32_to_cpu(map->pba_of_lba0);
429 e->size = __le32_to_cpu(map->blocks_per_member);
430 e++;
431 }
432 }
433 }
434 qsort(rv, memberships, sizeof(*rv), cmp_extent);
435
436 e->start = __le32_to_cpu(dl->disk.total_blocks) -
437 (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
438 e->size = 0;
439 return rv;
440}
441
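/* Editorial sketch (hypothetical helper, not part of the original file):
 * get_extents() returns the used extents sorted by start and terminated by a
 * zero-size entry marking the end of the usable area, so free space is the
 * gap between the end of one extent and the start of the next.  Kept out of
 * the build:
 */
#if 0
static unsigned long long example_largest_free_gap(struct intel_super *super,
						   struct dl *dl)
{
	struct extent *e = get_extents(super, dl);
	unsigned long long pos = 0, max_gap = 0;
	int i = 0;

	if (!e)
		return 0;
	do {
		if (e[i].start - pos > max_gap)
			max_gap = e[i].start - pos;
		pos = e[i].start + e[i].size;
		i++;
	} while (e[i - 1].size);
	free(e);

	return max_gap;
}
#endif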
442#ifndef MDASSEMBLE
443static void print_imsm_dev(struct imsm_dev *dev, int index)
444{
445 __u64 sz;
446 int slot;
447 struct imsm_map *map = get_imsm_map(dev, 0);
448 __u32 ord;
449
450 printf("\n");
451 printf("[%s]:\n", dev->volume);
452 printf(" RAID Level : %d\n", get_imsm_raid_level(map));
453 printf(" Members : %d\n", map->num_members);
454 for (slot = 0; slot < map->num_members; slot++)
455 if (index == get_imsm_disk_idx(dev, slot))
456 break;
457 if (slot < map->num_members) {
458 ord = get_imsm_ord_tbl_ent(dev, slot);
459 printf(" This Slot : %d%s\n", slot,
460 ord & IMSM_ORD_REBUILD ? " (out-of-sync)" : "");
461 } else
462 printf(" This Slot : ?\n");
463 sz = __le32_to_cpu(dev->size_high);
464 sz <<= 32;
465 sz += __le32_to_cpu(dev->size_low);
466 printf(" Array Size : %llu%s\n", (unsigned long long)sz,
467 human_size(sz * 512));
468 sz = __le32_to_cpu(map->blocks_per_member);
469 printf(" Per Dev Size : %llu%s\n", (unsigned long long)sz,
470 human_size(sz * 512));
471 printf(" Sector Offset : %u\n",
472 __le32_to_cpu(map->pba_of_lba0));
473 printf(" Num Stripes : %u\n",
474 __le32_to_cpu(map->num_data_stripes));
475 printf(" Chunk Size : %u KiB\n",
476 __le16_to_cpu(map->blocks_per_strip) / 2);
477 printf(" Reserved : %d\n", __le32_to_cpu(dev->reserved_blocks));
478 printf(" Migrate State : %s", dev->vol.migr_state ? "migrating" : "idle");
479 if (dev->vol.migr_state)
480 printf(": %s", dev->vol.migr_type ? "rebuilding" : "initializing");
481 printf("\n");
482 printf(" Map State : %s", map_state_str[map->map_state]);
483 if (dev->vol.migr_state) {
484 struct imsm_map *map = get_imsm_map(dev, 1);
485 printf(" <-- %s", map_state_str[map->map_state]);
486 }
487 printf("\n");
488 printf(" Dirty State : %s\n", dev->vol.dirty ? "dirty" : "clean");
489}
490
491static void print_imsm_disk(struct imsm_super *mpb, int index)
492{
493 struct imsm_disk *disk = __get_imsm_disk(mpb, index);
494 char str[MAX_RAID_SERIAL_LEN + 1];
495 __u32 s;
496 __u64 sz;
497
498 if (index < 0)
499 return;
500
501 printf("\n");
502 snprintf(str, MAX_RAID_SERIAL_LEN + 1, "%s", disk->serial);
503 printf(" Disk%02d Serial : %s\n", index, str);
504 s = __le32_to_cpu(disk->status);
505 printf(" State :%s%s%s%s\n", s&SPARE_DISK ? " spare" : "",
506 s&CONFIGURED_DISK ? " active" : "",
507 s&FAILED_DISK ? " failed" : "",
508 s&USABLE_DISK ? " usable" : "");
509 printf(" Id : %08x\n", __le32_to_cpu(disk->scsi_id));
510 sz = __le32_to_cpu(disk->total_blocks) -
511 (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS * mpb->num_raid_devs);
512 printf(" Usable Size : %llu%s\n", (unsigned long long)sz,
513 human_size(sz * 512));
514}
515
516static void examine_super_imsm(struct supertype *st, char *homehost)
517{
518 struct intel_super *super = st->sb;
519 struct imsm_super *mpb = super->anchor;
520 char str[MAX_SIGNATURE_LENGTH];
521 int i;
522 __u32 sum;
523
524 snprintf(str, MPB_SIG_LEN, "%s", mpb->sig);
525 printf(" Magic : %s\n", str);
526 snprintf(str, strlen(MPB_VERSION_RAID0), "%s", get_imsm_version(mpb));
527 printf(" Version : %s\n", get_imsm_version(mpb));
528 printf(" Family : %08x\n", __le32_to_cpu(mpb->family_num));
529 printf(" Generation : %08x\n", __le32_to_cpu(mpb->generation_num));
530 sum = __le32_to_cpu(mpb->check_sum);
531 printf(" Checksum : %08x %s\n", sum,
532 __gen_imsm_checksum(mpb) == sum ? "correct" : "incorrect");
533 printf(" MPB Sectors : %d\n", mpb_sectors(mpb));
534 printf(" Disks : %d\n", mpb->num_disks);
535 printf(" RAID Devices : %d\n", mpb->num_raid_devs);
536 print_imsm_disk(mpb, super->disks->index);
537 if (super->bbm_log) {
538 struct bbm_log *log = super->bbm_log;
539
540 printf("\n");
541 printf("Bad Block Management Log:\n");
542 printf(" Log Size : %d\n", __le32_to_cpu(mpb->bbm_log_size));
543 printf(" Signature : %x\n", __le32_to_cpu(log->signature));
544 printf(" Entry Count : %d\n", __le32_to_cpu(log->entry_count));
545 printf(" Spare Blocks : %d\n", __le32_to_cpu(log->reserved_spare_block_count));
546 printf(" First Spare : %llx\n", __le64_to_cpu(log->first_spare_lba));
547 }
548 for (i = 0; i < mpb->num_raid_devs; i++)
549 print_imsm_dev(__get_imsm_dev(mpb, i), super->disks->index);
550 for (i = 0; i < mpb->num_disks; i++) {
551 if (i == super->disks->index)
552 continue;
553 print_imsm_disk(mpb, i);
554 }
555}
556
557static void brief_examine_super_imsm(struct supertype *st)
558{
559 printf("ARRAY /dev/imsm metadata=imsm\n");
560}
561
562static void detail_super_imsm(struct supertype *st, char *homehost)
563{
564 printf("%s\n", __FUNCTION__);
565}
566
567static void brief_detail_super_imsm(struct supertype *st)
568{
569 printf("%s\n", __FUNCTION__);
570}
571#endif
572
573static int match_home_imsm(struct supertype *st, char *homehost)
574{
575 printf("%s\n", __FUNCTION__);
576
577 return 0;
578}
579
580static void uuid_from_super_imsm(struct supertype *st, int uuid[4])
581{
582 /* imsm does not track uuid's so just make sure we never return
583 * the same value twice to break uuid matching in Manage_subdevs
584 * FIXME what about the use of uuid's with bitmap's?
585 */
586 static int dummy_id = 0;
587
588 uuid[0] = dummy_id++;
589}
590
591#if 0
592static void
593get_imsm_numerical_version(struct imsm_super *mpb, int *m, int *p)
594{
595 __u8 *v = get_imsm_version(mpb);
596 __u8 *end = mpb->sig + MAX_SIGNATURE_LENGTH;
597 char major[] = { 0, 0, 0 };
598 char minor[] = { 0 ,0, 0 };
599 char patch[] = { 0, 0, 0 };
600 char *ver_parse[] = { major, minor, patch };
601 int i, j;
602
603 i = j = 0;
604 while (*v != '\0' && v < end) {
605 if (*v != '.' && j < 2)
606 ver_parse[i][j++] = *v;
607 else {
608 i++;
609 j = 0;
610 }
611 v++;
612 }
613
614 *m = strtol(minor, NULL, 0);
615 *p = strtol(patch, NULL, 0);
616}
617#endif
618
619static int imsm_level_to_layout(int level)
620{
621 switch (level) {
622 case 0:
623 case 1:
624 return 0;
625 case 5:
626 case 6:
627 return ALGORITHM_LEFT_ASYMMETRIC;
628 case 10:
629 return 0x102; //FIXME is this correct?
630 }
631 return -1;
632}
633
634static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info)
635{
636 struct intel_super *super = st->sb;
637 struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
638 struct imsm_map *map = get_imsm_map(dev, 0);
639
640 info->container_member = super->current_vol;
641 info->array.raid_disks = map->num_members;
642 info->array.level = get_imsm_raid_level(map);
643 info->array.layout = imsm_level_to_layout(info->array.level);
644 info->array.md_minor = -1;
645 info->array.ctime = 0;
646 info->array.utime = 0;
647 info->array.chunk_size = __le16_to_cpu(map->blocks_per_strip * 512);
648
649 info->data_offset = __le32_to_cpu(map->pba_of_lba0);
650 info->component_size = __le32_to_cpu(map->blocks_per_member);
651
652 info->disk.major = 0;
653 info->disk.minor = 0;
654
655 sprintf(info->text_version, "/%s/%d",
656 devnum2devname(st->container_dev),
657 info->container_member);
658 info->safe_mode_delay = 4000; /* 4 secs like the Matrix driver */
659}
660
661
662static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info)
663{
664 struct intel_super *super = st->sb;
665 struct imsm_disk *disk;
666 __u32 s;
667
668 if (super->current_vol >= 0) {
669 getinfo_super_imsm_volume(st, info);
670 return;
671 }
672
673 /* Set raid_disks to zero so that Assemble will always pull in valid
674 * spares
675 */
676 info->array.raid_disks = 0;
677 info->array.level = LEVEL_CONTAINER;
678 info->array.layout = 0;
679 info->array.md_minor = -1;
680 info->array.ctime = 0; /* N/A for imsm */
681 info->array.utime = 0;
682 info->array.chunk_size = 0;
683
684 info->disk.major = 0;
685 info->disk.minor = 0;
686 info->disk.raid_disk = -1;
687 info->reshape_active = 0;
688 strcpy(info->text_version, "imsm");
689 info->safe_mode_delay = 0;
690 info->disk.number = -1;
691 info->disk.state = 0;
692
693 if (super->disks) {
694 disk = &super->disks->disk;
695 info->disk.number = super->disks->index;
696 info->disk.raid_disk = super->disks->index;
697 info->data_offset = __le32_to_cpu(disk->total_blocks) -
698 (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
699 info->component_size = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
700 s = __le32_to_cpu(disk->status);
701 info->disk.state = s & CONFIGURED_DISK ? (1 << MD_DISK_ACTIVE) : 0;
702 info->disk.state |= s & FAILED_DISK ? (1 << MD_DISK_FAULTY) : 0;
703 info->disk.state |= s & USABLE_DISK ? (1 << MD_DISK_SYNC) : 0;
704 }
705}
706
707static int update_super_imsm(struct supertype *st, struct mdinfo *info,
708 char *update, char *devname, int verbose,
709 int uuid_set, char *homehost)
710{
711 /* FIXME */
712
713 /* For 'assemble' and 'force' we need to return non-zero if any
714 * change was made. For others, the return value is ignored.
715 * Update options are:
716 * force-one : This device looks a bit old but needs to be included,
717 * update age info appropriately.
718 * assemble: clear any 'faulty' flag to allow this device to
719 * be assembled.
720 * force-array: Array is degraded but being forced, mark it clean
721 * if that will be needed to assemble it.
722 *
723 * newdev: not used ????
724 * grow: Array has gained a new device - this is currently for
725 * linear only
726 * resync: mark as dirty so a resync will happen.
727 * name: update the name - preserving the homehost
728 *
729 * Following are not relevant for this imsm:
730 * sparc2.2 : update from old dodgey metadata
731 * super-minor: change the preferred_minor number
732 * summaries: update redundant counters.
733 * uuid: Change the uuid of the array to match what is given
734 * homehost: update the recorded homehost
735 * _reshape_progress: record new reshape_progress position.
736 */
737 int rv = 0;
738 //struct intel_super *super = st->sb;
739 //struct imsm_super *mpb = super->mpb;
740
741 if (strcmp(update, "grow") == 0) {
742 }
743 if (strcmp(update, "resync") == 0) {
744 /* dev->vol.dirty = 1; */
745 }
746
747 /* IMSM has no concept of UUID or homehost */
748
749 return rv;
750}
751
752static size_t disks_to_mpb_size(int disks)
753{
754 size_t size;
755
756 size = sizeof(struct imsm_super);
757 size += (disks - 1) * sizeof(struct imsm_disk);
758 size += 2 * sizeof(struct imsm_dev);
759 /* up to 2 maps per raid device (-2 for the imsm_maps in imsm_dev) */
760 size += (4 - 2) * sizeof(struct imsm_map);
761 /* 4 possible disk_ord_tbl's */
762 size += 4 * (disks - 1) * sizeof(__u32);
763
764 return size;
765}
766
767static __u64 avail_size_imsm(struct supertype *st, __u64 devsize)
768{
769 if (devsize < (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS))
770 return 0;
771
772 return devsize - (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
773}
774
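/* Editorial sketch (not part of the original file): worked example of the
 * sizing math above.  disks_to_mpb_size() reserves room for a worst case of
 * two raid devices with two maps each plus an ordinal table per map, and
 * callers round the result up to whole 512-byte sectors when allocating the
 * anchor buffer.  Kept out of the build:
 */
#if 0
static void example_anchor_sizing(void)
{
	size_t bytes = disks_to_mpb_size(4);

	printf("worst-case mpb for 4 disks: %zu bytes, %u sectors\n",
	       bytes, sector_count(bytes));
}
#endif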
775static int compare_super_imsm(struct supertype *st, struct supertype *tst)
776{
777 /*
778 * return:
779 * 0 same, or first was empty, and second was copied
780 * 1 second had wrong number
781 * 2 wrong uuid
782 * 3 wrong other info
783 */
784 struct intel_super *first = st->sb;
785 struct intel_super *sec = tst->sb;
786
787 if (!first) {
788 st->sb = tst->sb;
789 tst->sb = NULL;
790 return 0;
791 }
792
793 if (memcmp(first->anchor->sig, sec->anchor->sig, MAX_SIGNATURE_LENGTH) != 0)
794 return 3;
795
796 /* if an anchor does not have num_raid_devs set then it is a free
797 * floating spare
798 */
799 if (first->anchor->num_raid_devs > 0 &&
800 sec->anchor->num_raid_devs > 0) {
801 if (first->anchor->family_num != sec->anchor->family_num)
802 return 3;
803 }
804
805 /* if 'first' is a spare promote it to a populated mpb with sec's
806 * family number
807 */
808 if (first->anchor->num_raid_devs == 0 &&
809 sec->anchor->num_raid_devs > 0) {
810 first->anchor->num_raid_devs = sec->anchor->num_raid_devs;
811 first->anchor->family_num = sec->anchor->family_num;
812 }
813
814 return 0;
815}
816
817static void fd2devname(int fd, char *name)
818{
819 struct stat st;
820 char path[256];
821 char dname[100];
822 char *nm;
823 int rv;
824
825 name[0] = '\0';
826 if (fstat(fd, &st) != 0)
827 return;
828 sprintf(path, "/sys/dev/block/%d:%d",
829 major(st.st_rdev), minor(st.st_rdev));
830
831 rv = readlink(path, dname, sizeof(dname));
832 if (rv <= 0)
833 return;
834
835 dname[rv] = '\0';
836 nm = strrchr(dname, '/');
837 nm++;
838 snprintf(name, MAX_RAID_SERIAL_LEN, "/dev/%s", nm);
839}
840
841
842extern int scsi_get_serial(int fd, void *buf, size_t buf_len);
843
844static int imsm_read_serial(int fd, char *devname,
845 __u8 serial[MAX_RAID_SERIAL_LEN])
846{
847 unsigned char scsi_serial[255];
848 int rv;
849 int rsp_len;
850 int len;
851 char *c, *rsp_buf;
852
853 memset(scsi_serial, 0, sizeof(scsi_serial));
854
855 rv = scsi_get_serial(fd, scsi_serial, sizeof(scsi_serial));
856
857 if (rv && imsm_env_devname_as_serial()) {
858 memset(serial, 0, MAX_RAID_SERIAL_LEN);
859 fd2devname(fd, (char *) serial);
860 return 0;
861 }
862
863 if (rv != 0) {
864 if (devname)
865 fprintf(stderr,
866 Name ": Failed to retrieve serial for %s\n",
867 devname);
868 return rv;
869 }
870
871 /* trim whitespace */
872 rsp_len = scsi_serial[3];
873 rsp_buf = (char *) &scsi_serial[4];
874 c = rsp_buf;
875 while (isspace(*c))
876 c++;
877 if (c + MAX_RAID_SERIAL_LEN > rsp_buf + rsp_len)
878 len = rsp_len - (c - rsp_buf);
879 else
880 len = MAX_RAID_SERIAL_LEN;
881 memcpy(serial, c, len);
882 c = (char *) &serial[len - 1];
883 while (isspace(*c) || *c == '\0')
884 *c-- = '\0';
885
886 return 0;
887}
888
889static int serialcmp(__u8 *s1, __u8 *s2)
890{
891 return strncmp((char *) s1, (char *) s2, MAX_RAID_SERIAL_LEN);
892}
893
894static void serialcpy(__u8 *dest, __u8 *src)
895{
896 strncpy((char *) dest, (char *) src, MAX_RAID_SERIAL_LEN);
897}
898
899static int
900load_imsm_disk(int fd, struct intel_super *super, char *devname, int keep_fd)
901{
902 struct dl *dl;
903 struct stat stb;
904 int rv;
905 int i;
906 int alloc = 1;
907 __u8 serial[MAX_RAID_SERIAL_LEN];
908
909 rv = imsm_read_serial(fd, devname, serial);
910
911 if (rv != 0)
912 return 2;
913
914 /* check if this is a disk we have seen before. it may be a spare in
915 * super->disks while the current anchor believes it is a raid member,
916 * check if we need to update dl->index
917 */
918 for (dl = super->disks; dl; dl = dl->next)
919 if (serialcmp(dl->serial, serial) == 0)
920 break;
921
922 if (!dl)
923 dl = malloc(sizeof(*dl));
924 else
925 alloc = 0;
926
927 if (!dl) {
928 if (devname)
929 fprintf(stderr,
930 Name ": failed to allocate disk buffer for %s\n",
931 devname);
932 return 2;
933 }
934
935 if (alloc) {
936 fstat(fd, &stb);
937 dl->major = major(stb.st_rdev);
938 dl->minor = minor(stb.st_rdev);
939 dl->next = super->disks;
940 dl->fd = keep_fd ? fd : -1;
941 dl->devname = devname ? strdup(devname) : NULL;
942 serialcpy(dl->serial, serial);
943 dl->index = -2;
944 } else if (keep_fd) {
945 close(dl->fd);
946 dl->fd = fd;
947 }
948
949 /* look up this disk's index in the current anchor */
950 for (i = 0; i < super->anchor->num_disks; i++) {
951 struct imsm_disk *disk_iter;
952
953 disk_iter = __get_imsm_disk(super->anchor, i);
954
955 if (serialcmp(disk_iter->serial, dl->serial) == 0) {
956 __u32 status;
957
958 dl->disk = *disk_iter;
959 status = __le32_to_cpu(dl->disk.status);
960 /* only set index on disks that are a member of a
961 * populated container, i.e. one with raid_devs
962 */
963 if (status & FAILED_DISK)
964 dl->index = -2;
965 else if (status & SPARE_DISK)
966 dl->index = -1;
967 else
968 dl->index = i;
969
970 break;
971 }
972 }
973
974 if (alloc)
975 super->disks = dl;
976
977 return 0;
978}
979
980static void imsm_copy_dev(struct imsm_dev *dest, struct imsm_dev *src)
981{
982 memcpy(dest, src, sizeof_imsm_dev(src, 0));
983}
984
985/* When migrating map0 contains the 'destination' state while map1
986 * contains the current state. When not migrating map0 contains the
987 * current state. This routine assumes that map[0].map_state is set to
988 * the current array state before being called.
989 *
990 * Migration is indicated by one of the following states
991 1/ Idle (migr_state=0 map0state=normal||uninitialized||degraded||failed)
992 2/ Initialize (migr_state=1 migr_type=0 map0state=normal
993 map1state=uninitialized)
994 * 3/ Verify (Resync) (migr_state=1 migr_type=1 map0state=normal
995 * map1state=normal)
996 * 4/ Rebuild (migr_state=1 migr_type=1 map0state=normal
997 * map1state=degraded)
998 */
999static void migrate(struct imsm_dev *dev, __u8 to_state, int rebuild_resync)
1000{
1001 struct imsm_map *dest;
1002 struct imsm_map *src = get_imsm_map(dev, 0);
1003
1004 dev->vol.migr_state = 1;
1005 dev->vol.migr_type = rebuild_resync;
1006 dest = get_imsm_map(dev, 1);
1007
1008 memcpy(dest, src, sizeof_imsm_map(src));
1009 src->map_state = to_state;
1010}
1011
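/* Editorial sketch (not part of the original file): how the states listed in
 * the comment above map onto migrate().  The caller leaves map[0] at the
 * current state, then migrate() copies it into map[1] and replaces map[0]
 * with the destination state.  Starting a rebuild (case 4) would look like
 * this; kept out of the build:
 */
#if 0
static void example_start_rebuild(struct imsm_dev *dev)
{
	struct imsm_map *map = get_imsm_map(dev, 0);

	map->map_state = IMSM_T_STATE_DEGRADED;	/* current state */
	migrate(dev, IMSM_T_STATE_NORMAL, 1);	/* map1=degraded, map0=normal,
						 * migr_type=1 (rebuild) */
}
#endif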
1012static int parse_raid_devices(struct intel_super *super)
1013{
1014 int i;
1015 struct imsm_dev *dev_new;
1016 size_t len, len_migr;
1017 size_t space_needed = 0;
1018 struct imsm_super *mpb = super->anchor;
1019
1020 for (i = 0; i < super->anchor->num_raid_devs; i++) {
1021 struct imsm_dev *dev_iter = __get_imsm_dev(super->anchor, i);
1022
1023 len = sizeof_imsm_dev(dev_iter, 0);
1024 len_migr = sizeof_imsm_dev(dev_iter, 1);
1025 if (len_migr > len)
1026 space_needed += len_migr - len;
1027
1028 dev_new = malloc(len_migr);
1029 if (!dev_new)
1030 return 1;
1031 imsm_copy_dev(dev_new, dev_iter);
1032 super->dev_tbl[i] = dev_new;
1033 }
1034
1035 /* ensure that super->buf is large enough when all raid devices
1036 * are migrating
1037 */
1038 if (__le32_to_cpu(mpb->mpb_size) + space_needed > super->len) {
1039 void *buf;
1040
1041 len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) + space_needed, 512);
1042 if (posix_memalign(&buf, 512, len) != 0)
1043 return 1;
1044
1045 memcpy(buf, super->buf, len);
1046 free(super->buf);
1047 super->buf = buf;
1048 super->len = len;
1049 }
1050
1051 return 0;
1052}
1053
1054/* retrieve a pointer to the bbm log which starts after all raid devices */
1055struct bbm_log *__get_imsm_bbm_log(struct imsm_super *mpb)
1056{
1057 void *ptr = NULL;
1058
1059 if (__le32_to_cpu(mpb->bbm_log_size)) {
1060 ptr = mpb;
1061 ptr += mpb->mpb_size - __le32_to_cpu(mpb->bbm_log_size);
1062 }
1063
1064 return ptr;
1065}
1066
1067static void __free_imsm(struct intel_super *super, int free_disks);
1068
1069/* load_imsm_mpb - read matrix metadata
1070 * allocates super->mpb to be freed by free_super
1071 */
1072static int load_imsm_mpb(int fd, struct intel_super *super, char *devname)
1073{
1074 unsigned long long dsize;
1075 unsigned long long sectors;
1076 struct stat;
1077 struct imsm_super *anchor;
1078 __u32 check_sum;
1079 int rc;
1080
1081 get_dev_size(fd, NULL, &dsize);
1082
1083 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0) {
1084 if (devname)
1085 fprintf(stderr,
1086 Name ": Cannot seek to anchor block on %s: %s\n",
1087 devname, strerror(errno));
1088 return 1;
1089 }
1090
1091 if (posix_memalign((void**)&anchor, 512, 512) != 0) {
1092 if (devname)
1093 fprintf(stderr,
1094 Name ": Failed to allocate imsm anchor buffer"
1095 " on %s\n", devname);
1096 return 1;
1097 }
1098 if (read(fd, anchor, 512) != 512) {
1099 if (devname)
1100 fprintf(stderr,
1101 Name ": Cannot read anchor block on %s: %s\n",
1102 devname, strerror(errno));
1103 free(anchor);
1104 return 1;
1105 }
1106
1107 if (strncmp((char *) anchor->sig, MPB_SIGNATURE, MPB_SIG_LEN) != 0) {
1108 if (devname)
1109 fprintf(stderr,
1110 Name ": no IMSM anchor on %s\n", devname);
1111 free(anchor);
1112 return 2;
1113 }
1114
1115 __free_imsm(super, 0);
1116 super->len = ROUND_UP(anchor->mpb_size, 512);
1117 if (posix_memalign(&super->buf, 512, super->len) != 0) {
1118 if (devname)
1119 fprintf(stderr,
1120 Name ": unable to allocate %zu byte mpb buffer\n",
1121 super->len);
1122 free(anchor);
1123 return 2;
1124 }
1125 memcpy(super->buf, anchor, 512);
1126
1127 sectors = mpb_sectors(anchor) - 1;
1128 free(anchor);
1129 if (!sectors) {
1130 rc = load_imsm_disk(fd, super, devname, 0);
1131 if (rc == 0)
1132 rc = parse_raid_devices(super);
1133 return rc;
1134 }
1135
1136 /* read the extended mpb */
1137 if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0) {
1138 if (devname)
1139 fprintf(stderr,
1140 Name ": Cannot seek to extended mpb on %s: %s\n",
1141 devname, strerror(errno));
1142 return 1;
1143 }
1144
1145 if (read(fd, super->buf + 512, super->len - 512) != super->len - 512) {
1146 if (devname)
1147 fprintf(stderr,
1148 Name ": Cannot read extended mpb on %s: %s\n",
1149 devname, strerror(errno));
1150 return 2;
1151 }
1152
1153 check_sum = __gen_imsm_checksum(super->anchor);
1154 if (check_sum != __le32_to_cpu(super->anchor->check_sum)) {
1155 if (devname)
1156 fprintf(stderr,
1157 Name ": IMSM checksum %x != %x on %s\n",
1158 check_sum, __le32_to_cpu(super->anchor->check_sum),
1159 devname);
1160 return 2;
1161 }
1162
1163 /* FIXME the BBM log is disk specific so we cannot use this global
1164 * buffer for all disks. Ok for now since we only look at the global
1165 * bbm_log_size parameter to gate assembly
1166 */
1167 super->bbm_log = __get_imsm_bbm_log(super->anchor);
1168
1169 rc = load_imsm_disk(fd, super, devname, 0);
1170 if (rc == 0)
1171 rc = parse_raid_devices(super);
1172
1173 return rc;
1174}
1175
1176static void __free_imsm_disk(struct dl *d)
1177{
1178 if (d->fd >= 0)
1179 close(d->fd);
1180 if (d->devname)
1181 free(d->devname);
1182 free(d);
1183
1184}
1185static void free_imsm_disks(struct intel_super *super)
1186{
1187 while (super->disks) {
1188 struct dl *d = super->disks;
1189
1190 super->disks = d->next;
1191 __free_imsm_disk(d);
1192 }
1193}
1194
1195/* free all the pieces hanging off of a super pointer */
1196static void __free_imsm(struct intel_super *super, int free_disks)
1197{
1198 int i;
1199
1200 if (super->buf) {
1201 free(super->buf);
1202 super->buf = NULL;
1203 }
1204 if (free_disks)
1205 free_imsm_disks(super);
1206 for (i = 0; i < IMSM_MAX_RAID_DEVS; i++)
1207 if (super->dev_tbl[i]) {
1208 free(super->dev_tbl[i]);
1209 super->dev_tbl[i] = NULL;
1210 }
1211}
1212
1213static void free_imsm(struct intel_super *super)
1214{
1215 __free_imsm(super, 1);
1216 free(super);
1217}
1218
1219static void free_super_imsm(struct supertype *st)
1220{
1221 struct intel_super *super = st->sb;
1222
1223 if (!super)
1224 return;
1225
1226 free_imsm(super);
1227 st->sb = NULL;
1228}
1229
1230static struct intel_super *alloc_super(int creating_imsm)
1231{
1232 struct intel_super *super = malloc(sizeof(*super));
1233
1234 if (super) {
1235 memset(super, 0, sizeof(*super));
1236 super->creating_imsm = creating_imsm;
1237 super->current_vol = -1;
1238 }
1239
1240 return super;
1241}
1242
1243#ifndef MDASSEMBLE
1244static int load_super_imsm_all(struct supertype *st, int fd, void **sbp,
1245 char *devname, int keep_fd)
1246{
1247 struct mdinfo *sra;
1248 struct intel_super *super;
1249 struct mdinfo *sd, *best = NULL;
1250 __u32 bestgen = 0;
1251 __u32 gen;
1252 char nm[20];
1253 int dfd;
1254 int rv;
1255
1256 /* check if this disk is a member of an active array */
1257 sra = sysfs_read(fd, 0, GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE);
1258 if (!sra)
1259 return 1;
1260
1261 if (sra->array.major_version != -1 ||
1262 sra->array.minor_version != -2 ||
1263 strcmp(sra->text_version, "imsm") != 0)
1264 return 1;
1265
1266 super = alloc_super(0);
1267 if (!super)
1268 return 1;
1269
1270 /* find the most up to date disk in this array, skipping spares */
1271 for (sd = sra->devs; sd; sd = sd->next) {
1272 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
1273 dfd = dev_open(nm, keep_fd ? O_RDWR : O_RDONLY);
1274 if (!dfd) {
1275 free_imsm(super);
1276 return 2;
1277 }
1278 rv = load_imsm_mpb(dfd, super, NULL);
1279 if (!keep_fd)
1280 close(dfd);
1281 if (rv == 0) {
1282 if (super->anchor->num_raid_devs == 0)
1283 gen = 0;
1284 else
1285 gen = __le32_to_cpu(super->anchor->generation_num);
1286 if (!best || gen > bestgen) {
1287 bestgen = gen;
1288 best = sd;
1289 }
1290 } else {
1291 free_imsm(super);
1292 return 2;
1293 }
1294 }
1295
1296 if (!best) {
1297 free_imsm(super);
1298 return 1;
1299 }
1300
1301 /* load the most up to date anchor */
1302 sprintf(nm, "%d:%d", best->disk.major, best->disk.minor);
1303 dfd = dev_open(nm, O_RDONLY);
1304 if (!dfd) {
1305 free_imsm(super);
1306 return 1;
1307 }
1308 rv = load_imsm_mpb(dfd, super, NULL);
1309 close(dfd);
1310 if (rv != 0) {
1311 free_imsm(super);
1312 return 2;
1313 }
1314
1315 /* re-parse the disk list with the current anchor */
1316 for (sd = sra->devs ; sd ; sd = sd->next) {
1317 sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor);
1318 dfd = dev_open(nm, keep_fd? O_RDWR : O_RDONLY);
1319 if (!dfd) {
1320 free_imsm(super);
1321 return 2;
1322 }
1323 load_imsm_disk(dfd, super, NULL, keep_fd);
1324 if (!keep_fd)
1325 close(dfd);
1326 }
1327
1328 if (st->subarray[0]) {
1329 if (atoi(st->subarray) <= super->anchor->num_raid_devs)
1330 super->current_vol = atoi(st->subarray);
1331 else
1332 return 1;
1333 }
1334
1335 *sbp = super;
1336 st->container_dev = fd2devnum(fd);
1337 if (st->ss == NULL) {
1338 st->ss = &super_imsm;
1339 st->minor_version = 0;
1340 st->max_devs = IMSM_MAX_DEVICES;
1341 }
1342
1343 return 0;
1344}
1345#endif
1346
1347static int load_super_imsm(struct supertype *st, int fd, char *devname)
1348{
1349 struct intel_super *super;
1350 int rv;
1351
1352#ifndef MDASSEMBLE
1353 if (load_super_imsm_all(st, fd, &st->sb, devname, 1) == 0)
1354 return 0;
1355#endif
1356 if (st->subarray[0])
1357 return 1; /* FIXME */
1358
1359 super = alloc_super(0);
1360 if (!super) {
1361 fprintf(stderr,
1362 Name ": malloc of %zu failed.\n",
1363 sizeof(*super));
1364 return 1;
1365 }
1366
1367 rv = load_imsm_mpb(fd, super, devname);
1368
1369 if (rv) {
1370 if (devname)
1371 fprintf(stderr,
1372 Name ": Failed to load all information "
1373 "sections on %s\n", devname);
1374 free_imsm(super);
1375 return rv;
1376 }
1377
1378 st->sb = super;
1379 if (st->ss == NULL) {
1380 st->ss = &super_imsm;
1381 st->minor_version = 0;
1382 st->max_devs = IMSM_MAX_DEVICES;
1383 }
1384
1385 return 0;
1386}
1387
1388static __u16 info_to_blocks_per_strip(mdu_array_info_t *info)
1389{
1390 if (info->level == 1)
1391 return 128;
1392 return info->chunk_size >> 9;
1393}
1394
1395static __u32 info_to_num_data_stripes(mdu_array_info_t *info)
1396{
1397 __u32 num_stripes;
1398
1399 num_stripes = (info->size * 2) / info_to_blocks_per_strip(info);
1400 if (info->level == 1)
1401 num_stripes /= 2;
1402
1403 return num_stripes;
1404}
1405
1406static __u32 info_to_blocks_per_member(mdu_array_info_t *info)
1407{
1408 return (info->size * 2) & ~(info_to_blocks_per_strip(info) - 1);
1409}
1410
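/* Editorial sketch (not part of the original file): worked example for the
 * three helpers above, assuming info->size is the per-member size in KiB as
 * passed in by Create.  A 64 KiB chunk gives 128 blocks per strip, so a
 * 1 GiB member holds (1048576 * 2) / 128 = 16384 data stripes and is already
 * strip aligned.  Kept out of the build:
 */
#if 0
static void example_geometry(void)
{
	mdu_array_info_t info = { .level = 5, .chunk_size = 65536,
				  .size = 1048576 };

	printf("%u blocks/strip, %u stripes, %u blocks/member\n",
	       info_to_blocks_per_strip(&info),
	       info_to_num_data_stripes(&info),
	       info_to_blocks_per_member(&info));
}
#endif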
1411static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info,
1412 unsigned long long size, char *name,
1413 char *homehost, int *uuid)
1414{
1415 /* We are creating a volume inside a pre-existing container.
1416 * so st->sb is already set.
1417 */
1418 struct intel_super *super = st->sb;
1419 struct imsm_super *mpb = super->anchor;
1420 struct imsm_dev *dev;
1421 struct imsm_vol *vol;
1422 struct imsm_map *map;
1423 int idx = mpb->num_raid_devs;
1424 int i;
1425 unsigned long long array_blocks;
1426 __u32 offset = 0;
1427 size_t size_old, size_new;
1428
1429 if (mpb->num_raid_devs >= 2) {
1430 fprintf(stderr, Name": This imsm-container already has the "
1431 "maximum of 2 volumes\n");
1432 return 0;
1433 }
1434
1435 /* ensure the mpb is large enough for the new data */
1436 size_old = __le32_to_cpu(mpb->mpb_size);
1437 size_new = disks_to_mpb_size(info->nr_disks);
1438 if (size_new > size_old) {
1439 void *mpb_new;
1440 size_t size_round = ROUND_UP(size_new, 512);
1441
1442 if (posix_memalign(&mpb_new, 512, size_round) != 0) {
1443 fprintf(stderr, Name": could not allocate new mpb\n");
1444 return 0;
1445 }
1446 memcpy(mpb_new, mpb, size_old);
1447 free(mpb);
1448 mpb = mpb_new;
1449 super->anchor = mpb_new;
1450 mpb->mpb_size = __cpu_to_le32(size_new);
1451 memset(mpb_new + size_old, 0, size_round - size_old);
1452 }
1453 super->current_vol = idx;
1454 /* when creating the first raid device in this container set num_disks
1455 * to zero, i.e. delete this spare and add raid member devices in
1456 * add_to_super_imsm_volume()
1457 */
1458 if (super->current_vol == 0)
1459 mpb->num_disks = 0;
1460 sprintf(st->subarray, "%d", idx);
1461 dev = malloc(sizeof(*dev) + sizeof(__u32) * (info->raid_disks - 1));
1462 if (!dev) {
1463 fprintf(stderr, Name": could not allocate raid device\n");
1464 return 0;
1465 }
1466 strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN);
1467 array_blocks = calc_array_size(info->level, info->raid_disks,
1468 info->layout, info->chunk_size,
1469 info->size*2);
1470 dev->size_low = __cpu_to_le32((__u32) array_blocks);
1471 dev->size_high = __cpu_to_le32((__u32) (array_blocks >> 32));
1472 dev->status = __cpu_to_le32(0);
1473 dev->reserved_blocks = __cpu_to_le32(0);
1474 vol = &dev->vol;
1475 vol->migr_state = 0;
1476 vol->migr_type = 0;
1477 vol->dirty = 0;
1478 for (i = 0; i < idx; i++) {
1479 struct imsm_dev *prev = get_imsm_dev(super, i);
1480 struct imsm_map *pmap = get_imsm_map(prev, 0);
1481
1482 offset += __le32_to_cpu(pmap->blocks_per_member);
1483 offset += IMSM_RESERVED_SECTORS;
1484 }
1485 map = get_imsm_map(dev, 0);
1486 map->pba_of_lba0 = __cpu_to_le32(offset);
1487 map->blocks_per_member = __cpu_to_le32(info_to_blocks_per_member(info));
1488 map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info));
1489 map->num_data_stripes = __cpu_to_le32(info_to_num_data_stripes(info));
1490 map->map_state = info->level ? IMSM_T_STATE_UNINITIALIZED :
1491 IMSM_T_STATE_NORMAL;
1492
1493 if (info->level == 1 && info->raid_disks > 2) {
1494 fprintf(stderr, Name": imsm does not support more than 2 disks"
1495 "in a raid1 volume\n");
1496 return 0;
1497 }
1498 if (info->level == 10)
1499 map->raid_level = 1;
1500 else
1501 map->raid_level = info->level;
1502
1503 map->num_members = info->raid_disks;
1504 for (i = 0; i < map->num_members; i++) {
1505 /* initialized in add_to_super */
1506 set_imsm_ord_tbl_ent(map, i, 0);
1507 }
1508 mpb->num_raid_devs++;
1509 super->dev_tbl[super->current_vol] = dev;
1510
1511 return 1;
1512}
1513
1514static int init_super_imsm(struct supertype *st, mdu_array_info_t *info,
1515 unsigned long long size, char *name,
1516 char *homehost, int *uuid)
1517{
1518 /* This is primarily called by Create when creating a new array.
1519 * We will then get add_to_super called for each component, and then
1520 * write_init_super called to write it out to each device.
1521 * For IMSM, Create can create on fresh devices or on a pre-existing
1522 * array.
1523 * To create on a pre-existing array a different method will be called.
1524 * This one is just for fresh drives.
1525 */
1526 struct intel_super *super;
1527 struct imsm_super *mpb;
1528 size_t mpb_size;
1529
1530 if (!info) {
1531 st->sb = NULL;
1532 return 0;
1533 }
1534 if (st->sb)
1535 return init_super_imsm_volume(st, info, size, name, homehost,
1536 uuid);
1537
1538 super = alloc_super(1);
1539 if (!super)
1540 return 0;
1541 mpb_size = disks_to_mpb_size(info->nr_disks);
1542 if (posix_memalign(&super->buf, 512, mpb_size) != 0) {
1543 free(super);
1544 return 0;
1545 }
1546 mpb = super->buf;
1547 memset(mpb, 0, mpb_size);
1548
1549 memcpy(mpb->sig, MPB_SIGNATURE, strlen(MPB_SIGNATURE));
1550 memcpy(mpb->sig + strlen(MPB_SIGNATURE), MPB_VERSION_RAID5,
1551 strlen(MPB_VERSION_RAID5));
1552 mpb->mpb_size = mpb_size;
1553
1554 st->sb = super;
1555 return 1;
1556}
1557
1558static void add_to_super_imsm_volume(struct supertype *st, mdu_disk_info_t *dk,
1559 int fd, char *devname)
1560{
1561 struct intel_super *super = st->sb;
1562 struct imsm_super *mpb = super->anchor;
1563 struct dl *dl;
1564 struct imsm_dev *dev;
1565 struct imsm_map *map;
1566 __u32 status;
1567
1568 dev = get_imsm_dev(super, super->current_vol);
1569 map = get_imsm_map(dev, 0);
1570
1571 for (dl = super->disks; dl ; dl = dl->next)
1572 if (dl->major == dk->major &&
1573 dl->minor == dk->minor)
1574 break;
1575
1576 if (!dl || ! (dk->state & (1<<MD_DISK_SYNC)))
1577 return;
1578
1579 /* add a pristine spare to the metadata */
1580 if (dl->index < 0) {
1581 dl->index = super->anchor->num_disks;
1582 super->anchor->num_disks++;
1583 }
1584 set_imsm_ord_tbl_ent(map, dk->number, dl->index);
1585 status = CONFIGURED_DISK | USABLE_DISK;
1586 dl->disk.status = __cpu_to_le32(status);
1587
1588 /* if we are creating the first raid device update the family number */
1589 if (super->current_vol == 0) {
1590 __u32 sum;
1591 struct imsm_dev *_dev = __get_imsm_dev(mpb, 0);
1592 struct imsm_disk *_disk = __get_imsm_disk(mpb, dl->index);
1593
1594 *_dev = *dev;
1595 *_disk = dl->disk;
1596 sum = __gen_imsm_checksum(mpb);
1597 mpb->family_num = __cpu_to_le32(sum);
1598 }
1599}
1600
1601static void add_to_super_imsm(struct supertype *st, mdu_disk_info_t *dk,
1602 int fd, char *devname)
1603{
1604 struct intel_super *super = st->sb;
1605 struct dl *dd;
1606 unsigned long long size;
1607 __u32 status, id;
1608 int rv;
1609 struct stat stb;
1610
1611 if (super->current_vol >= 0) {
1612 add_to_super_imsm_volume(st, dk, fd, devname);
1613 return;
1614 }
1615
1616 fstat(fd, &stb);
1617 dd = malloc(sizeof(*dd));
1618 if (!dd) {
1619 fprintf(stderr,
1620 Name ": malloc failed %s:%d.\n", __func__, __LINE__);
1621 abort();
1622 }
1623 memset(dd, 0, sizeof(*dd));
1624 dd->major = major(stb.st_rdev);
1625 dd->minor = minor(stb.st_rdev);
1626 dd->index = -1;
1627 dd->devname = devname ? strdup(devname) : NULL;
1628 dd->fd = fd;
1629 rv = imsm_read_serial(fd, devname, dd->serial);
1630 if (rv) {
1631 fprintf(stderr,
1632 Name ": failed to retrieve scsi serial, aborting\n");
1633 free(dd);
1634 abort();
1635 }
1636
1637 get_dev_size(fd, NULL, &size);
1638 size /= 512;
1639 status = USABLE_DISK | SPARE_DISK;
1640 serialcpy(dd->disk.serial, dd->serial);
1641 dd->disk.total_blocks = __cpu_to_le32(size);
1642 dd->disk.status = __cpu_to_le32(status);
1643 if (sysfs_disk_to_scsi_id(fd, &id) == 0)
1644 dd->disk.scsi_id = __cpu_to_le32(id);
1645 else
1646 dd->disk.scsi_id = __cpu_to_le32(0);
1647
1648 if (st->update_tail) {
1649 dd->next = super->add;
1650 super->add = dd;
1651 } else {
1652 dd->next = super->disks;
1653 super->disks = dd;
1654 }
1655}
1656
1657static int store_imsm_mpb(int fd, struct intel_super *super);
1658
1659/* spare records have their own family number and do not have any defined raid
1660 * devices
1661 */
1662static int write_super_imsm_spares(struct intel_super *super, int doclose)
1663{
1664 struct imsm_super mpb_save;
1665 struct imsm_super *mpb = super->anchor;
1666 __u32 sum;
1667 struct dl *d;
1668
1669 mpb_save = *mpb;
1670 mpb->num_raid_devs = 0;
1671 mpb->num_disks = 1;
1672 mpb->mpb_size = sizeof(struct imsm_super);
1673 mpb->generation_num = __cpu_to_le32(1UL);
1674
1675 for (d = super->disks; d; d = d->next) {
1676 if (d->index != -1)
1677 continue;
1678
1679 mpb->disk[0] = d->disk;
1680 sum = __gen_imsm_checksum(mpb);
1681 mpb->family_num = __cpu_to_le32(sum);
1682 sum = __gen_imsm_checksum(mpb);
1683 mpb->check_sum = __cpu_to_le32(sum);
1684
1685 if (store_imsm_mpb(d->fd, super)) {
1686 fprintf(stderr, "%s: failed for device %d:%d %s\n",
1687 __func__, d->major, d->minor, strerror(errno));
1688 *mpb = mpb_save;
1689 return 1;
1690 }
1691 if (doclose) {
1692 close(d->fd);
1693 d->fd = -1;
1694 }
1695 }
1696
1697 *mpb = mpb_save;
1698 return 0;
1699}
1700
1701static int write_super_imsm(struct intel_super *super, int doclose)
1702{
1703 struct imsm_super *mpb = super->anchor;
1704 struct dl *d;
1705 __u32 generation;
1706 __u32 sum;
1707 int spares = 0;
1708 int i;
1709 __u32 mpb_size = sizeof(struct imsm_super) - sizeof(struct imsm_disk);
1710
1711 /* 'generation' is incremented every time the metadata is written */
1712 generation = __le32_to_cpu(mpb->generation_num);
1713 generation++;
1714 mpb->generation_num = __cpu_to_le32(generation);
1715
1716 for (d = super->disks; d; d = d->next) {
1717 if (d->index == -1)
1718 spares++;
1719 else {
1720 mpb->disk[d->index] = d->disk;
1721 mpb_size += sizeof(struct imsm_disk);
1722 }
1723 }
1724
1725 for (i = 0; i < mpb->num_raid_devs; i++) {
1726 struct imsm_dev *dev = __get_imsm_dev(mpb, i);
1727
1728 imsm_copy_dev(dev, super->dev_tbl[i]);
1729 mpb_size += sizeof_imsm_dev(dev, 0);
1730 }
1731 mpb_size += __le32_to_cpu(mpb->bbm_log_size);
1732 mpb->mpb_size = __cpu_to_le32(mpb_size);
1733
1734 /* recalculate checksum */
1735 sum = __gen_imsm_checksum(mpb);
1736 mpb->check_sum = __cpu_to_le32(sum);
1737
1738 /* write the mpb for disks that compose raid devices */
1739 for (d = super->disks; d ; d = d->next) {
1740 if (d->index < 0)
1741 continue;
1742 if (store_imsm_mpb(d->fd, super))
1743 fprintf(stderr, "%s: failed for device %d:%d %s\n",
1744 __func__, d->major, d->minor, strerror(errno));
1745 if (doclose) {
1746 close(d->fd);
1747 d->fd = -1;
1748 }
1749 }
1750
1751 if (spares)
1752 return write_super_imsm_spares(super, doclose);
1753
1754 return 0;
1755}
1756
1757static int create_array(struct supertype *st)
1758{
1759 size_t len;
1760 struct imsm_update_create_array *u;
1761 struct intel_super *super = st->sb;
1762 struct imsm_dev *dev = get_imsm_dev(super, super->current_vol);
1763
1764 len = sizeof(*u) - sizeof(*dev) + sizeof_imsm_dev(dev, 0);
1765 u = malloc(len);
1766 if (!u) {
1767 fprintf(stderr, "%s: failed to allocate update buffer\n",
1768 __func__);
1769 return 1;
1770 }
1771
1772 u->type = update_create_array;
1773 u->dev_idx = super->current_vol;
1774 imsm_copy_dev(&u->dev, dev);
1775 append_metadata_update(st, u, len);
1776
1777 return 0;
1778}
1779
1780static int add_disk(struct supertype *st)
1781{
1782 struct intel_super *super = st->sb;
1783 size_t len;
1784 struct imsm_update_add_disk *u;
1785
1786 if (!super->add)
1787 return 0;
1788
1789 len = sizeof(*u);
1790 u = malloc(len);
1791 if (!u) {
1792 fprintf(stderr, "%s: failed to allocate update buffer\n",
1793 __func__);
1794 return 1;
1795 }
1796
1797 u->type = update_add_disk;
1798 append_metadata_update(st, u, len);
1799
1800 return 0;
1801}
1802
1803static int write_init_super_imsm(struct supertype *st)
1804{
1805 if (st->update_tail) {
1806 /* queue the recently created array / added disk
1807 * as a metadata update */
1808 struct intel_super *super = st->sb;
1809 struct dl *d;
1810 int rv;
1811
1812 /* determine if we are creating a volume or adding a disk */
1813 if (super->current_vol < 0) {
1814 /* in the add disk case we are running in mdmon
1815 * context, so don't close fd's
1816 */
1817 return add_disk(st);
1818 } else
1819 rv = create_array(st);
1820
1821 for (d = super->disks; d ; d = d->next) {
1822 close(d->fd);
1823 d->fd = -1;
1824 }
1825
1826 return rv;
1827 } else
1828 return write_super_imsm(st->sb, 1);
1829}
1830
1831static int store_zero_imsm(struct supertype *st, int fd)
1832{
551c80c1 1833 unsigned long long dsize;
6416d527 1834 void *buf;
551c80c1
DW
1835
1836 get_dev_size(fd, NULL, &dsize);
1837
1838 /* first block is stored on second to last sector of the disk */
1839 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
1840 return 1;
1841
ad97895e
DW
1842 if (posix_memalign(&buf, 512, 512) != 0)
1843 return 1;
1844
eb7ea463
DW
1845 memset(buf, 0, 512);
1846 if (write(fd, buf, 512) != 512)
551c80c1 1847 return 1;
cdddbdbc
DW
1848 return 0;
1849}
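
/* Editor's illustrative sketch, not part of the original source: the IMSM
 * anchor occupies the second-to-last 512-byte sector of the disk, and any
 * extended mpb sectors immediately precede it (see store_imsm_mpb() below).
 * This helper captures the offset arithmetic used in both places.
 */
static unsigned long long example_mpb_offset(unsigned long long dsize,
					     unsigned long long ext_sectors)
{
	/* pass ext_sectors == 0 to get the offset of the anchor itself */
	return dsize - 512 * (2 + ext_sectors);
}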
1850
cdddbdbc
DW
1851static int validate_geometry_imsm_container(struct supertype *st, int level,
1852 int layout, int raiddisks, int chunk,
c2c087e6 1853 unsigned long long size, char *dev,
2c514b71
NB
1854 unsigned long long *freesize,
1855 int verbose)
cdddbdbc 1856{
c2c087e6
DW
1857 int fd;
1858 unsigned long long ldsize;
cdddbdbc 1859
c2c087e6
DW
1860 if (level != LEVEL_CONTAINER)
1861 return 0;
1862 if (!dev)
1863 return 1;
1864
1865 fd = open(dev, O_RDONLY|O_EXCL, 0);
1866 if (fd < 0) {
2c514b71
NB
1867 if (verbose)
1868 fprintf(stderr, Name ": imsm: Cannot open %s: %s\n",
1869 dev, strerror(errno));
c2c087e6
DW
1870 return 0;
1871 }
1872 if (!get_dev_size(fd, dev, &ldsize)) {
1873 close(fd);
1874 return 0;
1875 }
1876 close(fd);
1877
1878 *freesize = avail_size_imsm(st, ldsize >> 9);
1879
1880 return 1;
cdddbdbc
DW
1881}
1882
c2c087e6
DW
1883/* validate_geometry_imsm_volume - lifted from validate_geometry_ddf_bvd
1884 * FIX ME add ahci details
1885 */
8b353278
DW
1886static int validate_geometry_imsm_volume(struct supertype *st, int level,
1887 int layout, int raiddisks, int chunk,
c2c087e6 1888 unsigned long long size, char *dev,
2c514b71
NB
1889 unsigned long long *freesize,
1890 int verbose)
cdddbdbc 1891{
c2c087e6
DW
1892 struct stat stb;
1893 struct intel_super *super = st->sb;
1894 struct dl *dl;
1895 unsigned long long pos = 0;
1896 unsigned long long maxsize;
1897 struct extent *e;
1898 int i;
cdddbdbc 1899
c2c087e6
DW
1900 if (level == LEVEL_CONTAINER)
1901 return 0;
1902
1903 if (level == 1 && raiddisks > 2) {
2c514b71
NB
1904 if (verbose)
1905 fprintf(stderr, Name ": imsm does not support more "
1906 "than 2 in a raid1 configuration\n");
c2c087e6
DW
1907 return 0;
1908 }
1909
1910 /* We must have the container info already read in. */
1911 if (!super)
1912 return 0;
1913
1914 if (!dev) {
1915 /* General test: make sure there is space for
2da8544a
DW
1916 * 'raiddisks' device extents of size 'size' at a given
1917 * offset
c2c087e6
DW
1918 */
1919 unsigned long long minsize = size*2 /* convert to blocks */;
2da8544a 1920 unsigned long long start_offset = ~0ULL;
c2c087e6
DW
1921 int dcnt = 0;
1922 if (minsize == 0)
1923 minsize = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS;
1924 for (dl = super->disks; dl ; dl = dl->next) {
1925 int found = 0;
1926
bf5a934a 1927 pos = 0;
c2c087e6
DW
1928 i = 0;
1929 e = get_extents(super, dl);
1930 if (!e) continue;
1931 do {
1932 unsigned long long esize;
1933 esize = e[i].start - pos;
1934 if (esize >= minsize)
1935 found = 1;
2da8544a
DW
1936 if (found && start_offset == ~0ULL) {
1937 start_offset = pos;
1938 break;
1939 } else if (found && pos != start_offset) {
1940 found = 0;
1941 break;
1942 }
c2c087e6
DW
1943 pos = e[i].start + e[i].size;
1944 i++;
1945 } while (e[i-1].size);
1946 if (found)
1947 dcnt++;
1948 free(e);
1949 }
1950 if (dcnt < raiddisks) {
2c514b71
NB
1951 if (verbose)
1952 fprintf(stderr, Name ": imsm: Not enough "
1953 "devices with space for this array "
1954 "(%d < %d)\n",
1955 dcnt, raiddisks);
c2c087e6
DW
1956 return 0;
1957 }
1958 return 1;
1959 }
1960 /* This device must be a member of the set */
1961 if (stat(dev, &stb) < 0)
1962 return 0;
1963 if ((S_IFMT & stb.st_mode) != S_IFBLK)
1964 return 0;
1965 for (dl = super->disks ; dl ; dl = dl->next) {
1966 if (dl->major == major(stb.st_rdev) &&
1967 dl->minor == minor(stb.st_rdev))
1968 break;
1969 }
1970 if (!dl) {
2c514b71
NB
1971 if (verbose)
1972 fprintf(stderr, Name ": %s is not in the "
1973 "same imsm set\n", dev);
c2c087e6
DW
1974 return 0;
1975 }
1976 e = get_extents(super, dl);
1977 maxsize = 0;
1978 i = 0;
1979 if (e) do {
1980 unsigned long long esize;
1981 esize = e[i].start - pos;
1982 if (esize >= maxsize)
1983 maxsize = esize;
1984 pos = e[i].start + e[i].size;
1985 i++;
1986 } while (e[i-1].size);
1987 *freesize = maxsize;
1988
1989 return 1;
cdddbdbc
DW
1990}
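
/* Editor's illustrative sketch, not part of the original source: the free
 * space scan above treats the gap between the end of the previous used
 * extent ('pos') and the start of the next one as a candidate region, and
 * stops at the zero-length entry that terminates the get_extents() list.
 * The helper below repeats that walk and returns the largest gap on a
 * single disk.
 */
static unsigned long long example_largest_free_extent(struct intel_super *super,
						      struct dl *dl)
{
	struct extent *e = get_extents(super, dl);
	unsigned long long pos = 0;
	unsigned long long maxsize = 0;
	int i = 0;

	if (!e)
		return 0;
	do {
		unsigned long long esize = e[i].start - pos;

		if (esize > maxsize)
			maxsize = esize;
		pos = e[i].start + e[i].size;
		i++;
	} while (e[i-1].size);
	free(e);

	return maxsize;
}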
1991
604b746f
JD
1992int imsm_bbm_log_size(struct imsm_super *mpb)
1993{
1994 return __le32_to_cpu(mpb->bbm_log_size);
1995}
1996
bf5a934a
DW
1997static int validate_geometry_imsm(struct supertype *st, int level, int layout,
1998 int raiddisks, int chunk, unsigned long long size,
1999 char *dev, unsigned long long *freesize,
2000 int verbose)
2001{
2002 int fd, cfd;
2003 struct mdinfo *sra;
2004
2005 /* if given unused devices create a container
2006	 * if given devices in a container create a member volume
2007 */
2008 if (level == LEVEL_CONTAINER) {
2009 /* Must be a fresh device to add to a container */
2010 return validate_geometry_imsm_container(st, level, layout,
2011 raiddisks, chunk, size,
2012 dev, freesize,
2013 verbose);
2014 }
2015
2016 if (st->sb) {
2017 /* creating in a given container */
2018 return validate_geometry_imsm_volume(st, level, layout,
2019 raiddisks, chunk, size,
2020 dev, freesize, verbose);
2021 }
2022
2023 /* limit creation to the following levels */
2024 if (!dev)
2025 switch (level) {
2026 case 0:
2027 case 1:
2028 case 10:
2029 case 5:
2030 break;
2031 default:
2032 return 1;
2033 }
2034
2035 /* This device needs to be a device in an 'imsm' container */
2036 fd = open(dev, O_RDONLY|O_EXCL, 0);
2037 if (fd >= 0) {
2038 if (verbose)
2039 fprintf(stderr,
2040 Name ": Cannot create this array on device %s\n",
2041 dev);
2042 close(fd);
2043 return 0;
2044 }
2045 if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) {
2046 if (verbose)
2047 fprintf(stderr, Name ": Cannot open %s: %s\n",
2048 dev, strerror(errno));
2049 return 0;
2050 }
2051 /* Well, it is in use by someone, maybe an 'imsm' container. */
2052 cfd = open_container(fd);
2053 if (cfd < 0) {
2054 close(fd);
2055 if (verbose)
2056 fprintf(stderr, Name ": Cannot use %s: It is busy\n",
2057 dev);
2058 return 0;
2059 }
2060 sra = sysfs_read(cfd, 0, GET_VERSION);
2061 close(fd);
2062 if (sra && sra->array.major_version == -1 &&
2063 strcmp(sra->text_version, "imsm") == 0) {
2064		/* This is a member of an imsm container.  Load the container
2065 * and try to create a volume
2066 */
2067 struct intel_super *super;
2068
2069 if (load_super_imsm_all(st, cfd, (void **) &super, NULL, 1) == 0) {
2070 st->sb = super;
2071 st->container_dev = fd2devnum(cfd);
2072 close(cfd);
2073 return validate_geometry_imsm_volume(st, level, layout,
2074 raiddisks, chunk,
2075 size, dev,
2076 freesize, verbose);
2077 }
2078 close(cfd);
2079 } else /* may belong to another container */
2080 return 0;
2081
2082 return 1;
2083}
2084
cdddbdbc
DW
2085static struct mdinfo *container_content_imsm(struct supertype *st)
2086{
4f5bc454
DW
2087 /* Given a container loaded by load_super_imsm_all,
2088 * extract information about all the arrays into
2089 * an mdinfo tree.
2090 *
2091 * For each imsm_dev create an mdinfo, fill it in,
2092 * then look for matching devices in super->disks
2093 * and create appropriate device mdinfo.
2094 */
2095 struct intel_super *super = st->sb;
949c47a0 2096 struct imsm_super *mpb = super->anchor;
4f5bc454
DW
2097 struct mdinfo *rest = NULL;
2098 int i;
cdddbdbc 2099
604b746f
JD
2100 /* do not assemble arrays that might have bad blocks */
2101 if (imsm_bbm_log_size(super->anchor)) {
2102 fprintf(stderr, Name ": BBM log found in metadata. "
2103 "Cannot activate array(s).\n");
2104 return NULL;
2105 }
2106
4f5bc454 2107 for (i = 0; i < mpb->num_raid_devs; i++) {
949c47a0 2108 struct imsm_dev *dev = get_imsm_dev(super, i);
4f5bc454 2109 struct imsm_vol *vol = &dev->vol;
a965f303 2110 struct imsm_map *map = get_imsm_map(dev, 0);
4f5bc454 2111 struct mdinfo *this;
4f5bc454
DW
2112 int slot;
2113
2114 this = malloc(sizeof(*this));
2115 memset(this, 0, sizeof(*this));
2116 this->next = rest;
4f5bc454 2117
4f5bc454
DW
2118 this->array.level = get_imsm_raid_level(map);
2119 this->array.raid_disks = map->num_members;
c2c087e6 2120 this->array.layout = imsm_level_to_layout(this->array.level);
4f5bc454
DW
2121 this->array.md_minor = -1;
2122 this->array.ctime = 0;
2123 this->array.utime = 0;
2124 this->array.chunk_size = __le16_to_cpu(map->blocks_per_strip) << 9;
2125 this->array.state = !vol->dirty;
2126 this->container_member = i;
9a1608e5
DW
2127 if (map->map_state == IMSM_T_STATE_UNINITIALIZED ||
2128 dev->vol.dirty || dev->vol.migr_state)
0fd5c350
DW
2129 this->resync_start = 0;
2130 else
2131 this->resync_start = ~0ULL;
2132
4f5bc454
DW
2133 strncpy(this->name, (char *) dev->volume, MAX_RAID_SERIAL_LEN);
2134 this->name[MAX_RAID_SERIAL_LEN] = 0;
2135
159c3a1a
NB
2136 sprintf(this->text_version, "/%s/%d",
2137 devnum2devname(st->container_dev),
2138 this->container_member);
2139
4f5bc454
DW
2140 memset(this->uuid, 0, sizeof(this->uuid));
2141
f54e6321 2142 this->component_size = __le32_to_cpu(map->blocks_per_member);
4f5bc454
DW
2143
2144 for (slot = 0 ; slot < map->num_members; slot++) {
4f5bc454
DW
2145 struct mdinfo *info_d;
2146 struct dl *d;
2147 int idx;
9a1608e5 2148 int skip;
4f5bc454 2149 __u32 s;
7eef0453 2150 __u32 ord;
4f5bc454 2151
9a1608e5 2152 skip = 0;
ff077194 2153 idx = get_imsm_disk_idx(dev, slot);
7eef0453 2154 ord = get_imsm_ord_tbl_ent(dev, slot);
4f5bc454
DW
2155 for (d = super->disks; d ; d = d->next)
2156 if (d->index == idx)
2157 break;
2158
2159 if (d == NULL)
9a1608e5
DW
2160 skip = 1;
2161
2162 s = d ? __le32_to_cpu(d->disk.status) : 0;
2163 if (s & FAILED_DISK)
2164 skip = 1;
2165 if (!(s & USABLE_DISK))
2166 skip = 1;
7eef0453
DW
2167 if (ord & IMSM_ORD_REBUILD)
2168 skip = 1;
9a1608e5
DW
2169
2170 /*
2171			 * if we skip some disks the array will be assembled degraded;
2172 * reset resync start to avoid a dirty-degraded situation
2173 *
2174 * FIXME handle dirty degraded
2175 */
2176 if (skip && !dev->vol.dirty)
2177 this->resync_start = ~0ULL;
2178 if (skip)
2179 continue;
4f5bc454
DW
2180
2181 info_d = malloc(sizeof(*info_d));
9a1608e5
DW
2182 if (!info_d) {
2183 fprintf(stderr, Name ": failed to allocate disk"
2184 " for volume %s\n", (char *) dev->volume);
2185 free(this);
2186 this = rest;
2187 break;
2188 }
4f5bc454
DW
2189 memset(info_d, 0, sizeof(*info_d));
2190 info_d->next = this->devs;
2191 this->devs = info_d;
2192
4f5bc454
DW
2193 info_d->disk.number = d->index;
2194 info_d->disk.major = d->major;
2195 info_d->disk.minor = d->minor;
2196 info_d->disk.raid_disk = slot;
4f5bc454
DW
2197
2198 this->array.working_disks++;
2199
2200 info_d->events = __le32_to_cpu(mpb->generation_num);
2201 info_d->data_offset = __le32_to_cpu(map->pba_of_lba0);
2202 info_d->component_size = __le32_to_cpu(map->blocks_per_member);
2203 if (d->devname)
2204 strcpy(info_d->name, d->devname);
2205 }
9a1608e5 2206 rest = this;
4f5bc454
DW
2207 }
2208
2209 return rest;
cdddbdbc
DW
2210}
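
/* Editor's illustrative sketch, not part of the original source: arrays in
 * the tree returned above are chained through ->next and their member
 * disks through ->devs, so a caller can walk the result like this.
 */
static void example_dump_container_content(struct mdinfo *rest)
{
	struct mdinfo *a;
	struct mdinfo *d;

	for (a = rest; a; a = a->next) {
		dprintf("volume %d: level %d with %d disks\n",
			a->container_member, a->array.level,
			a->array.raid_disks);
		for (d = a->devs; d; d = d->next)
			dprintf(" slot %d -> %d:%d\n", d->disk.raid_disk,
				d->disk.major, d->disk.minor);
	}
}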
2211
845dea95 2212
cba0191b
NB
2213static int imsm_open_new(struct supertype *c, struct active_array *a,
2214 char *inst)
845dea95 2215{
0372d5a2 2216 struct intel_super *super = c->sb;
949c47a0 2217 struct imsm_super *mpb = super->anchor;
0372d5a2 2218
949c47a0 2219 if (atoi(inst) >= mpb->num_raid_devs) {
0372d5a2
DW
2220 fprintf(stderr, "%s: subarry index %d, out of range\n",
2221 __func__, atoi(inst));
2222 return -ENODEV;
2223 }
2224
4e6e574a 2225 dprintf("imsm: open_new %s\n", inst);
cba0191b 2226 a->info.container_member = atoi(inst);
845dea95
NB
2227 return 0;
2228}
2229
fb49eef2 2230static __u8 imsm_check_degraded(struct intel_super *super, struct imsm_dev *dev, int failed)
c2a1e7da 2231{
a965f303 2232 struct imsm_map *map = get_imsm_map(dev, 0);
c2a1e7da
DW
2233
2234 if (!failed)
3393c6af
DW
2235 return map->map_state == IMSM_T_STATE_UNINITIALIZED ?
2236 IMSM_T_STATE_UNINITIALIZED : IMSM_T_STATE_NORMAL;
c2a1e7da
DW
2237
2238 switch (get_imsm_raid_level(map)) {
2239 case 0:
2240 return IMSM_T_STATE_FAILED;
2241 break;
2242 case 1:
2243 if (failed < map->num_members)
2244 return IMSM_T_STATE_DEGRADED;
2245 else
2246 return IMSM_T_STATE_FAILED;
2247 break;
2248 case 10:
2249 {
2250 /**
2251 * check to see if any mirrors have failed,
2252 * otherwise we are degraded
2253 */
2254 int device_per_mirror = 2; /* FIXME is this always the case?
2255 * and are they always adjacent?
2256 */
8796fdc4 2257 int r10fail = 0;
c2a1e7da
DW
2258 int i;
2259
2260 for (i = 0; i < map->num_members; i++) {
ff077194 2261 int idx = get_imsm_disk_idx(dev, i);
949c47a0 2262 struct imsm_disk *disk = get_imsm_disk(super, idx);
c2a1e7da 2263
8796fdc4
DW
2264 if (!disk)
2265 r10fail++;
2266 else if (__le32_to_cpu(disk->status) & FAILED_DISK)
2267 r10fail++;
c2a1e7da 2268
8796fdc4 2269 if (r10fail >= device_per_mirror)
c2a1e7da
DW
2270 return IMSM_T_STATE_FAILED;
2271
8796fdc4 2272 /* reset 'r10fail' for next mirror set */
c2a1e7da 2273 if (!((i + 1) % device_per_mirror))
8796fdc4 2274 r10fail = 0;
c2a1e7da
DW
2275 }
2276
2277 return IMSM_T_STATE_DEGRADED;
2278 }
2279 case 5:
2280 if (failed < 2)
2281 return IMSM_T_STATE_DEGRADED;
2282 else
2283 return IMSM_T_STATE_FAILED;
2284 break;
2285 default:
2286 break;
2287 }
2288
2289 return map->map_state;
2290}
2291
ff077194 2292static int imsm_count_failed(struct intel_super *super, struct imsm_dev *dev)
c2a1e7da
DW
2293{
2294 int i;
2295 int failed = 0;
2296 struct imsm_disk *disk;
ff077194 2297 struct imsm_map *map = get_imsm_map(dev, 0);
c2a1e7da
DW
2298
2299 for (i = 0; i < map->num_members; i++) {
b10b37b8
DW
2300 __u32 ord = get_imsm_ord_tbl_ent(dev, i);
2301 int idx = ord_to_idx(ord);
c2a1e7da 2302
949c47a0 2303 disk = get_imsm_disk(super, idx);
b10b37b8
DW
2304 if (!disk ||
2305 __le32_to_cpu(disk->status) & FAILED_DISK ||
2306 ord & IMSM_ORD_REBUILD)
fcb84475 2307 failed++;
c2a1e7da
DW
2308 }
2309
2310 return failed;
845dea95
NB
2311}
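
/* Editor's summary and sketch, not part of the original source: with
 * 'failed' counted as above, imsm_check_degraded() maps roughly to
 * raid0 -> FAILED on any failure, raid1/raid10 -> DEGRADED until an
 * entire mirror set is lost, and raid5 -> DEGRADED with one failure,
 * FAILED with two or more.  The monitor paths below pair the two
 * helpers like this:
 */
static __u8 example_current_map_state(struct intel_super *super,
				      struct imsm_dev *dev)
{
	int failed = imsm_count_failed(super, dev);

	return imsm_check_degraded(super, dev, failed);
}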
2312
0c046afd
DW
2313static int is_resyncing(struct imsm_dev *dev)
2314{
2315 struct imsm_map *migr_map;
2316
2317 if (!dev->vol.migr_state)
2318 return 0;
2319
2320 if (dev->vol.migr_type == 0)
2321 return 1;
2322
2323 migr_map = get_imsm_map(dev, 1);
2324
2325 if (migr_map->map_state == IMSM_T_STATE_NORMAL)
2326 return 1;
2327 else
2328 return 0;
2329}
2330
2331static int is_rebuilding(struct imsm_dev *dev)
2332{
2333 struct imsm_map *migr_map;
2334
2335 if (!dev->vol.migr_state)
2336 return 0;
2337
2338 if (dev->vol.migr_type == 0)
2339 return 0;
2340
2341 migr_map = get_imsm_map(dev, 1);
2342
2343 if (migr_map->map_state == IMSM_T_STATE_DEGRADED)
2344 return 1;
2345 else
2346 return 0;
2347}
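
/* Editor's illustrative sketch, not part of the original source: migration
 * state decodes as "no migration" when migr_state is clear; otherwise
 * migr_type 0 is treated as initialization/resync, while a non-zero
 * migr_type is a rebuild when the second (migration) map is degraded and a
 * resync when it is normal, as the two helpers above encode.
 */
static const char *example_migr_activity(struct imsm_dev *dev)
{
	if (!dev->vol.migr_state)
		return "idle";
	if (is_resyncing(dev))
		return "resync/initialize";
	if (is_rebuilding(dev))
		return "rebuild";
	return "other migration";
}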
2348
2349/* Handle dirty -> clean transitions and resync.  Degraded and rebuild
2350 * states are handled in imsm_set_disk() with one exception: when a
2351 * resync is stopped due to a new failure this routine will set the
2352 * 'degraded' state for the array.
2353 */
01f157d7 2354static int imsm_set_array_state(struct active_array *a, int consistent)
a862209d
DW
2355{
2356 int inst = a->info.container_member;
2357 struct intel_super *super = a->container->sb;
949c47a0 2358 struct imsm_dev *dev = get_imsm_dev(super, inst);
a965f303 2359 struct imsm_map *map = get_imsm_map(dev, 0);
0c046afd
DW
2360 int failed = imsm_count_failed(super, dev);
2361 __u8 map_state = imsm_check_degraded(super, dev, failed);
a862209d 2362
0c046afd
DW
2363 if (consistent == 2 &&
2364 (a->resync_start != ~0ULL ||
2365 map_state != IMSM_T_STATE_NORMAL ||
2366 dev->vol.migr_state))
01f157d7 2367 consistent = 0;
272906ef 2368
a862209d 2369 if (a->resync_start == ~0ULL) {
0c046afd
DW
2370		/* complete initialization / resync,
2371 * recovery is completed in ->set_disk
2372 */
2373 if (is_resyncing(dev)) {
2374 dprintf("imsm: mark resync done\n");
3393c6af 2375 dev->vol.migr_state = 0;
0c046afd 2376 map->map_state = map_state;
115c3803 2377 super->updates_pending++;
115c3803 2378 }
0c046afd
DW
2379 } else if (!is_resyncing(dev) && !failed) {
2380 /* mark the start of the init process if nothing is failed */
2381 dprintf("imsm: mark resync start (%llu)\n", a->resync_start);
2382 map->map_state = map_state;
2383 migrate(dev, IMSM_T_STATE_NORMAL,
2384 map->map_state == IMSM_T_STATE_NORMAL);
3393c6af 2385 super->updates_pending++;
115c3803 2386 }
a862209d 2387
3393c6af 2388 /* mark dirty / clean */
0c046afd 2389 if (dev->vol.dirty != !consistent) {
3393c6af 2390 dprintf("imsm: mark '%s' (%llu)\n",
0c046afd
DW
2391 consistent ? "clean" : "dirty", a->resync_start);
2392 if (consistent)
2393 dev->vol.dirty = 0;
2394 else
2395 dev->vol.dirty = 1;
a862209d
DW
2396 super->updates_pending++;
2397 }
01f157d7 2398 return consistent;
a862209d
DW
2399}
2400
8d45d196 2401static void imsm_set_disk(struct active_array *a, int n, int state)
845dea95 2402{
8d45d196
DW
2403 int inst = a->info.container_member;
2404 struct intel_super *super = a->container->sb;
949c47a0 2405 struct imsm_dev *dev = get_imsm_dev(super, inst);
a965f303 2406 struct imsm_map *map = get_imsm_map(dev, 0);
8d45d196 2407 struct imsm_disk *disk;
0c046afd 2408 int failed;
8d45d196 2409 __u32 status;
b10b37b8 2410 __u32 ord;
0c046afd 2411 __u8 map_state;
8d45d196
DW
2412
2413	if (n >= map->num_members)
2414 fprintf(stderr, "imsm: set_disk %d out of range 0..%d\n",
2415 n, map->num_members - 1);
2416
2417 if (n < 0)
2418 return;
2419
4e6e574a 2420 dprintf("imsm: set_disk %d:%x\n", n, state);
8d45d196 2421
b10b37b8
DW
2422 ord = get_imsm_ord_tbl_ent(dev, n);
2423 disk = get_imsm_disk(super, ord_to_idx(ord));
8d45d196 2424
5802a811 2425 /* check for new failures */
8d45d196
DW
2426 status = __le32_to_cpu(disk->status);
2427 if ((state & DS_FAULTY) && !(status & FAILED_DISK)) {
2428 status |= FAILED_DISK;
2429 disk->status = __cpu_to_le32(status);
8796fdc4
DW
2430 disk->scsi_id = __cpu_to_le32(~0UL);
2431 memmove(&disk->serial[0], &disk->serial[1], MAX_RAID_SERIAL_LEN - 1);
5802a811 2432 super->updates_pending++;
8d45d196 2433 }
19859edc 2434 /* check if in_sync */
b10b37b8
DW
2435 if (state & DS_INSYNC && ord & IMSM_ORD_REBUILD) {
2436 struct imsm_map *migr_map = get_imsm_map(dev, 1);
2437
2438 set_imsm_ord_tbl_ent(migr_map, n, ord_to_idx(ord));
19859edc
DW
2439 super->updates_pending++;
2440 }
8d45d196 2441
0c046afd
DW
2442 failed = imsm_count_failed(super, dev);
2443 map_state = imsm_check_degraded(super, dev, failed);
5802a811 2444
0c046afd
DW
2445 /* check if recovery complete, newly degraded, or failed */
2446 if (map_state == IMSM_T_STATE_NORMAL && is_rebuilding(dev)) {
2447 map->map_state = map_state;
2448 dev->vol.migr_state = 0;
2449 super->updates_pending++;
2450 } else if (map_state == IMSM_T_STATE_DEGRADED &&
2451 map->map_state != map_state &&
2452 !dev->vol.migr_state) {
2453 dprintf("imsm: mark degraded\n");
2454 map->map_state = map_state;
2455 super->updates_pending++;
2456 } else if (map_state == IMSM_T_STATE_FAILED &&
2457 map->map_state != map_state) {
2458 dprintf("imsm: mark failed\n");
2459 dev->vol.migr_state = 0;
2460 map->map_state = map_state;
2461 super->updates_pending++;
5802a811 2462 }
845dea95
NB
2463}
2464
c2a1e7da
DW
2465static int store_imsm_mpb(int fd, struct intel_super *super)
2466{
949c47a0 2467 struct imsm_super *mpb = super->anchor;
c2a1e7da
DW
2468 __u32 mpb_size = __le32_to_cpu(mpb->mpb_size);
2469 unsigned long long dsize;
2470 unsigned long long sectors;
2471
2472 get_dev_size(fd, NULL, &dsize);
2473
272f648f
DW
2474 if (mpb_size > 512) {
2475 /* -1 to account for anchor */
2476 sectors = mpb_sectors(mpb) - 1;
c2a1e7da 2477
272f648f
DW
2478		/* write the extended mpb to the sectors preceding the anchor */
2479 if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0)
2480 return 1;
c2a1e7da 2481
99e29264 2482 if (write(fd, super->buf + 512, 512 * sectors) != 512 * sectors)
272f648f
DW
2483 return 1;
2484 }
c2a1e7da 2485
272f648f
DW
2486 /* first block is stored on second to last sector of the disk */
2487 if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0)
c2a1e7da
DW
2488 return 1;
2489
272f648f 2490 if (write(fd, super->buf, 512) != 512)
c2a1e7da
DW
2491 return 1;
2492
c2a1e7da
DW
2493 return 0;
2494}
2495
2e735d19 2496static void imsm_sync_metadata(struct supertype *container)
845dea95 2497{
2e735d19 2498 struct intel_super *super = container->sb;
c2a1e7da
DW
2499
2500 if (!super->updates_pending)
2501 return;
2502
c2c087e6 2503 write_super_imsm(super, 0);
c2a1e7da
DW
2504
2505 super->updates_pending = 0;
845dea95
NB
2506}
2507
272906ef
DW
2508static struct dl *imsm_readd(struct intel_super *super, int idx, struct active_array *a)
2509{
2510 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
ff077194 2511 int i = get_imsm_disk_idx(dev, idx);
272906ef
DW
2512 struct dl *dl;
2513
2514 for (dl = super->disks; dl; dl = dl->next)
2515 if (dl->index == i)
2516 break;
2517
8796fdc4 2518 if (dl && __le32_to_cpu(dl->disk.status) & FAILED_DISK)
272906ef
DW
2519 dl = NULL;
2520
2521 if (dl)
2522 dprintf("%s: found %x:%x\n", __func__, dl->major, dl->minor);
2523
2524 return dl;
2525}
2526
2527static struct dl *imsm_add_spare(struct intel_super *super, int idx, struct active_array *a)
2528{
2529 struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member);
2530 struct imsm_map *map = get_imsm_map(dev, 0);
2531 unsigned long long esize;
2532 unsigned long long pos;
2533 struct mdinfo *d;
2534 struct extent *ex;
2535 int j;
2536 int found;
2537 __u32 array_start;
9a1608e5 2538 __u32 status;
272906ef
DW
2539 struct dl *dl;
2540
2541 for (dl = super->disks; dl; dl = dl->next) {
2542 /* If in this array, skip */
2543 for (d = a->info.devs ; d ; d = d->next)
2544 if (d->disk.major == dl->major &&
2545 d->disk.minor == dl->minor) {
2546 dprintf("%x:%x already in array\n", dl->major, dl->minor);
2547 break;
2548 }
2549 if (d)
2550 continue;
2551
9a1608e5
DW
2552 /* skip marked in use or failed drives */
2553 status = __le32_to_cpu(dl->disk.status);
2554 if (status & FAILED_DISK || status & CONFIGURED_DISK) {
2555 dprintf("%x:%x status ( %s%s)\n",
2556 dl->major, dl->minor,
2557 status & FAILED_DISK ? "failed " : "",
2558 status & CONFIGURED_DISK ? "configured " : "");
2559 continue;
2560 }
2561
272906ef
DW
2562 /* Does this unused device have the requisite free space?
2563 * We need a->info.component_size sectors
2564 */
2565 ex = get_extents(super, dl);
2566 if (!ex) {
2567 dprintf("cannot get extents\n");
2568 continue;
2569 }
2570 found = 0;
2571 j = 0;
2572 pos = 0;
2573 array_start = __le32_to_cpu(map->pba_of_lba0);
2574
2575 do {
2576 /* check that we can start at pba_of_lba0 with
2577 * a->info.component_size of space
2578 */
2579 esize = ex[j].start - pos;
2580 if (array_start >= pos &&
2581 array_start + a->info.component_size < ex[j].start) {
2582 found = 1;
2583 break;
2584 }
2585 pos = ex[j].start + ex[j].size;
2586 j++;
2587
2588 } while (ex[j-1].size);
2589
2590 free(ex);
2591 if (!found) {
2592 dprintf("%x:%x does not have %llu at %d\n",
2593 dl->major, dl->minor,
2594 a->info.component_size,
2595 __le32_to_cpu(map->pba_of_lba0));
2596 /* No room */
2597 continue;
2598 } else
2599 break;
2600 }
2601
2602 return dl;
2603}
2604
88758e9d
DW
2605static struct mdinfo *imsm_activate_spare(struct active_array *a,
2606 struct metadata_update **updates)
2607{
2608 /**
d23fe947
DW
2609 * Find a device with unused free space and use it to replace a
2610	 * failed/vacant region in an array.  We replace failed regions one
2611 * array at a time. The result is that a new spare disk will be added
2612 * to the first failed array and after the monitor has finished
2613 * propagating failures the remainder will be consumed.
88758e9d 2614 *
d23fe947
DW
2615 * FIXME add a capability for mdmon to request spares from another
2616 * container.
88758e9d
DW
2617 */
2618
2619 struct intel_super *super = a->container->sb;
88758e9d 2620 int inst = a->info.container_member;
949c47a0 2621 struct imsm_dev *dev = get_imsm_dev(super, inst);
a965f303 2622 struct imsm_map *map = get_imsm_map(dev, 0);
88758e9d
DW
2623 int failed = a->info.array.raid_disks;
2624 struct mdinfo *rv = NULL;
2625 struct mdinfo *d;
2626 struct mdinfo *di;
2627 struct metadata_update *mu;
2628 struct dl *dl;
2629 struct imsm_update_activate_spare *u;
2630 int num_spares = 0;
2631 int i;
2632
2633 for (d = a->info.devs ; d ; d = d->next) {
2634 if ((d->curr_state & DS_FAULTY) &&
2635 d->state_fd >= 0)
2636 /* wait for Removal to happen */
2637 return NULL;
2638 if (d->state_fd >= 0)
2639 failed--;
2640 }
2641
2642 dprintf("imsm: activate spare: inst=%d failed=%d (%d) level=%d\n",
2643 inst, failed, a->info.array.raid_disks, a->info.array.level);
fb49eef2 2644 if (imsm_check_degraded(super, dev, failed) != IMSM_T_STATE_DEGRADED)
88758e9d
DW
2645 return NULL;
2646
2647 /* For each slot, if it is not working, find a spare */
88758e9d
DW
2648 for (i = 0; i < a->info.array.raid_disks; i++) {
2649 for (d = a->info.devs ; d ; d = d->next)
2650 if (d->disk.raid_disk == i)
2651 break;
2652 dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0);
2653 if (d && (d->state_fd >= 0))
2654 continue;
2655
272906ef
DW
2656 /*
2657 * OK, this device needs recovery. Try to re-add the previous
2658 * occupant of this slot, if this fails add a new spare
2659 */
2660 dl = imsm_readd(super, i, a);
2661 if (!dl)
2662 dl = imsm_add_spare(super, i, a);
2663 if (!dl)
2664 continue;
2665
2666 /* found a usable disk with enough space */
2667 di = malloc(sizeof(*di));
2668 memset(di, 0, sizeof(*di));
2669
2670 /* dl->index will be -1 in the case we are activating a
2671 * pristine spare. imsm_process_update() will create a
2672 * new index in this case. Once a disk is found to be
2673 * failed in all member arrays it is kicked from the
2674 * metadata
2675 */
2676 di->disk.number = dl->index;
d23fe947 2677
272906ef
DW
2678 /* (ab)use di->devs to store a pointer to the device
2679 * we chose
2680 */
2681 di->devs = (struct mdinfo *) dl;
2682
2683 di->disk.raid_disk = i;
2684 di->disk.major = dl->major;
2685 di->disk.minor = dl->minor;
2686 di->disk.state = 0;
2687 di->data_offset = __le32_to_cpu(map->pba_of_lba0);
2688 di->component_size = a->info.component_size;
2689 di->container_member = inst;
2690 di->next = rv;
2691 rv = di;
2692 num_spares++;
2693 dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor,
2694 i, di->data_offset);
88758e9d 2695
272906ef 2696 break;
88758e9d
DW
2697 }
2698
2699 if (!rv)
2700 /* No spares found */
2701 return rv;
2702 /* Now 'rv' has a list of devices to return.
2703 * Create a metadata_update record to update the
2704 * disk_ord_tbl for the array
2705 */
2706 mu = malloc(sizeof(*mu));
2707 mu->buf = malloc(sizeof(struct imsm_update_activate_spare) * num_spares);
2708 mu->space = NULL;
2709 mu->len = sizeof(struct imsm_update_activate_spare) * num_spares;
2710 mu->next = *updates;
2711 u = (struct imsm_update_activate_spare *) mu->buf;
2712
2713 for (di = rv ; di ; di = di->next) {
2714 u->type = update_activate_spare;
d23fe947
DW
2715 u->dl = (struct dl *) di->devs;
2716 di->devs = NULL;
88758e9d
DW
2717 u->slot = di->disk.raid_disk;
2718 u->array = inst;
2719 u->next = u + 1;
2720 u++;
2721 }
2722 (u-1)->next = NULL;
2723 *updates = mu;
2724
2725 return rv;
2726}
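
/* Editor's illustrative sketch, not part of the original source: the update
 * buffer built above holds num_spares contiguous records, with u->next
 * pointing at the following record and NULL terminating the list, so a
 * consumer can walk it like this.
 */
static int example_count_spare_updates(struct imsm_update_activate_spare *u)
{
	int n = 0;

	while (u) {
		n++;
		u = u->next;
	}

	return n;
}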
2727
ff077194 2728static int disks_overlap(struct imsm_dev *d1, struct imsm_dev *d2)
8273f55e 2729{
ff077194
DW
2730 struct imsm_map *m1 = get_imsm_map(d1, 0);
2731 struct imsm_map *m2 = get_imsm_map(d2, 0);
8273f55e
DW
2732 int i;
2733 int j;
2734 int idx;
2735
2736 for (i = 0; i < m1->num_members; i++) {
ff077194 2737 idx = get_imsm_disk_idx(d1, i);
8273f55e 2738 for (j = 0; j < m2->num_members; j++)
ff077194 2739 if (idx == get_imsm_disk_idx(d2, j))
8273f55e
DW
2740 return 1;
2741 }
2742
2743 return 0;
2744}
2745
24565c9a 2746static void imsm_delete(struct intel_super *super, struct dl **dlp, int index);
ae6aad82 2747
e8319a19
DW
2748static void imsm_process_update(struct supertype *st,
2749 struct metadata_update *update)
2750{
2751 /**
2752 * crack open the metadata_update envelope to find the update record
2753 * update can be one of:
2754 * update_activate_spare - a spare device has replaced a failed
2755 * device in an array, update the disk_ord_tbl. If this disk is
2756 * present in all member arrays then also clear the SPARE_DISK
2757 * flag
2758 */
2759 struct intel_super *super = st->sb;
4d7b1503 2760 struct imsm_super *mpb;
e8319a19
DW
2761 enum imsm_update_type type = *(enum imsm_update_type *) update->buf;
2762
4d7b1503
DW
2763 /* update requires a larger buf but the allocation failed */
2764 if (super->next_len && !super->next_buf) {
2765 super->next_len = 0;
2766 return;
2767 }
2768
2769 if (super->next_buf) {
2770 memcpy(super->next_buf, super->buf, super->len);
2771 free(super->buf);
2772 super->len = super->next_len;
2773 super->buf = super->next_buf;
2774
2775 super->next_len = 0;
2776 super->next_buf = NULL;
2777 }
2778
2779 mpb = super->anchor;
2780
e8319a19
DW
2781 switch (type) {
2782 case update_activate_spare: {
2783 struct imsm_update_activate_spare *u = (void *) update->buf;
949c47a0 2784 struct imsm_dev *dev = get_imsm_dev(super, u->array);
a965f303 2785 struct imsm_map *map = get_imsm_map(dev, 0);
0c046afd 2786 struct imsm_map *migr_map;
e8319a19
DW
2787 struct active_array *a;
2788 struct imsm_disk *disk;
2789 __u32 status;
0c046afd 2790 __u8 to_state;
e8319a19 2791 struct dl *dl;
e8319a19 2792 unsigned int found;
0c046afd
DW
2793 int failed;
2794 int victim = get_imsm_disk_idx(dev, u->slot);
e8319a19
DW
2795 int i;
2796
2797 for (dl = super->disks; dl; dl = dl->next)
d23fe947 2798 if (dl == u->dl)
e8319a19
DW
2799 break;
2800
2801 if (!dl) {
2802 fprintf(stderr, "error: imsm_activate_spare passed "
1f24f035
DW
2803 "an unknown disk (index: %d)\n",
2804 u->dl->index);
e8319a19
DW
2805 return;
2806 }
2807
2808 super->updates_pending++;
2809
0c046afd
DW
2810 /* count failures (excluding rebuilds and the victim)
2811 * to determine map[0] state
2812 */
2813 failed = 0;
2814 for (i = 0; i < map->num_members; i++) {
2815 if (i == u->slot)
2816 continue;
2817 disk = get_imsm_disk(super, get_imsm_disk_idx(dev, i));
2818 if (!disk ||
2819 __le32_to_cpu(disk->status) & FAILED_DISK)
2820 failed++;
2821 }
2822
d23fe947
DW
2823 /* adding a pristine spare, assign a new index */
2824 if (dl->index < 0) {
2825 dl->index = super->anchor->num_disks;
2826 super->anchor->num_disks++;
2827 }
d23fe947 2828 disk = &dl->disk;
e8319a19
DW
2829 status = __le32_to_cpu(disk->status);
2830 status |= CONFIGURED_DISK;
b10b37b8 2831 status &= ~SPARE_DISK;
e8319a19
DW
2832 disk->status = __cpu_to_le32(status);
2833
0c046afd
DW
2834 /* mark rebuild */
2835 to_state = imsm_check_degraded(super, dev, failed);
2836 map->map_state = IMSM_T_STATE_DEGRADED;
2837 migrate(dev, to_state, 1);
2838 migr_map = get_imsm_map(dev, 1);
2839 set_imsm_ord_tbl_ent(map, u->slot, dl->index);
2840 set_imsm_ord_tbl_ent(migr_map, u->slot, dl->index | IMSM_ORD_REBUILD);
2841
e8319a19
DW
2842 /* count arrays using the victim in the metadata */
2843 found = 0;
2844 for (a = st->arrays; a ; a = a->next) {
949c47a0 2845 dev = get_imsm_dev(super, a->info.container_member);
e8319a19 2846 for (i = 0; i < map->num_members; i++)
ff077194 2847 if (victim == get_imsm_disk_idx(dev, i))
e8319a19
DW
2848 found++;
2849 }
2850
24565c9a 2851 /* delete the victim if it is no longer being
e8319a19
DW
2852 * utilized anywhere
2853 */
e8319a19 2854 if (!found) {
ae6aad82 2855 struct dl **dlp;
24565c9a
DW
2856
2857 for (dlp = &super->disks; *dlp; dlp = &(*dlp)->next)
ae6aad82
DW
2858 if ((*dlp)->index == victim)
2859 break;
ae6aad82
DW
2860 /* We know that 'manager' isn't touching anything,
2861 * so it is safe to:
2862 */
24565c9a 2863 imsm_delete(super, dlp, victim);
e8319a19 2864 }
8273f55e
DW
2865 break;
2866 }
2867 case update_create_array: {
2868 /* someone wants to create a new array, we need to be aware of
2869 * a few races/collisions:
2870 * 1/ 'Create' called by two separate instances of mdadm
2871 * 2/ 'Create' versus 'activate_spare': mdadm has chosen
2872 * devices that have since been assimilated via
2873 * activate_spare.
2874 * In the event this update can not be carried out mdadm will
2875 * (FIX ME) notice that its update did not take hold.
2876 */
2877 struct imsm_update_create_array *u = (void *) update->buf;
2878 struct imsm_dev *dev;
2879 struct imsm_map *map, *new_map;
2880 unsigned long long start, end;
2881 unsigned long long new_start, new_end;
2882 int i;
2883 int overlap = 0;
2884
2885 /* handle racing creates: first come first serve */
2886 if (u->dev_idx < mpb->num_raid_devs) {
2887 dprintf("%s: subarray %d already defined\n",
2888 __func__, u->dev_idx);
2889 return;
2890 }
2891
2892 /* check update is next in sequence */
2893 if (u->dev_idx != mpb->num_raid_devs) {
6a3e913e
DW
2894 dprintf("%s: can not create array %d expected index %d\n",
2895 __func__, u->dev_idx, mpb->num_raid_devs);
8273f55e
DW
2896 return;
2897 }
2898
a965f303 2899 new_map = get_imsm_map(&u->dev, 0);
8273f55e
DW
2900 new_start = __le32_to_cpu(new_map->pba_of_lba0);
2901 new_end = new_start + __le32_to_cpu(new_map->blocks_per_member);
2902
2903 /* handle activate_spare versus create race:
2904 * check to make sure that overlapping arrays do not include
2906		 * overlapping disks
2906 */
2907 for (i = 0; i < mpb->num_raid_devs; i++) {
949c47a0 2908 dev = get_imsm_dev(super, i);
a965f303 2909 map = get_imsm_map(dev, 0);
8273f55e
DW
2910 start = __le32_to_cpu(map->pba_of_lba0);
2911 end = start + __le32_to_cpu(map->blocks_per_member);
2912 if ((new_start >= start && new_start <= end) ||
2913 (start >= new_start && start <= new_end))
2914 overlap = 1;
ff077194 2915 if (overlap && disks_overlap(dev, &u->dev)) {
8273f55e
DW
2916 dprintf("%s: arrays overlap\n", __func__);
2917 return;
2918 }
2919 }
2920 /* check num_members sanity */
2921 if (new_map->num_members > mpb->num_disks) {
2922 dprintf("%s: num_disks out of range\n", __func__);
2923 return;
2924 }
2925
949c47a0
DW
2926 /* check that prepare update was successful */
2927 if (!update->space) {
2928 dprintf("%s: prepare update failed\n", __func__);
2929 return;
2930 }
2931
8273f55e 2932 super->updates_pending++;
949c47a0 2933 dev = update->space;
ff077194 2934 map = get_imsm_map(dev, 0);
949c47a0
DW
2935 update->space = NULL;
2936 imsm_copy_dev(dev, &u->dev);
e0783b41 2937 map = get_imsm_map(dev, 0);
949c47a0 2938 super->dev_tbl[u->dev_idx] = dev;
8273f55e 2939 mpb->num_raid_devs++;
8273f55e 2940
e0783b41 2941 /* fix up flags */
8273f55e
DW
2942 for (i = 0; i < map->num_members; i++) {
2943 struct imsm_disk *disk;
2944 __u32 status;
2945
ff077194 2946 disk = get_imsm_disk(super, get_imsm_disk_idx(dev, i));
8273f55e
DW
2947 status = __le32_to_cpu(disk->status);
2948 status |= CONFIGURED_DISK;
e0783b41 2949 status &= ~SPARE_DISK;
8273f55e
DW
2950 disk->status = __cpu_to_le32(status);
2951 }
2952 break;
e8319a19 2953 }
43dad3d6
DW
2954 case update_add_disk:
2955
2956 /* we may be able to repair some arrays if disks are
2957 * being added */
2958 if (super->add) {
2959 struct active_array *a;
2960 for (a = st->arrays; a; a = a->next)
2961 a->check_degraded = 1;
2962 }
2963 /* check if we can add / replace some disks in the
2964 * metadata */
2965 while (super->add) {
2966 struct dl **dlp, *dl, *al;
2967 al = super->add;
2968 super->add = al->next;
2969 for (dlp = &super->disks; *dlp ; ) {
1f24f035 2970 if (serialcmp(al->serial, (*dlp)->serial) == 0) {
43dad3d6
DW
2971 dl = *dlp;
2972 *dlp = (*dlp)->next;
2973 __free_imsm_disk(dl);
2974 break;
2975 } else
2976 dlp = &(*dlp)->next;
2977 }
2978 al->next = super->disks;
2979 super->disks = al;
2980 }
2981
2982 break;
e8319a19
DW
2983 }
2984}
88758e9d 2985
8273f55e
DW
2986static void imsm_prepare_update(struct supertype *st,
2987 struct metadata_update *update)
2988{
949c47a0 2989 /**
4d7b1503
DW
2990 * Allocate space to hold new disk entries, raid-device entries or a new
2991 * mpb if necessary. The manager synchronously waits for updates to
2992 * complete in the monitor, so new mpb buffers allocated here can be
2993 * integrated by the monitor thread without worrying about live pointers
2994 * in the manager thread.
8273f55e 2995 */
949c47a0 2996 enum imsm_update_type type = *(enum imsm_update_type *) update->buf;
4d7b1503
DW
2997 struct intel_super *super = st->sb;
2998 struct imsm_super *mpb = super->anchor;
2999 size_t buf_len;
3000 size_t len = 0;
949c47a0
DW
3001
3002 switch (type) {
3003 case update_create_array: {
3004 struct imsm_update_create_array *u = (void *) update->buf;
949c47a0 3005
4d7b1503 3006 len = sizeof_imsm_dev(&u->dev, 1);
949c47a0
DW
3007 update->space = malloc(len);
3008 break;
3009 default:
3010 break;
3011 }
3012 }
8273f55e 3013
4d7b1503
DW
3014 /* check if we need a larger metadata buffer */
3015 if (super->next_buf)
3016 buf_len = super->next_len;
3017 else
3018 buf_len = super->len;
3019
3020 if (__le32_to_cpu(mpb->mpb_size) + len > buf_len) {
3021 /* ok we need a larger buf than what is currently allocated
3022 * if this allocation fails process_update will notice that
3023 * ->next_len is set and ->next_buf is NULL
3024 */
3025 buf_len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) + len, 512);
3026 if (super->next_buf)
3027 free(super->next_buf);
3028
3029 super->next_len = buf_len;
3030		if (posix_memalign(&super->next_buf, 512, buf_len) != 0)
3031 super->next_buf = NULL;
3032 }
8273f55e
DW
3033}
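
/* Editor's illustrative sketch, not part of the original source: the test
 * above decides whether a pending update outgrows the current (or already
 * scheduled) mpb buffer; a predicate mirroring it might look like this.
 */
static int example_needs_bigger_buf(struct intel_super *super, size_t len)
{
	size_t buf_len = super->next_buf ? super->next_len : super->len;

	return __le32_to_cpu(super->anchor->mpb_size) + len > buf_len;
}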
3034
ae6aad82 3035/* must be called while manager is quiesced */
24565c9a 3036static void imsm_delete(struct intel_super *super, struct dl **dlp, int index)
ae6aad82
DW
3037{
3038 struct imsm_super *mpb = super->anchor;
ae6aad82
DW
3039 struct dl *iter;
3040 struct imsm_dev *dev;
3041 struct imsm_map *map;
24565c9a
DW
3042 int i, j, num_members;
3043 __u32 ord;
ae6aad82 3044
24565c9a
DW
3045 dprintf("%s: deleting device[%d] from imsm_super\n",
3046 __func__, index);
ae6aad82
DW
3047
3048 /* shift all indexes down one */
3049 for (iter = super->disks; iter; iter = iter->next)
24565c9a 3050 if (iter->index > index)
ae6aad82
DW
3051 iter->index--;
3052
3053 for (i = 0; i < mpb->num_raid_devs; i++) {
3054 dev = get_imsm_dev(super, i);
3055 map = get_imsm_map(dev, 0);
24565c9a
DW
3056 num_members = map->num_members;
3057 for (j = 0; j < num_members; j++) {
3058 /* update ord entries being careful not to propagate
3059 * ord-flags to the first map
3060 */
3061 ord = get_imsm_ord_tbl_ent(dev, j);
ae6aad82 3062
24565c9a
DW
3063 if (ord_to_idx(ord) <= index)
3064 continue;
ae6aad82 3065
24565c9a
DW
3066 map = get_imsm_map(dev, 0);
3067 set_imsm_ord_tbl_ent(map, j, ord_to_idx(ord - 1));
3068 map = get_imsm_map(dev, 1);
3069 if (map)
3070 set_imsm_ord_tbl_ent(map, j, ord - 1);
ae6aad82
DW
3071 }
3072 }
3073
3074 mpb->num_disks--;
3075 super->updates_pending++;
24565c9a
DW
3076 if (*dlp) {
3077 struct dl *dl = *dlp;
3078
3079 *dlp = (*dlp)->next;
3080 __free_imsm_disk(dl);
3081 }
ae6aad82
DW
3082}
3083
cdddbdbc
DW
3084struct superswitch super_imsm = {
3085#ifndef MDASSEMBLE
3086 .examine_super = examine_super_imsm,
3087 .brief_examine_super = brief_examine_super_imsm,
3088 .detail_super = detail_super_imsm,
3089 .brief_detail_super = brief_detail_super_imsm,
bf5a934a 3090 .write_init_super = write_init_super_imsm,
cdddbdbc
DW
3091#endif
3092 .match_home = match_home_imsm,
3093 .uuid_from_super= uuid_from_super_imsm,
3094 .getinfo_super = getinfo_super_imsm,
3095 .update_super = update_super_imsm,
3096
3097 .avail_size = avail_size_imsm,
3098
3099 .compare_super = compare_super_imsm,
3100
3101 .load_super = load_super_imsm,
bf5a934a
DW
3102 .init_super = init_super_imsm,
3103 .add_to_super = add_to_super_imsm,
cdddbdbc
DW
3104 .store_super = store_zero_imsm,
3105 .free_super = free_super_imsm,
3106 .match_metadata_desc = match_metadata_desc_imsm,
bf5a934a 3107 .container_content = container_content_imsm,
cdddbdbc
DW
3108
3109 .validate_geometry = validate_geometry_imsm,
cdddbdbc 3110 .external = 1,
845dea95
NB
3111
3112/* for mdmon */
3113 .open_new = imsm_open_new,
3114 .load_super = load_super_imsm,
ed9d66aa 3115 .set_array_state= imsm_set_array_state,
845dea95
NB
3116 .set_disk = imsm_set_disk,
3117 .sync_metadata = imsm_sync_metadata,
88758e9d 3118 .activate_spare = imsm_activate_spare,
e8319a19 3119 .process_update = imsm_process_update,
8273f55e 3120 .prepare_update = imsm_prepare_update,
cdddbdbc 3121};