/*
 * mdadm - Intel(R) Matrix Storage Manager Support
 *
 * Copyright (C) 2002-2008 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define HAVE_STDINT_H 1
#include "mdadm.h"
#include "mdmon.h"
#include "sha1.h"
#include "platform-intel.h"
#include <values.h>
#include <scsi/sg.h>
#include <ctype.h>
#include <dirent.h>

/* MPB == Metadata Parameter Block */
#define MPB_SIGNATURE "Intel Raid ISM Cfg Sig. "
#define MPB_SIG_LEN (strlen(MPB_SIGNATURE))
#define MPB_VERSION_RAID0 "1.0.00"
#define MPB_VERSION_RAID1 "1.1.00"
#define MPB_VERSION_MANY_VOLUMES_PER_ARRAY "1.2.00"
#define MPB_VERSION_3OR4_DISK_ARRAY "1.2.01"
#define MPB_VERSION_RAID5 "1.2.02"
#define MPB_VERSION_5OR6_DISK_ARRAY "1.2.04"
#define MPB_VERSION_CNG "1.2.06"
#define MPB_VERSION_ATTRIBS "1.3.00"
#define MAX_SIGNATURE_LENGTH 32
#define MAX_RAID_SERIAL_LEN 16

#define MPB_ATTRIB_CHECKSUM_VERIFY __cpu_to_le32(0x80000000)
#define MPB_ATTRIB_PM __cpu_to_le32(0x40000000)
#define MPB_ATTRIB_2TB __cpu_to_le32(0x20000000)
#define MPB_ATTRIB_RAID0 __cpu_to_le32(0x00000001)
#define MPB_ATTRIB_RAID1 __cpu_to_le32(0x00000002)
#define MPB_ATTRIB_RAID10 __cpu_to_le32(0x00000004)
#define MPB_ATTRIB_RAID1E __cpu_to_le32(0x00000008)
#define MPB_ATTRIB_RAID5 __cpu_to_le32(0x00000010)
#define MPB_ATTRIB_RAIDCNG __cpu_to_le32(0x00000020)

#define MPB_SECTOR_CNT 418
#define IMSM_RESERVED_SECTORS 4096
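/* Added commentary: avail_size_imsm() below subtracts
 * MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS from the raw device size, so with
 * 512-byte sectors roughly 209 KiB of anchor space plus a 2 MiB reserved
 * region at the end of each member disk is kept out of any volume.  The
 * exact firmware layout of that reserved region is an assumption here, not
 * something this file spells out.
 */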
56 | ||
57 | /* Disk configuration info. */ | |
58 | #define IMSM_MAX_DEVICES 255 | |
59 | struct imsm_disk { | |
60 | __u8 serial[MAX_RAID_SERIAL_LEN];/* 0xD8 - 0xE7 ascii serial number */ | |
61 | __u32 total_blocks; /* 0xE8 - 0xEB total blocks */ | |
62 | __u32 scsi_id; /* 0xEC - 0xEF scsi ID */ | |
63 | #define SPARE_DISK __cpu_to_le32(0x01) /* Spare */ | |
64 | #define CONFIGURED_DISK __cpu_to_le32(0x02) /* Member of some RaidDev */ | |
65 | #define FAILED_DISK __cpu_to_le32(0x04) /* Permanent failure */ | |
66 | #define USABLE_DISK __cpu_to_le32(0x08) /* Fully usable unless FAILED_DISK is set */ | |
67 | __u32 status; /* 0xF0 - 0xF3 */ | |
68 | __u32 owner_cfg_num; /* which config 0,1,2... owns this disk */ | |
69 | #define IMSM_DISK_FILLERS 4 | |
70 | __u32 filler[IMSM_DISK_FILLERS]; /* 0xF4 - 0x107 MPB_DISK_FILLERS for future expansion */ | |
71 | }; | |
72 | ||
/* RAID map configuration info. */
struct imsm_map {
	__u32 pba_of_lba0;	/* start address of partition */
	__u32 blocks_per_member;/* blocks per member */
	__u32 num_data_stripes;	/* number of data stripes */
	__u16 blocks_per_strip;
	__u8  map_state;	/* Normal, Uninitialized, Degraded, Failed */
#define IMSM_T_STATE_NORMAL 0
#define IMSM_T_STATE_UNINITIALIZED 1
#define IMSM_T_STATE_DEGRADED 2
#define IMSM_T_STATE_FAILED 3
	__u8  raid_level;
#define IMSM_T_RAID0 0
#define IMSM_T_RAID1 1
#define IMSM_T_RAID5 5		/* since metadata version 1.2.02 ? */
	__u8  num_members;	/* number of member disks */
	__u8  num_domains;	/* number of parity domains */
	__u8  failed_disk_num;	/* valid only when state is degraded */
	__u8  reserved[1];
	__u32 filler[7];	/* expansion area */
#define IMSM_ORD_REBUILD (1 << 24)
	__u32 disk_ord_tbl[1];	/* disk_ord_tbl[num_members],
				 * top byte contains some flags
				 */
} __attribute__ ((packed));
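/* Added note: disk_ord_tbl[] is declared with a single element but is
 * really a variable-length tail; sizeof_imsm_map() below sizes a map as
 * sizeof(struct imsm_map) + sizeof(__u32) * (num_members - 1), e.g. a
 * four-member map occupies the struct plus three extra ordinal words.
 */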
98 | ||
99 | struct imsm_vol { | |
100 | __u32 curr_migr_unit; | |
101 | __u32 checkpoint_id; /* id to access curr_migr_unit */ | |
102 | __u8 migr_state; /* Normal or Migrating */ | |
103 | #define MIGR_INIT 0 | |
104 | #define MIGR_REBUILD 1 | |
#define MIGR_VERIFY 2 /* analogous to echo check > sync_action */
#define MIGR_GEN_MIGR 3
#define MIGR_STATE_CHANGE 4
	__u8  migr_type;	/* Initializing, Rebuilding, ... */
	__u8  dirty;
	__u8  fs_state;		/* fast-sync state for CnG (0xff == disabled) */
	__u16 verify_errors;	/* number of mismatches */
	__u16 bad_blocks;	/* number of bad blocks during verify */
	__u32 filler[4];
	struct imsm_map map[1];
	/* here comes another one if migr_state */
} __attribute__ ((packed));

struct imsm_dev {
	__u8  volume[MAX_RAID_SERIAL_LEN];
	__u32 size_low;
	__u32 size_high;
#define DEV_BOOTABLE		__cpu_to_le32(0x01)
#define DEV_BOOT_DEVICE		__cpu_to_le32(0x02)
#define DEV_READ_COALESCING	__cpu_to_le32(0x04)
#define DEV_WRITE_COALESCING	__cpu_to_le32(0x08)
#define DEV_LAST_SHUTDOWN_DIRTY	__cpu_to_le32(0x10)
#define DEV_HIDDEN_AT_BOOT	__cpu_to_le32(0x20)
#define DEV_CURRENTLY_HIDDEN	__cpu_to_le32(0x40)
#define DEV_VERIFY_AND_FIX	__cpu_to_le32(0x80)
#define DEV_MAP_STATE_UNINIT	__cpu_to_le32(0x100)
#define DEV_NO_AUTO_RECOVERY	__cpu_to_le32(0x200)
#define DEV_CLONE_N_GO		__cpu_to_le32(0x400)
#define DEV_CLONE_MAN_SYNC	__cpu_to_le32(0x800)
#define DEV_CNG_MASTER_DISK_NUM	__cpu_to_le32(0x1000)
	__u32 status;		/* Persistent RaidDev status */
	__u32 reserved_blocks;	/* Reserved blocks at beginning of volume */
	__u8  migr_priority;
	__u8  num_sub_vols;
	__u8  tid;
	__u8  cng_master_disk;
	__u16 cache_policy;
	__u8  cng_state;
	__u8  cng_sub_state;
#define IMSM_DEV_FILLERS 10
	__u32 filler[IMSM_DEV_FILLERS];
	struct imsm_vol vol;
} __attribute__ ((packed));

struct imsm_super {
	__u8 sig[MAX_SIGNATURE_LENGTH];	/* 0x00 - 0x1F */
	__u32 check_sum;	/* 0x20 - 0x23 MPB Checksum */
	__u32 mpb_size;		/* 0x24 - 0x27 Size of MPB */
	__u32 family_num;	/* 0x28 - 0x2B Checksum from first time this config was written */
	__u32 generation_num;	/* 0x2C - 0x2F Incremented each time this array's MPB is written */
	__u32 error_log_size;	/* 0x30 - 0x33 in bytes */
	__u32 attributes;	/* 0x34 - 0x37 */
	__u8 num_disks;		/* 0x38 Number of configured disks */
	__u8 num_raid_devs;	/* 0x39 Number of configured volumes */
	__u8 error_log_pos;	/* 0x3A */
	__u8 fill[1];		/* 0x3B */
	__u32 cache_size;	/* 0x3c - 0x40 in mb */
	__u32 orig_family_num;	/* 0x40 - 0x43 original family num */
	__u32 pwr_cycle_count;	/* 0x44 - 0x47 simulated power cycle count for array */
	__u32 bbm_log_size;	/* 0x48 - 0x4B - size of bad Block Mgmt Log in bytes */
#define IMSM_FILLERS 35
	__u32 filler[IMSM_FILLERS]; /* 0x4C - 0xD7 RAID_MPB_FILLERS */
	struct imsm_disk disk[1]; /* 0xD8 diskTbl[numDisks] */
	/* here comes imsm_dev[num_raid_devs] */
	/* here comes BBM logs */
} __attribute__ ((packed));

#define BBM_LOG_MAX_ENTRIES 254

struct bbm_log_entry {
	__u64 defective_block_start;
#define UNREADABLE 0xFFFFFFFF
	__u32 spare_block_offset;
	__u16 remapped_marked_count;
	__u16 disk_ordinal;
} __attribute__ ((__packed__));

struct bbm_log {
	__u32 signature;	/* 0xABADB10C */
	__u32 entry_count;
	__u32 reserved_spare_block_count; /* 0 */
	__u32 reserved; /* 0xFFFF */
	__u64 first_spare_lba;
	struct bbm_log_entry mapped_block_entries[BBM_LOG_MAX_ENTRIES];
} __attribute__ ((__packed__));


#ifndef MDASSEMBLE
static char *map_state_str[] = { "normal", "uninitialized", "degraded", "failed" };
#endif

static unsigned int sector_count(__u32 bytes)
{
	return ((bytes + (512-1)) & (~(512-1))) / 512;
}
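/* Added note: sector_count() rounds a byte count up to whole 512-byte
 * sectors, e.g. sector_count(1) == 1 and sector_count(1025) == 3, since
 * (1025 + 511) & ~511 == 1536 and 1536 / 512 == 3.
 */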
200 | ||
201 | static unsigned int mpb_sectors(struct imsm_super *mpb) | |
202 | { | |
203 | return sector_count(__le32_to_cpu(mpb->mpb_size)); | |
204 | } | |
205 | ||
206 | struct intel_dev { | |
207 | struct imsm_dev *dev; | |
208 | struct intel_dev *next; | |
209 | int index; | |
210 | }; | |
211 | ||
212 | /* internal representation of IMSM metadata */ | |
213 | struct intel_super { | |
214 | union { | |
215 | void *buf; /* O_DIRECT buffer for reading/writing metadata */ | |
216 | struct imsm_super *anchor; /* immovable parameters */ | |
217 | }; | |
218 | size_t len; /* size of the 'buf' allocation */ | |
219 | void *next_buf; /* for realloc'ing buf from the manager */ | |
220 | size_t next_len; | |
221 | int updates_pending; /* count of pending updates for mdmon */ | |
222 | int creating_imsm; /* flag to indicate container creation */ | |
223 | int current_vol; /* index of raid device undergoing creation */ | |
224 | __u32 create_offset; /* common start for 'current_vol' */ | |
225 | struct intel_dev *devlist; | |
226 | struct dl { | |
227 | struct dl *next; | |
228 | int index; | |
229 | __u8 serial[MAX_RAID_SERIAL_LEN]; | |
230 | int major, minor; | |
231 | char *devname; | |
232 | struct imsm_disk disk; | |
233 | int fd; | |
234 | int extent_cnt; | |
235 | struct extent *e; /* for determining freespace @ create */ | |
236 | int raiddisk; /* slot to fill in autolayout */ | |
237 | } *disks; | |
238 | struct dl *add; /* list of disks to add while mdmon active */ | |
239 | struct dl *missing; /* disks removed while we weren't looking */ | |
240 | struct bbm_log *bbm_log; | |
241 | const char *hba; /* device path of the raid controller for this metadata */ | |
242 | const struct imsm_orom *orom; /* platform firmware support */ | |
243 | }; | |
244 | ||
245 | struct extent { | |
246 | unsigned long long start, size; | |
247 | }; | |
248 | ||
249 | /* definition of messages passed to imsm_process_update */ | |
250 | enum imsm_update_type { | |
251 | update_activate_spare, | |
252 | update_create_array, | |
253 | update_add_disk, | |
254 | }; | |
255 | ||
256 | struct imsm_update_activate_spare { | |
257 | enum imsm_update_type type; | |
258 | struct dl *dl; | |
259 | int slot; | |
260 | int array; | |
261 | struct imsm_update_activate_spare *next; | |
262 | }; | |
263 | ||
264 | struct disk_info { | |
265 | __u8 serial[MAX_RAID_SERIAL_LEN]; | |
266 | }; | |
267 | ||
268 | struct imsm_update_create_array { | |
269 | enum imsm_update_type type; | |
270 | int dev_idx; | |
271 | struct imsm_dev dev; | |
272 | }; | |
273 | ||
274 | struct imsm_update_add_disk { | |
275 | enum imsm_update_type type; | |
276 | }; | |
277 | ||
278 | static struct supertype *match_metadata_desc_imsm(char *arg) | |
279 | { | |
280 | struct supertype *st; | |
281 | ||
282 | if (strcmp(arg, "imsm") != 0 && | |
283 | strcmp(arg, "default") != 0 | |
284 | ) | |
285 | return NULL; | |
286 | ||
287 | st = malloc(sizeof(*st)); | |
288 | memset(st, 0, sizeof(*st)); | |
289 | st->ss = &super_imsm; | |
290 | st->max_devs = IMSM_MAX_DEVICES; | |
291 | st->minor_version = 0; | |
292 | st->sb = NULL; | |
293 | return st; | |
294 | } | |
295 | ||
#ifndef MDASSEMBLE
static __u8 *get_imsm_version(struct imsm_super *mpb)
{
	return &mpb->sig[MPB_SIG_LEN];
}
#endif

/* retrieve a disk directly from the anchor when the anchor is known to be
 * up-to-date, currently only at load time
 */
static struct imsm_disk *__get_imsm_disk(struct imsm_super *mpb, __u8 index)
{
	if (index >= mpb->num_disks)
		return NULL;
	return &mpb->disk[index];
}

#ifndef MDASSEMBLE
/* retrieve a disk from the parsed metadata */
static struct imsm_disk *get_imsm_disk(struct intel_super *super, __u8 index)
{
	struct dl *d;

	for (d = super->disks; d; d = d->next)
		if (d->index == index)
			return &d->disk;

	return NULL;
}
#endif

/* generate a checksum directly from the anchor when the anchor is known to be
 * up-to-date, currently only at load or write_super after coalescing
 */
static __u32 __gen_imsm_checksum(struct imsm_super *mpb)
{
	__u32 end = mpb->mpb_size / sizeof(end);
	__u32 *p = (__u32 *) mpb;
	__u32 sum = 0;

	while (end--) {
		sum += __le32_to_cpu(*p);
		p++;
	}

	return sum - __le32_to_cpu(mpb->check_sum);
}
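/* Added note: the anchor checksum is simply the 32-bit sum of every
 * little-endian word in the MPB with the stored check_sum word excluded
 * (it is subtracted back out above), so a consistent anchor satisfies
 * __gen_imsm_checksum(mpb) == __le32_to_cpu(mpb->check_sum), which is how
 * examine_super_imsm() reports "correct" vs "incorrect" below.
 */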
343 | ||
344 | static size_t sizeof_imsm_map(struct imsm_map *map) | |
345 | { | |
346 | return sizeof(struct imsm_map) + sizeof(__u32) * (map->num_members - 1); | |
347 | } | |
348 | ||
349 | struct imsm_map *get_imsm_map(struct imsm_dev *dev, int second_map) | |
350 | { | |
351 | struct imsm_map *map = &dev->vol.map[0]; | |
352 | ||
353 | if (second_map && !dev->vol.migr_state) | |
354 | return NULL; | |
355 | else if (second_map) { | |
356 | void *ptr = map; | |
357 | ||
358 | return ptr + sizeof_imsm_map(map); | |
359 | } else | |
360 | return map; | |
361 | ||
362 | } | |
363 | ||
364 | /* return the size of the device. | |
365 | * migr_state increases the returned size if map[0] were to be duplicated | |
366 | */ | |
367 | static size_t sizeof_imsm_dev(struct imsm_dev *dev, int migr_state) | |
368 | { | |
369 | size_t size = sizeof(*dev) - sizeof(struct imsm_map) + | |
370 | sizeof_imsm_map(get_imsm_map(dev, 0)); | |
371 | ||
372 | /* migrating means an additional map */ | |
373 | if (dev->vol.migr_state) | |
374 | size += sizeof_imsm_map(get_imsm_map(dev, 1)); | |
375 | else if (migr_state) | |
376 | size += sizeof_imsm_map(get_imsm_map(dev, 0)); | |
377 | ||
378 | return size; | |
379 | } | |
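/* Added note: on disk an imsm_dev is followed immediately by one imsm_map
 * (the one embedded in vol.map[0]), and by a second map of the same shape
 * while a migration is in progress; get_imsm_map(dev, 1) simply steps past
 * the first map to reach it.
 */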
380 | ||
381 | #ifndef MDASSEMBLE | |
382 | /* retrieve disk serial number list from a metadata update */ | |
383 | static struct disk_info *get_disk_info(struct imsm_update_create_array *update) | |
384 | { | |
385 | void *u = update; | |
386 | struct disk_info *inf; | |
387 | ||
388 | inf = u + sizeof(*update) - sizeof(struct imsm_dev) + | |
389 | sizeof_imsm_dev(&update->dev, 0); | |
390 | ||
391 | return inf; | |
392 | } | |
393 | #endif | |
394 | ||
395 | static struct imsm_dev *__get_imsm_dev(struct imsm_super *mpb, __u8 index) | |
396 | { | |
397 | int offset; | |
398 | int i; | |
399 | void *_mpb = mpb; | |
400 | ||
401 | if (index >= mpb->num_raid_devs) | |
402 | return NULL; | |
403 | ||
404 | /* devices start after all disks */ | |
405 | offset = ((void *) &mpb->disk[mpb->num_disks]) - _mpb; | |
406 | ||
407 | for (i = 0; i <= index; i++) | |
408 | if (i == index) | |
409 | return _mpb + offset; | |
410 | else | |
411 | offset += sizeof_imsm_dev(_mpb + offset, 0); | |
412 | ||
413 | return NULL; | |
414 | } | |
415 | ||
416 | static struct imsm_dev *get_imsm_dev(struct intel_super *super, __u8 index) | |
417 | { | |
418 | struct intel_dev *dv; | |
419 | ||
420 | if (index >= super->anchor->num_raid_devs) | |
421 | return NULL; | |
422 | for (dv = super->devlist; dv; dv = dv->next) | |
423 | if (dv->index == index) | |
424 | return dv->dev; | |
425 | return NULL; | |
426 | } | |
427 | ||
428 | static __u32 get_imsm_ord_tbl_ent(struct imsm_dev *dev, int slot) | |
429 | { | |
430 | struct imsm_map *map; | |
431 | ||
432 | if (dev->vol.migr_state) | |
433 | map = get_imsm_map(dev, 1); | |
434 | else | |
435 | map = get_imsm_map(dev, 0); | |
436 | ||
437 | /* top byte identifies disk under rebuild */ | |
438 | return __le32_to_cpu(map->disk_ord_tbl[slot]); | |
439 | } | |
440 | ||
441 | #define ord_to_idx(ord) (((ord) << 8) >> 8) | |
442 | static __u32 get_imsm_disk_idx(struct imsm_dev *dev, int slot) | |
443 | { | |
444 | __u32 ord = get_imsm_ord_tbl_ent(dev, slot); | |
445 | ||
446 | return ord_to_idx(ord); | |
447 | } | |
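/* Added note: each disk_ord_tbl entry keeps the disk index in its low 24
 * bits and flags in the top byte; ord_to_idx() shifts the flags away, so
 * for ord == (IMSM_ORD_REBUILD | 3) the member index is 3 and the rebuild
 * flag can still be tested with (ord & IMSM_ORD_REBUILD).
 */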
448 | ||
449 | static void set_imsm_ord_tbl_ent(struct imsm_map *map, int slot, __u32 ord) | |
450 | { | |
451 | map->disk_ord_tbl[slot] = __cpu_to_le32(ord); | |
452 | } | |
453 | ||
454 | static int get_imsm_disk_slot(struct imsm_map *map, int idx) | |
455 | { | |
456 | int slot; | |
457 | __u32 ord; | |
458 | ||
459 | for (slot = 0; slot < map->num_members; slot++) { | |
460 | ord = __le32_to_cpu(map->disk_ord_tbl[slot]); | |
461 | if (ord_to_idx(ord) == idx) | |
462 | return slot; | |
463 | } | |
464 | ||
465 | return -1; | |
466 | } | |
467 | ||
468 | static int get_imsm_raid_level(struct imsm_map *map) | |
469 | { | |
470 | if (map->raid_level == 1) { | |
471 | if (map->num_members == 2) | |
472 | return 1; | |
473 | else | |
474 | return 10; | |
475 | } | |
476 | ||
477 | return map->raid_level; | |
478 | } | |
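/* Added note: IMSM stores both RAID1 and RAID10 with raid_level == 1 and
 * distinguishes them only by member count, so a two-disk level-1 map is
 * reported as md RAID1 while a level-1 map with more members is reported
 * as RAID10; RAID0 and RAID5 pass through unchanged.
 */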
479 | ||
480 | static int cmp_extent(const void *av, const void *bv) | |
481 | { | |
482 | const struct extent *a = av; | |
483 | const struct extent *b = bv; | |
484 | if (a->start < b->start) | |
485 | return -1; | |
486 | if (a->start > b->start) | |
487 | return 1; | |
488 | return 0; | |
489 | } | |
490 | ||
491 | static int count_memberships(struct dl *dl, struct intel_super *super) | |
492 | { | |
493 | int memberships = 0; | |
494 | int i; | |
495 | ||
496 | for (i = 0; i < super->anchor->num_raid_devs; i++) { | |
497 | struct imsm_dev *dev = get_imsm_dev(super, i); | |
498 | struct imsm_map *map = get_imsm_map(dev, 0); | |
499 | ||
500 | if (get_imsm_disk_slot(map, dl->index) >= 0) | |
501 | memberships++; | |
502 | } | |
503 | ||
504 | return memberships; | |
505 | } | |
506 | ||
507 | static struct extent *get_extents(struct intel_super *super, struct dl *dl) | |
508 | { | |
509 | /* find a list of used extents on the given physical device */ | |
510 | struct extent *rv, *e; | |
511 | int i; | |
512 | int memberships = count_memberships(dl, super); | |
513 | __u32 reservation = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS; | |
514 | ||
515 | rv = malloc(sizeof(struct extent) * (memberships + 1)); | |
516 | if (!rv) | |
517 | return NULL; | |
518 | e = rv; | |
519 | ||
520 | for (i = 0; i < super->anchor->num_raid_devs; i++) { | |
521 | struct imsm_dev *dev = get_imsm_dev(super, i); | |
522 | struct imsm_map *map = get_imsm_map(dev, 0); | |
523 | ||
524 | if (get_imsm_disk_slot(map, dl->index) >= 0) { | |
525 | e->start = __le32_to_cpu(map->pba_of_lba0); | |
526 | e->size = __le32_to_cpu(map->blocks_per_member); | |
527 | e++; | |
528 | } | |
529 | } | |
530 | qsort(rv, memberships, sizeof(*rv), cmp_extent); | |
531 | ||
532 | /* determine the start of the metadata | |
533 | * when no raid devices are defined use the default | |
534 | * ...otherwise allow the metadata to truncate the value | |
535 | * as is the case with older versions of imsm | |
536 | */ | |
537 | if (memberships) { | |
538 | struct extent *last = &rv[memberships - 1]; | |
539 | __u32 remainder; | |
540 | ||
541 | remainder = __le32_to_cpu(dl->disk.total_blocks) - | |
542 | (last->start + last->size); | |
543 | /* round down to 1k block to satisfy precision of the kernel | |
544 | * 'size' interface | |
545 | */ | |
546 | remainder &= ~1UL; | |
547 | /* make sure remainder is still sane */ | |
548 | if (remainder < ROUND_UP(super->len, 512) >> 9) | |
549 | remainder = ROUND_UP(super->len, 512) >> 9; | |
550 | if (reservation > remainder) | |
551 | reservation = remainder; | |
552 | } | |
553 | e->start = __le32_to_cpu(dl->disk.total_blocks) - reservation; | |
554 | e->size = 0; | |
555 | return rv; | |
556 | } | |
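/* Added note: the returned array always ends with a zero-size sentinel
 * extent whose start marks where the reserved metadata region begins, and
 * callers such as imsm_reserved_sectors() scan for that terminating entry
 * rather than tracking an explicit count.
 */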
557 | ||
558 | /* try to determine how much space is reserved for metadata from | |
559 | * the last get_extents() entry, otherwise fallback to the | |
560 | * default | |
561 | */ | |
562 | static __u32 imsm_reserved_sectors(struct intel_super *super, struct dl *dl) | |
563 | { | |
564 | struct extent *e; | |
565 | int i; | |
566 | __u32 rv; | |
567 | ||
568 | /* for spares just return a minimal reservation which will grow | |
569 | * once the spare is picked up by an array | |
570 | */ | |
571 | if (dl->index == -1) | |
572 | return MPB_SECTOR_CNT; | |
573 | ||
574 | e = get_extents(super, dl); | |
575 | if (!e) | |
576 | return MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS; | |
577 | ||
578 | /* scroll to last entry */ | |
579 | for (i = 0; e[i].size; i++) | |
580 | continue; | |
581 | ||
582 | rv = __le32_to_cpu(dl->disk.total_blocks) - e[i].start; | |
583 | ||
584 | free(e); | |
585 | ||
586 | return rv; | |
587 | } | |
588 | ||
589 | #ifndef MDASSEMBLE | |
590 | static void print_imsm_dev(struct imsm_dev *dev, char *uuid, int disk_idx) | |
591 | { | |
592 | __u64 sz; | |
593 | int slot; | |
594 | struct imsm_map *map = get_imsm_map(dev, 0); | |
595 | __u32 ord; | |
596 | ||
597 | printf("\n"); | |
598 | printf("[%.16s]:\n", dev->volume); | |
599 | printf(" UUID : %s\n", uuid); | |
600 | printf(" RAID Level : %d\n", get_imsm_raid_level(map)); | |
601 | printf(" Members : %d\n", map->num_members); | |
602 | slot = get_imsm_disk_slot(map, disk_idx); | |
603 | if (slot >= 0) { | |
604 | ord = get_imsm_ord_tbl_ent(dev, slot); | |
605 | printf(" This Slot : %d%s\n", slot, | |
606 | ord & IMSM_ORD_REBUILD ? " (out-of-sync)" : ""); | |
607 | } else | |
608 | printf(" This Slot : ?\n"); | |
609 | sz = __le32_to_cpu(dev->size_high); | |
610 | sz <<= 32; | |
611 | sz += __le32_to_cpu(dev->size_low); | |
612 | printf(" Array Size : %llu%s\n", (unsigned long long)sz, | |
613 | human_size(sz * 512)); | |
614 | sz = __le32_to_cpu(map->blocks_per_member); | |
615 | printf(" Per Dev Size : %llu%s\n", (unsigned long long)sz, | |
616 | human_size(sz * 512)); | |
617 | printf(" Sector Offset : %u\n", | |
618 | __le32_to_cpu(map->pba_of_lba0)); | |
619 | printf(" Num Stripes : %u\n", | |
620 | __le32_to_cpu(map->num_data_stripes)); | |
621 | printf(" Chunk Size : %u KiB\n", | |
622 | __le16_to_cpu(map->blocks_per_strip) / 2); | |
623 | printf(" Reserved : %d\n", __le32_to_cpu(dev->reserved_blocks)); | |
624 | printf(" Migrate State : %s", dev->vol.migr_state ? "migrating" : "idle"); | |
625 | if (dev->vol.migr_state) | |
626 | printf(": %s", dev->vol.migr_type ? "rebuilding" : "initializing"); | |
627 | printf("\n"); | |
628 | printf(" Map State : %s", map_state_str[map->map_state]); | |
629 | if (dev->vol.migr_state) { | |
630 | struct imsm_map *map = get_imsm_map(dev, 1); | |
631 | printf(" <-- %s", map_state_str[map->map_state]); | |
632 | } | |
633 | printf("\n"); | |
634 | printf(" Dirty State : %s\n", dev->vol.dirty ? "dirty" : "clean"); | |
635 | } | |
636 | ||
637 | static void print_imsm_disk(struct imsm_super *mpb, int index, __u32 reserved) | |
638 | { | |
639 | struct imsm_disk *disk = __get_imsm_disk(mpb, index); | |
640 | char str[MAX_RAID_SERIAL_LEN + 1]; | |
641 | __u32 s; | |
642 | __u64 sz; | |
643 | ||
644 | if (index < 0) | |
645 | return; | |
646 | ||
647 | printf("\n"); | |
648 | snprintf(str, MAX_RAID_SERIAL_LEN + 1, "%s", disk->serial); | |
649 | printf(" Disk%02d Serial : %s\n", index, str); | |
650 | s = disk->status; | |
651 | printf(" State :%s%s%s%s\n", s&SPARE_DISK ? " spare" : "", | |
652 | s&CONFIGURED_DISK ? " active" : "", | |
653 | s&FAILED_DISK ? " failed" : "", | |
654 | s&USABLE_DISK ? " usable" : ""); | |
655 | printf(" Id : %08x\n", __le32_to_cpu(disk->scsi_id)); | |
656 | sz = __le32_to_cpu(disk->total_blocks) - reserved; | |
657 | printf(" Usable Size : %llu%s\n", (unsigned long long)sz, | |
658 | human_size(sz * 512)); | |
659 | } | |
660 | ||
661 | static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info); | |
662 | ||
663 | static void examine_super_imsm(struct supertype *st, char *homehost) | |
664 | { | |
665 | struct intel_super *super = st->sb; | |
666 | struct imsm_super *mpb = super->anchor; | |
667 | char str[MAX_SIGNATURE_LENGTH]; | |
668 | int i; | |
669 | struct mdinfo info; | |
670 | char nbuf[64]; | |
671 | __u32 sum; | |
672 | __u32 reserved = imsm_reserved_sectors(super, super->disks); | |
673 | ||
674 | ||
675 | snprintf(str, MPB_SIG_LEN, "%s", mpb->sig); | |
676 | printf(" Magic : %s\n", str); | |
677 | snprintf(str, strlen(MPB_VERSION_RAID0), "%s", get_imsm_version(mpb)); | |
678 | printf(" Version : %s\n", get_imsm_version(mpb)); | |
679 | printf(" Family : %08x\n", __le32_to_cpu(mpb->family_num)); | |
680 | printf(" Generation : %08x\n", __le32_to_cpu(mpb->generation_num)); | |
681 | getinfo_super_imsm(st, &info); | |
682 | fname_from_uuid(st, &info, nbuf, ':'); | |
683 | printf(" UUID : %s\n", nbuf + 5); | |
684 | sum = __le32_to_cpu(mpb->check_sum); | |
685 | printf(" Checksum : %08x %s\n", sum, | |
686 | __gen_imsm_checksum(mpb) == sum ? "correct" : "incorrect"); | |
687 | printf(" MPB Sectors : %d\n", mpb_sectors(mpb)); | |
688 | printf(" Disks : %d\n", mpb->num_disks); | |
689 | printf(" RAID Devices : %d\n", mpb->num_raid_devs); | |
690 | print_imsm_disk(mpb, super->disks->index, reserved); | |
691 | if (super->bbm_log) { | |
692 | struct bbm_log *log = super->bbm_log; | |
693 | ||
694 | printf("\n"); | |
695 | printf("Bad Block Management Log:\n"); | |
696 | printf(" Log Size : %d\n", __le32_to_cpu(mpb->bbm_log_size)); | |
697 | printf(" Signature : %x\n", __le32_to_cpu(log->signature)); | |
698 | printf(" Entry Count : %d\n", __le32_to_cpu(log->entry_count)); | |
699 | printf(" Spare Blocks : %d\n", __le32_to_cpu(log->reserved_spare_block_count)); | |
700 | printf(" First Spare : %llx\n", __le64_to_cpu(log->first_spare_lba)); | |
701 | } | |
702 | for (i = 0; i < mpb->num_raid_devs; i++) { | |
703 | struct mdinfo info; | |
704 | struct imsm_dev *dev = __get_imsm_dev(mpb, i); | |
705 | ||
706 | super->current_vol = i; | |
707 | getinfo_super_imsm(st, &info); | |
708 | fname_from_uuid(st, &info, nbuf, ':'); | |
709 | print_imsm_dev(dev, nbuf + 5, super->disks->index); | |
710 | } | |
711 | for (i = 0; i < mpb->num_disks; i++) { | |
712 | if (i == super->disks->index) | |
713 | continue; | |
714 | print_imsm_disk(mpb, i, reserved); | |
715 | } | |
716 | } | |
717 | ||
static void brief_examine_super_imsm(struct supertype *st)
{
	/* We just write a generic IMSM ARRAY entry */
	struct mdinfo info;
	char nbuf[64];
	char nbuf1[64];
	struct intel_super *super = st->sb;
	int i;

	if (!super->anchor->num_raid_devs)
		return;

	getinfo_super_imsm(st, &info);
	fname_from_uuid(st, &info, nbuf, ':');
	printf("ARRAY metadata=imsm auto=md UUID=%s\n", nbuf + 5);
	for (i = 0; i < super->anchor->num_raid_devs; i++) {
		struct imsm_dev *dev = get_imsm_dev(super, i);

		super->current_vol = i;
		getinfo_super_imsm(st, &info);
		fname_from_uuid(st, &info, nbuf1, ':');
		printf("ARRAY /dev/md/%.16s container=%s\n"
		       " member=%d auto=mdp UUID=%s\n",
		       dev->volume, nbuf + 5, i, nbuf1 + 5);
	}
}

static void export_examine_super_imsm(struct supertype *st)
{
	struct intel_super *super = st->sb;
	struct imsm_super *mpb = super->anchor;
	struct mdinfo info;
	char nbuf[64];

	getinfo_super_imsm(st, &info);
	fname_from_uuid(st, &info, nbuf, ':');
	printf("MD_METADATA=imsm\n");
	printf("MD_LEVEL=container\n");
	printf("MD_UUID=%s\n", nbuf+5);
	printf("MD_DEVICES=%u\n", mpb->num_disks);
}

static void detail_super_imsm(struct supertype *st, char *homehost)
{
	struct mdinfo info;
	char nbuf[64];

	getinfo_super_imsm(st, &info);
	fname_from_uuid(st, &info, nbuf, ':');
	printf("\n UUID : %s\n", nbuf + 5);
}

static void brief_detail_super_imsm(struct supertype *st)
{
	struct mdinfo info;
	char nbuf[64];
	getinfo_super_imsm(st, &info);
	fname_from_uuid(st, &info, nbuf, ':');
	printf(" UUID=%s", nbuf + 5);
}

static int imsm_read_serial(int fd, char *devname, __u8 *serial);
static void fd2devname(int fd, char *name);

static int imsm_enumerate_ports(const char *hba_path, int port_count, int host_base, int verbose)
{
	/* dump an unsorted list of devices attached to ahci, as well as
	 * non-connected ports
	 */
	int hba_len = strlen(hba_path) + 1;
	struct dirent *ent;
	DIR *dir;
	char *path = NULL;
	int err = 0;
	unsigned long port_mask = (1 << port_count) - 1;

	if (port_count > sizeof(port_mask) * 8) {
		if (verbose)
			fprintf(stderr, Name ": port_count %d out of range\n", port_count);
		return 2;
	}

	/* scroll through /sys/dev/block looking for devices attached to
	 * this hba
	 */
	dir = opendir("/sys/dev/block");
	for (ent = dir ? readdir(dir) : NULL; ent; ent = readdir(dir)) {
		int fd;
		char model[64];
		char vendor[64];
		char buf[1024];
		int major, minor;
		char *device;
		char *c;
		int port;
		int type;

		if (sscanf(ent->d_name, "%d:%d", &major, &minor) != 2)
			continue;
		path = devt_to_devpath(makedev(major, minor));
		if (!path)
			continue;
		if (!path_attached_to_hba(path, hba_path)) {
			free(path);
			path = NULL;
			continue;
		}

		/* retrieve the scsi device type */
		if (asprintf(&device, "/sys/dev/block/%d:%d/device/xxxxxxx", major, minor) < 0) {
			if (verbose)
				fprintf(stderr, Name ": failed to allocate 'device'\n");
			err = 2;
			break;
		}
		sprintf(device, "/sys/dev/block/%d:%d/device/type", major, minor);
		if (load_sys(device, buf) != 0) {
			if (verbose)
				fprintf(stderr, Name ": failed to read device type for %s\n",
					path);
			err = 2;
			free(device);
			break;
		}
		type = strtoul(buf, NULL, 10);

		/* if it's not a disk print the vendor and model */
		if (!(type == 0 || type == 7 || type == 14)) {
			vendor[0] = '\0';
			model[0] = '\0';
			sprintf(device, "/sys/dev/block/%d:%d/device/vendor", major, minor);
			if (load_sys(device, buf) == 0) {
				strncpy(vendor, buf, sizeof(vendor));
				vendor[sizeof(vendor) - 1] = '\0';
				c = (char *) &vendor[sizeof(vendor) - 1];
				while (isspace(*c) || *c == '\0')
					*c-- = '\0';

			}
			sprintf(device, "/sys/dev/block/%d:%d/device/model", major, minor);
			if (load_sys(device, buf) == 0) {
				strncpy(model, buf, sizeof(model));
				model[sizeof(model) - 1] = '\0';
				c = (char *) &model[sizeof(model) - 1];
				while (isspace(*c) || *c == '\0')
					*c-- = '\0';
			}

			if (vendor[0] && model[0])
				sprintf(buf, "%.64s %.64s", vendor, model);
			else
				switch (type) { /* numbers from hald/linux/device.c */
				case 1: sprintf(buf, "tape"); break;
				case 2: sprintf(buf, "printer"); break;
				case 3: sprintf(buf, "processor"); break;
				case 4:
				case 5: sprintf(buf, "cdrom"); break;
				case 6: sprintf(buf, "scanner"); break;
				case 8: sprintf(buf, "media_changer"); break;
				case 9: sprintf(buf, "comm"); break;
				case 12: sprintf(buf, "raid"); break;
				default: sprintf(buf, "unknown");
				}
		} else
			buf[0] = '\0';
		free(device);

		/* chop device path to 'host%d' and calculate the port number */
		c = strchr(&path[hba_len], '/');
		*c = '\0';
		if (sscanf(&path[hba_len], "host%d", &port) == 1)
			port -= host_base;
		else {
			if (verbose) {
				*c = '/'; /* repair the full string */
				fprintf(stderr, Name ": failed to determine port number for %s\n",
					path);
			}
			err = 2;
			break;
		}

		/* mark this port as used */
		port_mask &= ~(1 << port);

		/* print out the device information */
		if (buf[0]) {
			printf(" Port%d : - non-disk device (%s) -\n", port, buf);
			continue;
		}

		fd = dev_open(ent->d_name, O_RDONLY);
		if (fd < 0)
			printf(" Port%d : - disk info unavailable -\n", port);
		else {
			fd2devname(fd, buf);
			printf(" Port%d : %s", port, buf);
			if (imsm_read_serial(fd, NULL, (__u8 *) buf) == 0)
				printf(" (%s)\n", buf);
			else
				printf("()\n");
		}
		close(fd);
		free(path);
		path = NULL;
	}
	if (path)
		free(path);
	if (dir)
		closedir(dir);
	if (err == 0) {
		int i;

		for (i = 0; i < port_count; i++)
			if (port_mask & (1 << i))
				printf(" Port%d : - no device attached -\n", i);
	}

	return err;
}

static int detail_platform_imsm(int verbose, int enumerate_only)
{
	/* There are two components to imsm platform support, the ahci SATA
	 * controller and the option-rom.  To find the SATA controller we
	 * simply look in /sys/bus/pci/drivers/ahci to see if an ahci
	 * controller with the Intel vendor id is present.  This approach
	 * allows mdadm to leverage the kernel's ahci detection logic, with the
	 * caveat that if ahci.ko is not loaded mdadm will not be able to
	 * detect platform raid capabilities.  The option-rom resides in a
	 * platform "Adapter ROM".  We scan for its signature to retrieve the
	 * platform capabilities.  If raid support is disabled in the BIOS the
	 * option-rom capability structure will not be available.
	 */
	const struct imsm_orom *orom;
	struct sys_dev *list, *hba;
	DIR *dir;
	struct dirent *ent;
	const char *hba_path;
	int host_base = 0;
	int port_count = 0;

	if (enumerate_only) {
		if (check_env("IMSM_NO_PLATFORM") || find_imsm_orom())
			return 0;
		return 2;
	}

	list = find_driver_devices("pci", "ahci");
	for (hba = list; hba; hba = hba->next)
		if (devpath_to_vendor(hba->path) == 0x8086)
			break;

	if (!hba) {
		if (verbose)
			fprintf(stderr, Name ": unable to find active ahci controller\n");
		free_sys_dev(&list);
		return 2;
	} else if (verbose)
		fprintf(stderr, Name ": found Intel SATA AHCI Controller\n");
	hba_path = hba->path;
	hba->path = NULL;
	free_sys_dev(&list);

	orom = find_imsm_orom();
	if (!orom) {
		if (verbose)
			fprintf(stderr, Name ": imsm option-rom not found\n");
		return 2;
	}

	printf(" Platform : Intel(R) Matrix Storage Manager\n");
	printf(" Version : %d.%d.%d.%d\n", orom->major_ver, orom->minor_ver,
	       orom->hotfix_ver, orom->build);
	printf(" RAID Levels :%s%s%s%s%s\n",
	       imsm_orom_has_raid0(orom) ? " raid0" : "",
	       imsm_orom_has_raid1(orom) ? " raid1" : "",
	       imsm_orom_has_raid1e(orom) ? " raid1e" : "",
	       imsm_orom_has_raid10(orom) ? " raid10" : "",
	       imsm_orom_has_raid5(orom) ? " raid5" : "");
	printf(" Chunk Sizes :%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
	       imsm_orom_has_chunk(orom, 2) ? " 2k" : "",
	       imsm_orom_has_chunk(orom, 4) ? " 4k" : "",
	       imsm_orom_has_chunk(orom, 8) ? " 8k" : "",
	       imsm_orom_has_chunk(orom, 16) ? " 16k" : "",
	       imsm_orom_has_chunk(orom, 32) ? " 32k" : "",
	       imsm_orom_has_chunk(orom, 64) ? " 64k" : "",
	       imsm_orom_has_chunk(orom, 128) ? " 128k" : "",
	       imsm_orom_has_chunk(orom, 256) ? " 256k" : "",
	       imsm_orom_has_chunk(orom, 512) ? " 512k" : "",
	       imsm_orom_has_chunk(orom, 1024*1) ? " 1M" : "",
	       imsm_orom_has_chunk(orom, 1024*2) ? " 2M" : "",
	       imsm_orom_has_chunk(orom, 1024*4) ? " 4M" : "",
	       imsm_orom_has_chunk(orom, 1024*8) ? " 8M" : "",
	       imsm_orom_has_chunk(orom, 1024*16) ? " 16M" : "",
	       imsm_orom_has_chunk(orom, 1024*32) ? " 32M" : "",
	       imsm_orom_has_chunk(orom, 1024*64) ? " 64M" : "");
	printf(" Max Disks : %d\n", orom->tds);
	printf(" Max Volumes : %d\n", orom->vpa);
	printf(" I/O Controller : %s\n", hba_path);

	/* find the smallest scsi host number to determine a port number base */
	dir = opendir(hba_path);
	for (ent = dir ? readdir(dir) : NULL; ent; ent = readdir(dir)) {
		int host;

		if (sscanf(ent->d_name, "host%d", &host) != 1)
			continue;
		if (port_count == 0)
			host_base = host;
		else if (host < host_base)
			host_base = host;

		if (host + 1 > port_count + host_base)
			port_count = host + 1 - host_base;

	}
	if (dir)
		closedir(dir);

	if (!port_count || imsm_enumerate_ports(hba_path, port_count,
						host_base, verbose) != 0) {
		if (verbose)
			fprintf(stderr, Name ": failed to enumerate ports\n");
		return 2;
	}

	return 0;
}
#endif

static int match_home_imsm(struct supertype *st, char *homehost)
{
	/* the imsm metadata format does not specify any host
	 * identification information.  We return -1 since we can never
	 * confirm nor deny whether a given array is "meant" for this
	 * host.  We rely on compare_super and the 'family_num' field to
	 * exclude member disks that do not belong, and we rely on
	 * mdadm.conf to specify the arrays that should be assembled.
	 * Auto-assembly may still pick up "foreign" arrays.
	 */

	return -1;
}

static void uuid_from_super_imsm(struct supertype *st, int uuid[4])
{
	/* The uuid returned here is used for:
	 *  uuid to put into bitmap file (Create, Grow)
	 *  uuid for backup header when saving critical section (Grow)
	 *  comparing uuids when re-adding a device into an array
	 *    In these cases the uuid required is that of the data-array,
	 *    not the device-set.
	 *  uuid to recognise same set when adding a missing device back
	 *    to an array.   This is a uuid for the device-set.
	 *
	 *  For each of these we can make do with a truncated
	 *  or hashed uuid rather than the original, as long as
	 *  everyone agrees.
	 *  In each case the uuid required is that of the data-array,
	 *  not the device-set.
	 */
	/* imsm does not track uuids, so we synthesize one using sha1 on
	 * - the signature (which is constant for all imsm arrays, but no matter)
	 * - the family_num of the container
	 * - the index number of the volume
	 * - the 'serial' number of the volume.
	 * Hopefully these are all constant.
	 */
	struct intel_super *super = st->sb;

	char buf[20];
	struct sha1_ctx ctx;
	struct imsm_dev *dev = NULL;

	sha1_init_ctx(&ctx);
	sha1_process_bytes(super->anchor->sig, MPB_SIG_LEN, &ctx);
	sha1_process_bytes(&super->anchor->family_num, sizeof(__u32), &ctx);
	if (super->current_vol >= 0)
		dev = get_imsm_dev(super, super->current_vol);
	if (dev) {
		__u32 vol = super->current_vol;
		sha1_process_bytes(&vol, sizeof(vol), &ctx);
		sha1_process_bytes(dev->volume, MAX_RAID_SERIAL_LEN, &ctx);
	}
	sha1_finish_ctx(&ctx, buf);
	memcpy(uuid, buf, 4*4);
}
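/* Added note: the synthesized uuid is just the first 16 bytes of a sha1
 * over the anchor signature, the container family_num and, for a specific
 * volume, its index and name, so the same member disks always map back to
 * the same md uuid even though imsm never stores one explicitly.
 */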
1106 | ||
1107 | #if 0 | |
1108 | static void | |
1109 | get_imsm_numerical_version(struct imsm_super *mpb, int *m, int *p) | |
1110 | { | |
1111 | __u8 *v = get_imsm_version(mpb); | |
1112 | __u8 *end = mpb->sig + MAX_SIGNATURE_LENGTH; | |
1113 | char major[] = { 0, 0, 0 }; | |
1114 | char minor[] = { 0 ,0, 0 }; | |
1115 | char patch[] = { 0, 0, 0 }; | |
1116 | char *ver_parse[] = { major, minor, patch }; | |
1117 | int i, j; | |
1118 | ||
1119 | i = j = 0; | |
1120 | while (*v != '\0' && v < end) { | |
1121 | if (*v != '.' && j < 2) | |
1122 | ver_parse[i][j++] = *v; | |
1123 | else { | |
1124 | i++; | |
1125 | j = 0; | |
1126 | } | |
1127 | v++; | |
1128 | } | |
1129 | ||
1130 | *m = strtol(minor, NULL, 0); | |
1131 | *p = strtol(patch, NULL, 0); | |
1132 | } | |
1133 | #endif | |
1134 | ||
1135 | static int imsm_level_to_layout(int level) | |
1136 | { | |
1137 | switch (level) { | |
1138 | case 0: | |
1139 | case 1: | |
1140 | return 0; | |
1141 | case 5: | |
1142 | case 6: | |
1143 | return ALGORITHM_LEFT_ASYMMETRIC; | |
1144 | case 10: | |
1145 | return 0x102; | |
1146 | } | |
1147 | return UnSet; | |
1148 | } | |
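/* Added note: as I read the md raid10 layout convention (near copies in
 * the low byte, far copies in the next byte), 0x102 describes two near
 * copies, i.e. the mirrored-and-striped geometry imsm creates; this
 * decoding is explanatory and not stated by the metadata itself.
 */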
1149 | ||
1150 | static void getinfo_super_imsm_volume(struct supertype *st, struct mdinfo *info) | |
1151 | { | |
1152 | struct intel_super *super = st->sb; | |
1153 | struct imsm_dev *dev = get_imsm_dev(super, super->current_vol); | |
1154 | struct imsm_map *map = get_imsm_map(dev, 0); | |
1155 | struct dl *dl; | |
1156 | ||
1157 | for (dl = super->disks; dl; dl = dl->next) | |
1158 | if (dl->raiddisk == info->disk.raid_disk) | |
1159 | break; | |
1160 | info->container_member = super->current_vol; | |
1161 | info->array.raid_disks = map->num_members; | |
1162 | info->array.level = get_imsm_raid_level(map); | |
1163 | info->array.layout = imsm_level_to_layout(info->array.level); | |
1164 | info->array.md_minor = -1; | |
1165 | info->array.ctime = 0; | |
1166 | info->array.utime = 0; | |
1167 | info->array.chunk_size = __le16_to_cpu(map->blocks_per_strip) << 9; | |
1168 | info->array.state = !dev->vol.dirty; | |
1169 | ||
1170 | info->disk.major = 0; | |
1171 | info->disk.minor = 0; | |
1172 | if (dl) { | |
1173 | info->disk.major = dl->major; | |
1174 | info->disk.minor = dl->minor; | |
1175 | } | |
1176 | ||
1177 | info->data_offset = __le32_to_cpu(map->pba_of_lba0); | |
1178 | info->component_size = __le32_to_cpu(map->blocks_per_member); | |
1179 | memset(info->uuid, 0, sizeof(info->uuid)); | |
1180 | ||
1181 | if (map->map_state == IMSM_T_STATE_UNINITIALIZED || dev->vol.dirty) | |
1182 | info->resync_start = 0; | |
1183 | else if (dev->vol.migr_state) | |
1184 | info->resync_start = __le32_to_cpu(dev->vol.curr_migr_unit); | |
1185 | else | |
1186 | info->resync_start = ~0ULL; | |
1187 | ||
1188 | strncpy(info->name, (char *) dev->volume, MAX_RAID_SERIAL_LEN); | |
1189 | info->name[MAX_RAID_SERIAL_LEN] = 0; | |
1190 | ||
1191 | info->array.major_version = -1; | |
1192 | info->array.minor_version = -2; | |
1193 | sprintf(info->text_version, "/%s/%d", | |
1194 | devnum2devname(st->container_dev), | |
1195 | info->container_member); | |
1196 | info->safe_mode_delay = 4000; /* 4 secs like the Matrix driver */ | |
1197 | uuid_from_super_imsm(st, info->uuid); | |
1198 | } | |
1199 | ||
1200 | /* check the config file to see if we can return a real uuid for this spare */ | |
1201 | static void fixup_container_spare_uuid(struct mdinfo *inf) | |
1202 | { | |
1203 | struct mddev_ident_s *array_list; | |
1204 | ||
1205 | if (inf->array.level != LEVEL_CONTAINER || | |
1206 | memcmp(inf->uuid, uuid_match_any, sizeof(int[4])) != 0) | |
1207 | return; | |
1208 | ||
1209 | array_list = conf_get_ident(NULL); | |
1210 | ||
1211 | for (; array_list; array_list = array_list->next) { | |
1212 | if (array_list->uuid_set) { | |
1213 | struct supertype *_sst; /* spare supertype */ | |
1214 | struct supertype *_cst; /* container supertype */ | |
1215 | ||
1216 | _cst = array_list->st; | |
1217 | _sst = _cst->ss->match_metadata_desc(inf->text_version); | |
1218 | if (_sst) { | |
1219 | memcpy(inf->uuid, array_list->uuid, sizeof(int[4])); | |
1220 | free(_sst); | |
1221 | break; | |
1222 | } | |
1223 | } | |
1224 | } | |
1225 | } | |
1226 | ||
1227 | static void getinfo_super_imsm(struct supertype *st, struct mdinfo *info) | |
1228 | { | |
1229 | struct intel_super *super = st->sb; | |
1230 | struct imsm_disk *disk; | |
1231 | __u32 s; | |
1232 | ||
1233 | if (super->current_vol >= 0) { | |
1234 | getinfo_super_imsm_volume(st, info); | |
1235 | return; | |
1236 | } | |
1237 | ||
1238 | /* Set raid_disks to zero so that Assemble will always pull in valid | |
1239 | * spares | |
1240 | */ | |
1241 | info->array.raid_disks = 0; | |
1242 | info->array.level = LEVEL_CONTAINER; | |
1243 | info->array.layout = 0; | |
1244 | info->array.md_minor = -1; | |
1245 | info->array.ctime = 0; /* N/A for imsm */ | |
1246 | info->array.utime = 0; | |
1247 | info->array.chunk_size = 0; | |
1248 | ||
1249 | info->disk.major = 0; | |
1250 | info->disk.minor = 0; | |
1251 | info->disk.raid_disk = -1; | |
1252 | info->reshape_active = 0; | |
1253 | info->array.major_version = -1; | |
1254 | info->array.minor_version = -2; | |
1255 | strcpy(info->text_version, "imsm"); | |
1256 | info->safe_mode_delay = 0; | |
1257 | info->disk.number = -1; | |
1258 | info->disk.state = 0; | |
1259 | info->name[0] = 0; | |
1260 | ||
1261 | if (super->disks) { | |
1262 | __u32 reserved = imsm_reserved_sectors(super, super->disks); | |
1263 | ||
1264 | disk = &super->disks->disk; | |
1265 | info->data_offset = __le32_to_cpu(disk->total_blocks) - reserved; | |
1266 | info->component_size = reserved; | |
1267 | s = disk->status; | |
1268 | info->disk.state = s & CONFIGURED_DISK ? (1 << MD_DISK_ACTIVE) : 0; | |
1269 | /* we don't change info->disk.raid_disk here because | |
1270 | * this state will be finalized in mdmon after we have | |
1271 | * found the 'most fresh' version of the metadata | |
1272 | */ | |
1273 | info->disk.state |= s & FAILED_DISK ? (1 << MD_DISK_FAULTY) : 0; | |
1274 | info->disk.state |= s & SPARE_DISK ? 0 : (1 << MD_DISK_SYNC); | |
1275 | } | |
1276 | ||
1277 | /* only call uuid_from_super_imsm when this disk is part of a populated container, | |
1278 | * ->compare_super may have updated the 'num_raid_devs' field for spares | |
1279 | */ | |
1280 | if (info->disk.state & (1 << MD_DISK_SYNC) || super->anchor->num_raid_devs) | |
1281 | uuid_from_super_imsm(st, info->uuid); | |
1282 | else { | |
1283 | memcpy(info->uuid, uuid_match_any, sizeof(int[4])); | |
1284 | fixup_container_spare_uuid(info); | |
1285 | } | |
1286 | } | |
1287 | ||
1288 | static int update_super_imsm(struct supertype *st, struct mdinfo *info, | |
1289 | char *update, char *devname, int verbose, | |
1290 | int uuid_set, char *homehost) | |
1291 | { | |
1292 | /* FIXME */ | |
1293 | ||
1294 | /* For 'assemble' and 'force' we need to return non-zero if any | |
1295 | * change was made. For others, the return value is ignored. | |
1296 | * Update options are: | |
1297 | * force-one : This device looks a bit old but needs to be included, | |
1298 | * update age info appropriately. | |
1299 | * assemble: clear any 'faulty' flag to allow this device to | |
1300 | * be assembled. | |
1301 | * force-array: Array is degraded but being forced, mark it clean | |
1302 | * if that will be needed to assemble it. | |
1303 | * | |
1304 | * newdev: not used ???? | |
1305 | * grow: Array has gained a new device - this is currently for | |
1306 | * linear only | |
1307 | * resync: mark as dirty so a resync will happen. | |
1308 | * name: update the name - preserving the homehost | |
1309 | * | |
1310 | * Following are not relevant for this imsm: | |
1311 | * sparc2.2 : update from old dodgey metadata | |
1312 | * super-minor: change the preferred_minor number | |
1313 | * summaries: update redundant counters. | |
	 *  uuid: Change the uuid of the array to match what is given
	 *  homehost: update the recorded homehost
	 *  _reshape_progress: record new reshape_progress position.
	 */
	int rv = 0;
	//struct intel_super *super = st->sb;
	//struct imsm_super *mpb = super->mpb;

	if (strcmp(update, "grow") == 0) {
	}
	if (strcmp(update, "resync") == 0) {
		/* dev->vol.dirty = 1; */
	}

	/* IMSM has no concept of UUID or homehost */

	return rv;
}

static size_t disks_to_mpb_size(int disks)
{
	size_t size;

	size = sizeof(struct imsm_super);
	size += (disks - 1) * sizeof(struct imsm_disk);
	size += 2 * sizeof(struct imsm_dev);
	/* up to 2 maps per raid device (-2 for the imsm_maps already in imsm_dev) */
	size += (4 - 2) * sizeof(struct imsm_map);
	/* 4 possible disk_ord_tbl's */
	size += 4 * (disks - 1) * sizeof(__u32);

	return size;
}

static __u64 avail_size_imsm(struct supertype *st, __u64 devsize)
{
	if (devsize < (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS))
		return 0;

	return devsize - (MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS);
}
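/* Added note: sizes here are in 512-byte sectors, so a member device loses
 * MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS = 418 + 4096 = 4514 sectors
 * (a little over 2.2 MiB) to metadata; e.g. a 1000000-sector device would
 * report 995486 usable sectors.
 */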
1355 | ||
1356 | static void free_devlist(struct intel_super *super) | |
1357 | { | |
1358 | struct intel_dev *dv; | |
1359 | ||
1360 | while (super->devlist) { | |
1361 | dv = super->devlist->next; | |
1362 | free(super->devlist->dev); | |
1363 | free(super->devlist); | |
1364 | super->devlist = dv; | |
1365 | } | |
1366 | } | |
1367 | ||
1368 | static void imsm_copy_dev(struct imsm_dev *dest, struct imsm_dev *src) | |
1369 | { | |
1370 | memcpy(dest, src, sizeof_imsm_dev(src, 0)); | |
1371 | } | |
1372 | ||
1373 | static int compare_super_imsm(struct supertype *st, struct supertype *tst) | |
1374 | { | |
1375 | /* | |
1376 | * return: | |
1377 | * 0 same, or first was empty, and second was copied | |
1378 | * 1 second had wrong number | |
1379 | * 2 wrong uuid | |
1380 | * 3 wrong other info | |
1381 | */ | |
1382 | struct intel_super *first = st->sb; | |
1383 | struct intel_super *sec = tst->sb; | |
1384 | ||
1385 | if (!first) { | |
1386 | st->sb = tst->sb; | |
1387 | tst->sb = NULL; | |
1388 | return 0; | |
1389 | } | |
1390 | ||
1391 | if (memcmp(first->anchor->sig, sec->anchor->sig, MAX_SIGNATURE_LENGTH) != 0) | |
1392 | return 3; | |
1393 | ||
1394 | /* if an anchor does not have num_raid_devs set then it is a free | |
1395 | * floating spare | |
1396 | */ | |
1397 | if (first->anchor->num_raid_devs > 0 && | |
1398 | sec->anchor->num_raid_devs > 0) { | |
1399 | if (first->anchor->family_num != sec->anchor->family_num) | |
1400 | return 3; | |
1401 | } | |
1402 | ||
1403 | /* if 'first' is a spare promote it to a populated mpb with sec's | |
1404 | * family number | |
1405 | */ | |
1406 | if (first->anchor->num_raid_devs == 0 && | |
1407 | sec->anchor->num_raid_devs > 0) { | |
1408 | int i; | |
1409 | struct intel_dev *dv; | |
1410 | struct imsm_dev *dev; | |
1411 | ||
		/* we need to copy raid device info from sec; if an
		 * allocation fails here we don't associate the spare
		 */
		for (i = 0; i < sec->anchor->num_raid_devs; i++) {
			dv = malloc(sizeof(*dv));
			if (!dv)
				break;
			dev = malloc(sizeof_imsm_dev(get_imsm_dev(sec, i), 1));
			if (!dev) {
				free(dv);
				break;
			}
			dv->dev = dev;
			dv->index = i;
			dv->next = first->devlist;
			first->devlist = dv;
		}
		if (i < sec->anchor->num_raid_devs) {
			/* allocation failure */
			free_devlist(first);
			fprintf(stderr, "imsm: failed to associate spare\n");
			return 3;
		}
		for (i = 0; i < sec->anchor->num_raid_devs; i++)
			imsm_copy_dev(get_imsm_dev(first, i), get_imsm_dev(sec, i));

		first->anchor->num_raid_devs = sec->anchor->num_raid_devs;
		first->anchor->family_num = sec->anchor->family_num;
	}

	return 0;
}

static void fd2devname(int fd, char *name)
{
	struct stat st;
	char path[256];
	char dname[100];
	char *nm;
	int rv;

	name[0] = '\0';
	if (fstat(fd, &st) != 0)
		return;
	sprintf(path, "/sys/dev/block/%d:%d",
		major(st.st_rdev), minor(st.st_rdev));

	rv = readlink(path, dname, sizeof(dname));
	if (rv <= 0)
		return;

	dname[rv] = '\0';
	nm = strrchr(dname, '/');
	nm++;
	snprintf(name, MAX_RAID_SERIAL_LEN, "/dev/%s", nm);
}


extern int scsi_get_serial(int fd, void *buf, size_t buf_len);

static int imsm_read_serial(int fd, char *devname,
			    __u8 serial[MAX_RAID_SERIAL_LEN])
{
	unsigned char scsi_serial[255];
	int rv;
	int rsp_len;
	int len;
	char *dest;
	char *src;
	char *rsp_buf;
	int i;

	memset(scsi_serial, 0, sizeof(scsi_serial));

	rv = scsi_get_serial(fd, scsi_serial, sizeof(scsi_serial));

	if (rv && check_env("IMSM_DEVNAME_AS_SERIAL")) {
		memset(serial, 0, MAX_RAID_SERIAL_LEN);
		fd2devname(fd, (char *) serial);
		return 0;
	}

	if (rv != 0) {
		if (devname)
			fprintf(stderr,
				Name ": Failed to retrieve serial for %s\n",
				devname);
		return rv;
	}

	rsp_len = scsi_serial[3];
	if (!rsp_len) {
		if (devname)
			fprintf(stderr,
				Name ": Failed to retrieve serial for %s\n",
				devname);
		return 2;
	}
	rsp_buf = (char *) &scsi_serial[4];

	/* trim all whitespace and non-printable characters and convert
	 * ':' to ';'
	 */
	for (i = 0, dest = rsp_buf; i < rsp_len; i++) {
		src = &rsp_buf[i];
		if (*src > 0x20) {
			/* ':' is reserved for use in placeholder serial
			 * numbers for missing disks
			 */
			if (*src == ':')
				*dest++ = ';';
			else
				*dest++ = *src;
		}
	}
	len = dest - rsp_buf;
	dest = rsp_buf;

	/* truncate leading characters */
	if (len > MAX_RAID_SERIAL_LEN) {
		dest += len - MAX_RAID_SERIAL_LEN;
		len = MAX_RAID_SERIAL_LEN;
	}

	memset(serial, 0, MAX_RAID_SERIAL_LEN);
	memcpy(serial, dest, len);

	return 0;
}
1541 | ||
1542 | static int serialcmp(__u8 *s1, __u8 *s2) | |
1543 | { | |
1544 | return strncmp((char *) s1, (char *) s2, MAX_RAID_SERIAL_LEN); | |
1545 | } | |
1546 | ||
1547 | static void serialcpy(__u8 *dest, __u8 *src) | |
1548 | { | |
1549 | strncpy((char *) dest, (char *) src, MAX_RAID_SERIAL_LEN); | |
1550 | } | |
1551 | ||
1552 | static struct dl *serial_to_dl(__u8 *serial, struct intel_super *super) | |
1553 | { | |
1554 | struct dl *dl; | |
1555 | ||
1556 | for (dl = super->disks; dl; dl = dl->next) | |
1557 | if (serialcmp(dl->serial, serial) == 0) | |
1558 | break; | |
1559 | ||
1560 | return dl; | |
1561 | } | |
1562 | ||
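/* load_imsm_disk() - read the serial of the disk behind 'fd' and attach (or
 * refresh) its entry in super->disks.  The dl->index convention used below:
 *   -2  not known to the current anchor, or marked FAILED_DISK
 *   -1  known spare (SPARE_DISK set)
 *  >=0  slot of an active member in the anchor's disk table
 */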
1563 | static int | |
1564 | load_imsm_disk(int fd, struct intel_super *super, char *devname, int keep_fd) | |
1565 | { | |
1566 | struct dl *dl; | |
1567 | struct stat stb; | |
1568 | int rv; | |
1569 | int i; | |
1570 | int alloc = 1; | |
1571 | __u8 serial[MAX_RAID_SERIAL_LEN]; | |
1572 | ||
1573 | rv = imsm_read_serial(fd, devname, serial); | |
1574 | ||
1575 | if (rv != 0) | |
1576 | return 2; | |
1577 | ||
1578 | /* check if this is a disk we have seen before.  It may be a spare in | |
1579 | * super->disks while the current anchor believes it is a raid member, | |
1580 | * in which case we need to update dl->index | |
1581 | */ | |
1582 | dl = serial_to_dl(serial, super); | |
1583 | if (!dl) | |
1584 | dl = malloc(sizeof(*dl)); | |
1585 | else | |
1586 | alloc = 0; | |
1587 | ||
1588 | if (!dl) { | |
1589 | if (devname) | |
1590 | fprintf(stderr, | |
1591 | Name ": failed to allocate disk buffer for %s\n", | |
1592 | devname); | |
1593 | return 2; | |
1594 | } | |
1595 | ||
1596 | if (alloc) { | |
1597 | fstat(fd, &stb); | |
1598 | dl->major = major(stb.st_rdev); | |
1599 | dl->minor = minor(stb.st_rdev); | |
1600 | dl->next = super->disks; | |
1601 | dl->fd = keep_fd ? fd : -1; | |
1602 | dl->devname = devname ? strdup(devname) : NULL; | |
1603 | serialcpy(dl->serial, serial); | |
1604 | dl->index = -2; | |
1605 | dl->e = NULL; | |
1606 | } else if (keep_fd) { | |
1607 | close(dl->fd); | |
1608 | dl->fd = fd; | |
1609 | } | |
1610 | ||
1611 | /* look up this disk's index in the current anchor */ | |
1612 | for (i = 0; i < super->anchor->num_disks; i++) { | |
1613 | struct imsm_disk *disk_iter; | |
1614 | ||
1615 | disk_iter = __get_imsm_disk(super->anchor, i); | |
1616 | ||
1617 | if (serialcmp(disk_iter->serial, dl->serial) == 0) { | |
1618 | dl->disk = *disk_iter; | |
1619 | /* only set index on disks that are a member of a | |
1620 | * populated container, i.e. one with raid_devs | |
1621 | */ | |
1622 | if (dl->disk.status & FAILED_DISK) | |
1623 | dl->index = -2; | |
1624 | else if (dl->disk.status & SPARE_DISK) | |
1625 | dl->index = -1; | |
1626 | else | |
1627 | dl->index = i; | |
1628 | ||
1629 | break; | |
1630 | } | |
1631 | } | |
1632 | ||
1633 | /* no match, maybe a stale failed drive */ | |
1634 | if (i == super->anchor->num_disks && dl->index >= 0) { | |
1635 | dl->disk = *__get_imsm_disk(super->anchor, dl->index); | |
1636 | if (dl->disk.status & FAILED_DISK) | |
1637 | dl->index = -2; | |
1638 | } | |
1639 | ||
1640 | if (alloc) | |
1641 | super->disks = dl; | |
1642 | ||
1643 | return 0; | |
1644 | } | |
1645 | ||
1646 | #ifndef MDASSEMBLE | |
1647 | /* When migrating map0 contains the 'destination' state while map1 | |
1648 | * contains the current state. When not migrating map0 contains the | |
1649 | * current state. This routine assumes that map[0].map_state is set to | |
1650 | * the current array state before being called. | |
1651 | * | |
1652 | * Migration is indicated by one of the following states | |
1653 | * 1/ Idle (migr_state=0 map0state=normal||uninitialized||degraded||failed) | |
1654 | * 2/ Initialize (migr_state=1 migr_type=MIGR_INIT map0state=normal | |
1655 | * map1state=uninitialized) | |
1656 | * 3/ Verify (Resync) (migr_state=1 migr_type=MIGR_REBUILD map0state=normal | |
1657 | * map1state=normal) | |
1658 | * 4/ Rebuild (migr_state=1 migr_type=MIGR_REBUILD map0state=normal | |
1659 | * map1state=degraded) | |
1660 | */ | |
1661 | static void migrate(struct imsm_dev *dev, __u8 to_state, int migr_type) | |
1662 | { | |
1663 | struct imsm_map *dest; | |
1664 | struct imsm_map *src = get_imsm_map(dev, 0); | |
1665 | ||
1666 | dev->vol.migr_state = 1; | |
1667 | dev->vol.migr_type = migr_type; | |
1668 | dev->vol.curr_migr_unit = 0; | |
1669 | dest = get_imsm_map(dev, 1); | |
1670 | ||
1671 | /* duplicate and then set the target end state in map[0] */ | |
1672 | memcpy(dest, src, sizeof_imsm_map(src)); | |
1673 | if (migr_type == MIGR_REBUILD) { | |
1674 | __u32 ord; | |
1675 | int i; | |
1676 | ||
1677 | for (i = 0; i < src->num_members; i++) { | |
1678 | ord = __le32_to_cpu(src->disk_ord_tbl[i]); | |
1679 | set_imsm_ord_tbl_ent(src, i, ord_to_idx(ord)); | |
1680 | } | |
1681 | } | |
1682 | ||
1683 | src->map_state = to_state; | |
1684 | } | |
1685 | ||
1686 | static void end_migration(struct imsm_dev *dev, __u8 map_state) | |
1687 | { | |
1688 | struct imsm_map *map = get_imsm_map(dev, 0); | |
1689 | struct imsm_map *prev = get_imsm_map(dev, dev->vol.migr_state); | |
1690 | int i; | |
1691 | ||
1692 | /* merge any IMSM_ORD_REBUILD bits that were not successfully | |
1693 | * completed in the last migration. | |
1694 | * | |
1695 | * FIXME add support for online capacity expansion and | |
1696 | * raid-level-migration | |
1697 | */ | |
1698 | for (i = 0; i < prev->num_members; i++) | |
1699 | map->disk_ord_tbl[i] |= prev->disk_ord_tbl[i]; | |
1700 | ||
1701 | dev->vol.migr_state = 0; | |
1702 | dev->vol.curr_migr_unit = 0; | |
1703 | map->map_state = map_state; | |
1704 | } | |
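/* A minimal sketch of how the two helpers above are intended to be driven for
 * a rebuild (state 4/ in the list above).  Illustrative only, never compiled;
 * IMSM_T_STATE_DEGRADED is assumed to exist alongside IMSM_T_STATE_NORMAL,
 * which is the only map state referenced elsewhere in this file.
 */
#if 0
static void example_rebuild_cycle(struct imsm_dev *dev)
{
	struct imsm_map *map = get_imsm_map(dev, 0);

	/* migrate() expects map[0].map_state to hold the *current* state */
	map->map_state = IMSM_T_STATE_DEGRADED;

	/* map[0] becomes the clean target; map[1] keeps the degraded source
	 * including any IMSM_ORD_REBUILD ordinals
	 */
	migrate(dev, IMSM_T_STATE_NORMAL, MIGR_REBUILD);

	/* ... mdmon advances dev->vol.curr_migr_unit as the rebuild runs ... */

	end_migration(dev, IMSM_T_STATE_NORMAL);
}
#endif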
1705 | #endif | |
1706 | ||
1707 | static int parse_raid_devices(struct intel_super *super) | |
1708 | { | |
1709 | int i; | |
1710 | struct imsm_dev *dev_new; | |
1711 | size_t len, len_migr; | |
1712 | size_t space_needed = 0; | |
1713 | struct imsm_super *mpb = super->anchor; | |
1714 | ||
1715 | for (i = 0; i < super->anchor->num_raid_devs; i++) { | |
1716 | struct imsm_dev *dev_iter = __get_imsm_dev(super->anchor, i); | |
1717 | struct intel_dev *dv; | |
1718 | ||
1719 | len = sizeof_imsm_dev(dev_iter, 0); | |
1720 | len_migr = sizeof_imsm_dev(dev_iter, 1); | |
1721 | if (len_migr > len) | |
1722 | space_needed += len_migr - len; | |
1723 | ||
1724 | dv = malloc(sizeof(*dv)); | |
1725 | if (!dv) | |
1726 | return 1; | |
1727 | dev_new = malloc(len_migr); | |
1728 | if (!dev_new) { | |
1729 | free(dv); | |
1730 | return 1; | |
1731 | } | |
1732 | imsm_copy_dev(dev_new, dev_iter); | |
1733 | dv->dev = dev_new; | |
1734 | dv->index = i; | |
1735 | dv->next = super->devlist; | |
1736 | super->devlist = dv; | |
1737 | } | |
1738 | ||
1739 | /* ensure that super->buf is large enough when all raid devices | |
1740 | * are migrating | |
1741 | */ | |
1742 | if (__le32_to_cpu(mpb->mpb_size) + space_needed > super->len) { | |
1743 | void *buf; | |
1744 | ||
1745 | len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) + space_needed, 512); | |
1746 | if (posix_memalign(&buf, 512, len) != 0) | |
1747 | return 1; | |
1748 | ||
1749 | memcpy(buf, super->buf, super->len); | |
1750 | memset(buf + super->len, 0, len - super->len); | |
1751 | free(super->buf); | |
1752 | super->buf = buf; | |
1753 | super->len = len; | |
1754 | } | |
1755 | ||
1756 | return 0; | |
1757 | } | |
1758 | ||
1759 | /* retrieve a pointer to the bbm log which starts after all raid devices */ | |
1760 | struct bbm_log *__get_imsm_bbm_log(struct imsm_super *mpb) | |
1761 | { | |
1762 | void *ptr = NULL; | |
1763 | ||
1764 | if (__le32_to_cpu(mpb->bbm_log_size)) { | |
1765 | ptr = mpb; | |
1766 | ptr += __le32_to_cpu(mpb->mpb_size) - __le32_to_cpu(mpb->bbm_log_size); | |
1767 | } | |
1768 | ||
1769 | return ptr; | |
1770 | } | |
1771 | ||
1772 | static void __free_imsm(struct intel_super *super, int free_disks); | |
1773 | ||
1774 | /* load_imsm_mpb - read matrix metadata | |
1775 | * allocates super->mpb to be freed by free_super | |
1776 | */ | |
1777 | static int load_imsm_mpb(int fd, struct intel_super *super, char *devname) | |
1778 | { | |
1779 | unsigned long long dsize; | |
1780 | unsigned long long sectors; | |
1782 | struct imsm_super *anchor; | |
1783 | __u32 check_sum; | |
1784 | int rc; | |
1785 | ||
1786 | get_dev_size(fd, NULL, &dsize); | |
1787 | ||
1788 | if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0) { | |
1789 | if (devname) | |
1790 | fprintf(stderr, | |
1791 | Name ": Cannot seek to anchor block on %s: %s\n", | |
1792 | devname, strerror(errno)); | |
1793 | return 1; | |
1794 | } | |
1795 | ||
1796 | if (posix_memalign((void**)&anchor, 512, 512) != 0) { | |
1797 | if (devname) | |
1798 | fprintf(stderr, | |
1799 | Name ": Failed to allocate imsm anchor buffer" | |
1800 | " on %s\n", devname); | |
1801 | return 1; | |
1802 | } | |
1803 | if (read(fd, anchor, 512) != 512) { | |
1804 | if (devname) | |
1805 | fprintf(stderr, | |
1806 | Name ": Cannot read anchor block on %s: %s\n", | |
1807 | devname, strerror(errno)); | |
1808 | free(anchor); | |
1809 | return 1; | |
1810 | } | |
1811 | ||
1812 | if (strncmp((char *) anchor->sig, MPB_SIGNATURE, MPB_SIG_LEN) != 0) { | |
1813 | if (devname) | |
1814 | fprintf(stderr, | |
1815 | Name ": no IMSM anchor on %s\n", devname); | |
1816 | free(anchor); | |
1817 | return 2; | |
1818 | } | |
1819 | ||
1820 | __free_imsm(super, 0); | |
1821 | super->len = ROUND_UP(anchor->mpb_size, 512); | |
1822 | if (posix_memalign(&super->buf, 512, super->len) != 0) { | |
1823 | if (devname) | |
1824 | fprintf(stderr, | |
1825 | Name ": unable to allocate %zu byte mpb buffer\n", | |
1826 | super->len); | |
1827 | free(anchor); | |
1828 | return 2; | |
1829 | } | |
1830 | memcpy(super->buf, anchor, 512); | |
1831 | ||
1832 | sectors = mpb_sectors(anchor) - 1; | |
1833 | free(anchor); | |
1834 | if (!sectors) { | |
1835 | check_sum = __gen_imsm_checksum(super->anchor); | |
1836 | if (check_sum != __le32_to_cpu(super->anchor->check_sum)) { | |
1837 | if (devname) | |
1838 | fprintf(stderr, | |
1839 | Name ": IMSM checksum %x != %x on %s\n", | |
1840 | check_sum, | |
1841 | __le32_to_cpu(super->anchor->check_sum), | |
1842 | devname); | |
1843 | return 2; | |
1844 | } | |
1845 | ||
1846 | rc = load_imsm_disk(fd, super, devname, 0); | |
1847 | if (rc == 0) | |
1848 | rc = parse_raid_devices(super); | |
1849 | return rc; | |
1850 | } | |
1851 | ||
1852 | /* read the extended mpb */ | |
1853 | if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0) { | |
1854 | if (devname) | |
1855 | fprintf(stderr, | |
1856 | Name ": Cannot seek to extended mpb on %s: %s\n", | |
1857 | devname, strerror(errno)); | |
1858 | return 1; | |
1859 | } | |
1860 | ||
1861 | if (read(fd, super->buf + 512, super->len - 512) != super->len - 512) { | |
1862 | if (devname) | |
1863 | fprintf(stderr, | |
1864 | Name ": Cannot read extended mpb on %s: %s\n", | |
1865 | devname, strerror(errno)); | |
1866 | return 2; | |
1867 | } | |
1868 | ||
1869 | check_sum = __gen_imsm_checksum(super->anchor); | |
1870 | if (check_sum != __le32_to_cpu(super->anchor->check_sum)) { | |
1871 | if (devname) | |
1872 | fprintf(stderr, | |
1873 | Name ": IMSM checksum %x != %x on %s\n", | |
1874 | check_sum, __le32_to_cpu(super->anchor->check_sum), | |
1875 | devname); | |
1876 | return 3; | |
1877 | } | |
1878 | ||
1879 | /* FIXME the BBM log is disk specific so we cannot use this global | |
1880 | * buffer for all disks. Ok for now since we only look at the global | |
1881 | * bbm_log_size parameter to gate assembly | |
1882 | */ | |
1883 | super->bbm_log = __get_imsm_bbm_log(super->anchor); | |
1884 | ||
1885 | rc = load_imsm_disk(fd, super, devname, 0); | |
1886 | if (rc == 0) | |
1887 | rc = parse_raid_devices(super); | |
1888 | ||
1889 | return rc; | |
1890 | } | |
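/* On-disk layout assumed by load_imsm_mpb() above and store_zero_imsm()
 * below, with offsets measured from the end of the member disk:
 *
 *   ... data ... [ extended mpb: (mpb_sectors - 1) sectors ][ anchor: 512 bytes ][ last sector ]
 *                dsize - 512 * (2 + sectors)                dsize - 1024          dsize - 512
 *
 * The single-sector anchor always sits in the second-to-last sector; when the
 * metadata spills past one sector the remainder is stored immediately before
 * it, and check_sum covers the whole mpb_size buffer.
 */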
1891 | ||
1892 | static void __free_imsm_disk(struct dl *d) | |
1893 | { | |
1894 | if (d->fd >= 0) | |
1895 | close(d->fd); | |
1896 | if (d->devname) | |
1897 | free(d->devname); | |
1898 | if (d->e) | |
1899 | free(d->e); | |
1900 | free(d); | |
1901 | } | |
1902 | ||
1903 | static void free_imsm_disks(struct intel_super *super) | |
1904 | { | |
1905 | struct dl *d; | |
1906 | ||
1907 | while (super->disks) { | |
1908 | d = super->disks; | |
1909 | super->disks = d->next; | |
1910 | __free_imsm_disk(d); | |
1911 | } | |
1912 | while (super->missing) { | |
1913 | d = super->missing; | |
1914 | super->missing = d->next; | |
1915 | __free_imsm_disk(d); | |
1916 | } | |
1917 | ||
1918 | } | |
1919 | ||
1920 | /* free all the pieces hanging off of a super pointer */ | |
1921 | static void __free_imsm(struct intel_super *super, int free_disks) | |
1922 | { | |
1923 | if (super->buf) { | |
1924 | free(super->buf); | |
1925 | super->buf = NULL; | |
1926 | } | |
1927 | if (free_disks) | |
1928 | free_imsm_disks(super); | |
1929 | free_devlist(super); | |
1930 | if (super->hba) { | |
1931 | free((void *) super->hba); | |
1932 | super->hba = NULL; | |
1933 | } | |
1934 | } | |
1935 | ||
1936 | static void free_imsm(struct intel_super *super) | |
1937 | { | |
1938 | __free_imsm(super, 1); | |
1939 | free(super); | |
1940 | } | |
1941 | ||
1942 | static void free_super_imsm(struct supertype *st) | |
1943 | { | |
1944 | struct intel_super *super = st->sb; | |
1945 | ||
1946 | if (!super) | |
1947 | return; | |
1948 | ||
1949 | free_imsm(super); | |
1950 | st->sb = NULL; | |
1951 | } | |
1952 | ||
1953 | static struct intel_super *alloc_super(int creating_imsm) | |
1954 | { | |
1955 | struct intel_super *super = malloc(sizeof(*super)); | |
1956 | ||
1957 | if (super) { | |
1958 | memset(super, 0, sizeof(*super)); | |
1959 | super->creating_imsm = creating_imsm; | |
1960 | super->current_vol = -1; | |
1961 | super->create_offset = ~((__u32 ) 0); | |
1962 | if (!check_env("IMSM_NO_PLATFORM")) | |
1963 | super->orom = find_imsm_orom(); | |
1964 | if (super->orom && !check_env("IMSM_TEST_OROM")) { | |
1965 | struct sys_dev *list, *ent; | |
1966 | ||
1967 | /* find the first intel ahci controller */ | |
1968 | list = find_driver_devices("pci", "ahci"); | |
1969 | for (ent = list; ent; ent = ent->next) | |
1970 | if (devpath_to_vendor(ent->path) == 0x8086) | |
1971 | break; | |
1972 | if (ent) { | |
1973 | super->hba = ent->path; | |
1974 | ent->path = NULL; | |
1975 | } | |
1976 | free_sys_dev(&list); | |
1977 | } | |
1978 | } | |
1979 | ||
1980 | return super; | |
1981 | } | |
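/* Environment overrides honored in this file (primarily for testing):
 *   IMSM_NO_PLATFORM       - skip option-rom detection entirely
 *   IMSM_TEST_OROM         - keep the detected orom but skip the scan for an
 *                            Intel AHCI controller (super->hba stays unset)
 *   IMSM_DEVNAME_AS_SERIAL - fall back to the /dev name when a disk has no
 *                            retrievable SCSI serial (see imsm_read_serial)
 */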
1982 | ||
1983 | #ifndef MDASSEMBLE | |
1984 | /* find_missing - helper routine for load_super_imsm_all that identifies | |
1985 | * disks that have disappeared from the system. This routine relies on | |
1986 | * the mpb being uptodate, which it is at load time. | |
1987 | */ | |
1988 | static int find_missing(struct intel_super *super) | |
1989 | { | |
1990 | int i; | |
1991 | struct imsm_super *mpb = super->anchor; | |
1992 | struct dl *dl; | |
1993 | struct imsm_disk *disk; | |
1994 | ||
1995 | for (i = 0; i < mpb->num_disks; i++) { | |
1996 | disk = __get_imsm_disk(mpb, i); | |
1997 | dl = serial_to_dl(disk->serial, super); | |
1998 | if (dl) | |
1999 | continue; | |
2000 | ||
2001 | dl = malloc(sizeof(*dl)); | |
2002 | if (!dl) | |
2003 | return 1; | |
2004 | dl->major = 0; | |
2005 | dl->minor = 0; | |
2006 | dl->fd = -1; | |
2007 | dl->devname = strdup("missing"); | |
2008 | dl->index = i; | |
2009 | serialcpy(dl->serial, disk->serial); | |
2010 | dl->disk = *disk; | |
2011 | dl->e = NULL; | |
2012 | dl->next = super->missing; | |
2013 | super->missing = dl; | |
2014 | } | |
2015 | ||
2016 | return 0; | |
2017 | } | |
2018 | ||
2019 | static int load_super_imsm_all(struct supertype *st, int fd, void **sbp, | |
2020 | char *devname, int keep_fd) | |
2021 | { | |
2022 | struct mdinfo *sra; | |
2023 | struct intel_super *super; | |
2024 | struct mdinfo *sd, *best = NULL; | |
2025 | __u32 bestgen = 0; | |
2026 | __u32 gen; | |
2027 | char nm[20]; | |
2028 | int dfd; | |
2029 | int rv; | |
2030 | int devnum = fd2devnum(fd); | |
2031 | int retry; | |
2032 | enum sysfs_read_flags flags; | |
2033 | ||
2034 | flags = GET_LEVEL|GET_VERSION|GET_DEVS|GET_STATE; | |
2035 | if (mdmon_running(devnum)) | |
2036 | flags |= SKIP_GONE_DEVS; | |
2037 | ||
2038 | /* check if 'fd' is an opened container */ | |
2039 | sra = sysfs_read(fd, 0, flags); | |
2040 | if (!sra) | |
2041 | return 1; | |
2042 | ||
2043 | if (sra->array.major_version != -1 || | |
2044 | sra->array.minor_version != -2 || | |
2045 | strcmp(sra->text_version, "imsm") != 0) | |
2046 | return 1; | |
2047 | ||
2048 | super = alloc_super(0); | |
2049 | if (!super) | |
2050 | return 1; | |
2051 | ||
2052 | /* find the most up to date disk in this array, skipping spares */ | |
2053 | for (sd = sra->devs; sd; sd = sd->next) { | |
2054 | sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor); | |
2055 | dfd = dev_open(nm, keep_fd ? O_RDWR : O_RDONLY); | |
2056 | if (dfd < 0) { | |
2057 | free_imsm(super); | |
2058 | return 2; | |
2059 | } | |
2060 | rv = load_imsm_mpb(dfd, super, NULL); | |
2061 | ||
2062 | /* retry the load if we might have raced against mdmon */ | |
2063 | if (rv == 3 && mdmon_running(devnum)) | |
2064 | for (retry = 0; retry < 3; retry++) { | |
2065 | usleep(3000); | |
2066 | rv = load_imsm_mpb(dfd, super, NULL); | |
2067 | if (rv != 3) | |
2068 | break; | |
2069 | } | |
2070 | if (!keep_fd) | |
2071 | close(dfd); | |
2072 | if (rv == 0) { | |
2073 | if (super->anchor->num_raid_devs == 0) | |
2074 | gen = 0; | |
2075 | else | |
2076 | gen = __le32_to_cpu(super->anchor->generation_num); | |
2077 | if (!best || gen > bestgen) { | |
2078 | bestgen = gen; | |
2079 | best = sd; | |
2080 | } | |
2081 | } else { | |
2082 | free_imsm(super); | |
2083 | return rv; | |
2084 | } | |
2085 | } | |
2086 | ||
2087 | if (!best) { | |
2088 | free_imsm(super); | |
2089 | return 1; | |
2090 | } | |
2091 | ||
2092 | /* load the most up to date anchor */ | |
2093 | sprintf(nm, "%d:%d", best->disk.major, best->disk.minor); | |
2094 | dfd = dev_open(nm, O_RDONLY); | |
2095 | if (dfd < 0) { | |
2096 | free_imsm(super); | |
2097 | return 1; | |
2098 | } | |
2099 | rv = load_imsm_mpb(dfd, super, NULL); | |
2100 | close(dfd); | |
2101 | if (rv != 0) { | |
2102 | free_imsm(super); | |
2103 | return 2; | |
2104 | } | |
2105 | ||
2106 | /* re-parse the disk list with the current anchor */ | |
2107 | for (sd = sra->devs ; sd ; sd = sd->next) { | |
2108 | sprintf(nm, "%d:%d", sd->disk.major, sd->disk.minor); | |
2109 | dfd = dev_open(nm, keep_fd ? O_RDWR : O_RDONLY); | |
2110 | if (dfd < 0) { | |
2111 | free_imsm(super); | |
2112 | return 2; | |
2113 | } | |
2114 | load_imsm_disk(dfd, super, NULL, keep_fd); | |
2115 | if (!keep_fd) | |
2116 | close(dfd); | |
2117 | } | |
2118 | ||
2119 | ||
2120 | if (find_missing(super) != 0) { | |
2121 | free_imsm(super); | |
2122 | return 2; | |
2123 | } | |
2124 | ||
2125 | if (st->subarray[0]) { | |
2126 | if (atoi(st->subarray) < super->anchor->num_raid_devs) | |
2127 | super->current_vol = atoi(st->subarray); | |
2128 | else | |
2129 | return 1; | |
2130 | } | |
2131 | ||
2132 | *sbp = super; | |
2133 | st->container_dev = devnum; | |
2134 | if (st->ss == NULL) { | |
2135 | st->ss = &super_imsm; | |
2136 | st->minor_version = 0; | |
2137 | st->max_devs = IMSM_MAX_DEVICES; | |
2138 | } | |
2139 | st->loaded_container = 1; | |
2140 | ||
2141 | return 0; | |
2142 | } | |
2143 | #endif | |
2144 | ||
2145 | static int load_super_imsm(struct supertype *st, int fd, char *devname) | |
2146 | { | |
2147 | struct intel_super *super; | |
2148 | int rv; | |
2149 | ||
2150 | #ifndef MDASSEMBLE | |
2151 | if (load_super_imsm_all(st, fd, &st->sb, devname, 1) == 0) | |
2152 | return 0; | |
2153 | #endif | |
2154 | if (st->subarray[0]) | |
2155 | return 1; /* FIXME */ | |
2156 | ||
2157 | super = alloc_super(0); | |
2158 | if (!super) { | |
2159 | fprintf(stderr, | |
2160 | Name ": malloc of %zu failed.\n", | |
2161 | sizeof(*super)); | |
2162 | return 1; | |
2163 | } | |
2164 | ||
2165 | rv = load_imsm_mpb(fd, super, devname); | |
2166 | ||
2167 | if (rv) { | |
2168 | if (devname) | |
2169 | fprintf(stderr, | |
2170 | Name ": Failed to load all information " | |
2171 | "sections on %s\n", devname); | |
2172 | free_imsm(super); | |
2173 | return rv; | |
2174 | } | |
2175 | ||
2176 | st->sb = super; | |
2177 | if (st->ss == NULL) { | |
2178 | st->ss = &super_imsm; | |
2179 | st->minor_version = 0; | |
2180 | st->max_devs = IMSM_MAX_DEVICES; | |
2181 | } | |
2182 | st->loaded_container = 0; | |
2183 | ||
2184 | return 0; | |
2185 | } | |
2186 | ||
2187 | static __u16 info_to_blocks_per_strip(mdu_array_info_t *info) | |
2188 | { | |
2189 | if (info->level == 1) | |
2190 | return 128; | |
2191 | return info->chunk_size >> 9; | |
2192 | } | |
2193 | ||
2194 | static __u32 info_to_num_data_stripes(mdu_array_info_t *info) | |
2195 | { | |
2196 | __u32 num_stripes; | |
2197 | ||
2198 | num_stripes = (info->size * 2) / info_to_blocks_per_strip(info); | |
2199 | if (info->level == 1) | |
2200 | num_stripes /= 2; | |
2201 | ||
2202 | return num_stripes; | |
2203 | } | |
2204 | ||
2205 | static __u32 info_to_blocks_per_member(mdu_array_info_t *info) | |
2206 | { | |
2207 | if (info->level == 1) | |
2208 | return info->size * 2; | |
2209 | else | |
2210 | return (info->size * 2) & ~(info_to_blocks_per_strip(info) - 1); | |
2211 | } | |
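/* Worked example for the three helpers above (hypothetical numbers): a raid0
 * volume with chunk_size = 131072 bytes (128 KiB) and a per-member size of
 * 102400 KiB gives blocks_per_strip = 131072 >> 9 = 256 sectors,
 * blocks_per_member = 204800 & ~255 = 204800 sectors, and num_data_stripes =
 * 204800 / 256 = 800.  For raid1 the strip is fixed at 128 sectors,
 * blocks_per_member is simply size * 2, and the stripe count is halved.
 */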
2212 | ||
2213 | static void imsm_update_version_info(struct intel_super *super) | |
2214 | { | |
2215 | /* update the version and attributes */ | |
2216 | struct imsm_super *mpb = super->anchor; | |
2217 | char *version; | |
2218 | struct imsm_dev *dev; | |
2219 | struct imsm_map *map; | |
2220 | int i; | |
2221 | ||
2222 | for (i = 0; i < mpb->num_raid_devs; i++) { | |
2223 | dev = get_imsm_dev(super, i); | |
2224 | map = get_imsm_map(dev, 0); | |
2225 | if (__le32_to_cpu(dev->size_high) > 0) | |
2226 | mpb->attributes |= MPB_ATTRIB_2TB; | |
2227 | ||
2228 | /* FIXME detect when an array spans a port multiplier */ | |
2229 | #if 0 | |
2230 | mpb->attributes |= MPB_ATTRIB_PM; | |
2231 | #endif | |
2232 | ||
2233 | if (mpb->num_raid_devs > 1 || | |
2234 | mpb->attributes != MPB_ATTRIB_CHECKSUM_VERIFY) { | |
2235 | version = MPB_VERSION_ATTRIBS; | |
2236 | switch (get_imsm_raid_level(map)) { | |
2237 | case 0: mpb->attributes |= MPB_ATTRIB_RAID0; break; | |
2238 | case 1: mpb->attributes |= MPB_ATTRIB_RAID1; break; | |
2239 | case 10: mpb->attributes |= MPB_ATTRIB_RAID10; break; | |
2240 | case 5: mpb->attributes |= MPB_ATTRIB_RAID5; break; | |
2241 | } | |
2242 | } else { | |
2243 | if (map->num_members >= 5) | |
2244 | version = MPB_VERSION_5OR6_DISK_ARRAY; | |
2245 | else if (dev->status == DEV_CLONE_N_GO) | |
2246 | version = MPB_VERSION_CNG; | |
2247 | else if (get_imsm_raid_level(map) == 5) | |
2248 | version = MPB_VERSION_RAID5; | |
2249 | else if (map->num_members >= 3) | |
2250 | version = MPB_VERSION_3OR4_DISK_ARRAY; | |
2251 | else if (get_imsm_raid_level(map) == 1) | |
2252 | version = MPB_VERSION_RAID1; | |
2253 | else | |
2254 | version = MPB_VERSION_RAID0; | |
2255 | } | |
2256 | strcpy(((char *) mpb->sig) + strlen(MPB_SIGNATURE), version); | |
2257 | } | |
2258 | } | |
2259 | ||
2260 | static int init_super_imsm_volume(struct supertype *st, mdu_array_info_t *info, | |
2261 | unsigned long long size, char *name, | |
2262 | char *homehost, int *uuid) | |
2263 | { | |
2264 | /* We are creating a volume inside a pre-existing container. | |
2265 | * so st->sb is already set. | |
2266 | */ | |
2267 | struct intel_super *super = st->sb; | |
2268 | struct imsm_super *mpb = super->anchor; | |
2269 | struct intel_dev *dv; | |
2270 | struct imsm_dev *dev; | |
2271 | struct imsm_vol *vol; | |
2272 | struct imsm_map *map; | |
2273 | int idx = mpb->num_raid_devs; | |
2274 | int i; | |
2275 | unsigned long long array_blocks; | |
2276 | size_t size_old, size_new; | |
2277 | ||
2278 | if (super->orom && mpb->num_raid_devs >= super->orom->vpa) { | |
2279 | fprintf(stderr, Name": This imsm-container already has the " | |
2280 | "maximum of %d volumes\n", super->orom->vpa); | |
2281 | return 0; | |
2282 | } | |
2283 | ||
2284 | /* ensure the mpb is large enough for the new data */ | |
2285 | size_old = __le32_to_cpu(mpb->mpb_size); | |
2286 | size_new = disks_to_mpb_size(info->nr_disks); | |
2287 | if (size_new > size_old) { | |
2288 | void *mpb_new; | |
2289 | size_t size_round = ROUND_UP(size_new, 512); | |
2290 | ||
2291 | if (posix_memalign(&mpb_new, 512, size_round) != 0) { | |
2292 | fprintf(stderr, Name": could not allocate new mpb\n"); | |
2293 | return 0; | |
2294 | } | |
2295 | memcpy(mpb_new, mpb, size_old); | |
2296 | free(mpb); | |
2297 | mpb = mpb_new; | |
2298 | super->anchor = mpb_new; | |
2299 | mpb->mpb_size = __cpu_to_le32(size_new); | |
2300 | memset(mpb_new + size_old, 0, size_round - size_old); | |
2301 | } | |
2302 | super->current_vol = idx; | |
2303 | /* when creating the first raid device in this container set num_disks | |
2304 | * to zero, i.e. delete this spare and add raid member devices in | |
2305 | * add_to_super_imsm_volume() | |
2306 | */ | |
2307 | if (super->current_vol == 0) | |
2308 | mpb->num_disks = 0; | |
2309 | ||
2310 | for (i = 0; i < super->current_vol; i++) { | |
2311 | dev = get_imsm_dev(super, i); | |
2312 | if (strncmp((char *) dev->volume, name, | |
2313 | MAX_RAID_SERIAL_LEN) == 0) { | |
2314 | fprintf(stderr, Name": '%s' is already defined for this container\n", | |
2315 | name); | |
2316 | return 0; | |
2317 | } | |
2318 | } | |
2319 | ||
2320 | sprintf(st->subarray, "%d", idx); | |
2321 | dv = malloc(sizeof(*dv)); | |
2322 | if (!dv) { | |
2323 | fprintf(stderr, Name ": failed to allocate device list entry\n"); | |
2324 | return 0; | |
2325 | } | |
2326 | dev = malloc(sizeof(*dev) + sizeof(__u32) * (info->raid_disks - 1)); | |
2327 | if (!dev) { | |
2328 | free(dv); | |
2329 | fprintf(stderr, Name": could not allocate raid device\n"); | |
2330 | return 0; | |
2331 | } | |
2332 | strncpy((char *) dev->volume, name, MAX_RAID_SERIAL_LEN); | |
2333 | if (info->level == 1) | |
2334 | array_blocks = info_to_blocks_per_member(info); | |
2335 | else | |
2336 | array_blocks = calc_array_size(info->level, info->raid_disks, | |
2337 | info->layout, info->chunk_size, | |
2338 | info->size*2); | |
2339 | dev->size_low = __cpu_to_le32((__u32) array_blocks); | |
2340 | dev->size_high = __cpu_to_le32((__u32) (array_blocks >> 32)); | |
2341 | dev->status = __cpu_to_le32(0); | |
2342 | dev->reserved_blocks = __cpu_to_le32(0); | |
2343 | vol = &dev->vol; | |
2344 | vol->migr_state = 0; | |
2345 | vol->migr_type = MIGR_INIT; | |
2346 | vol->dirty = 0; | |
2347 | vol->curr_migr_unit = 0; | |
2348 | map = get_imsm_map(dev, 0); | |
2349 | map->pba_of_lba0 = __cpu_to_le32(super->create_offset); | |
2350 | map->blocks_per_member = __cpu_to_le32(info_to_blocks_per_member(info)); | |
2351 | map->blocks_per_strip = __cpu_to_le16(info_to_blocks_per_strip(info)); | |
2352 | map->num_data_stripes = __cpu_to_le32(info_to_num_data_stripes(info)); | |
2353 | map->failed_disk_num = ~0; | |
2354 | map->map_state = info->level ? IMSM_T_STATE_UNINITIALIZED : | |
2355 | IMSM_T_STATE_NORMAL; | |
2356 | ||
2357 | if (info->level == 1 && info->raid_disks > 2) { | |
2358 | fprintf(stderr, Name": imsm does not support more than 2 disks" | |
2359 | " in a raid1 volume\n"); | |
2360 | return 0; | |
2361 | } | |
2362 | if (info->level == 10) { | |
2363 | map->raid_level = 1; | |
2364 | map->num_domains = info->raid_disks / 2; | |
2365 | } else { | |
2366 | map->raid_level = info->level; | |
2367 | map->num_domains = !!map->raid_level; | |
2368 | } | |
2369 | ||
2370 | map->num_members = info->raid_disks; | |
2371 | for (i = 0; i < map->num_members; i++) { | |
2372 | /* initialized in add_to_super */ | |
2373 | set_imsm_ord_tbl_ent(map, i, 0); | |
2374 | } | |
2375 | mpb->num_raid_devs++; | |
2376 | ||
2377 | dv->dev = dev; | |
2378 | dv->index = super->current_vol; | |
2379 | dv->next = super->devlist; | |
2380 | super->devlist = dv; | |
2381 | ||
2382 | imsm_update_version_info(super); | |
2383 | ||
2384 | return 1; | |
2385 | } | |
2386 | ||
2387 | static int init_super_imsm(struct supertype *st, mdu_array_info_t *info, | |
2388 | unsigned long long size, char *name, | |
2389 | char *homehost, int *uuid) | |
2390 | { | |
2391 | /* This is primarily called by Create when creating a new array. | |
2392 | * We will then get add_to_super called for each component, and then | |
2393 | * write_init_super called to write it out to each device. | |
2394 | * For IMSM, Create can create on fresh devices or on a pre-existing | |
2395 | * array. | |
2396 | * To create on a pre-existing array a different method will be called. | |
2397 | * This one is just for fresh drives. | |
2398 | */ | |
2399 | struct intel_super *super; | |
2400 | struct imsm_super *mpb; | |
2401 | size_t mpb_size; | |
2402 | char *version; | |
2403 | ||
2404 | if (!info) { | |
2405 | st->sb = NULL; | |
2406 | return 0; | |
2407 | } | |
2408 | if (st->sb) | |
2409 | return init_super_imsm_volume(st, info, size, name, homehost, | |
2410 | uuid); | |
2411 | ||
2412 | super = alloc_super(1); | |
2413 | if (!super) | |
2414 | return 0; | |
2415 | mpb_size = disks_to_mpb_size(info->nr_disks); | |
2416 | if (posix_memalign(&super->buf, 512, mpb_size) != 0) { | |
2417 | free(super); | |
2418 | return 0; | |
2419 | } | |
2420 | mpb = super->buf; | |
2421 | memset(mpb, 0, mpb_size); | |
2422 | ||
2423 | mpb->attributes = MPB_ATTRIB_CHECKSUM_VERIFY; | |
2424 | ||
2425 | version = (char *) mpb->sig; | |
2426 | strcpy(version, MPB_SIGNATURE); | |
2427 | version += strlen(MPB_SIGNATURE); | |
2428 | strcpy(version, MPB_VERSION_RAID0); | |
2429 | mpb->mpb_size = __cpu_to_le32(mpb_size); | |
2430 | ||
2431 | st->sb = super; | |
2432 | return 1; | |
2433 | } | |
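/* Typical call sequence from Create, sketched from the comment above (the
 * callers live in mdadm's generic code, not in this file):
 *
 *   init_super_imsm(st, info, ...)       st->sb == NULL -> fresh container mpb
 *   add_to_super_imsm(st, dk, fd, name)  once per disk; disks enter as spares
 *   write_init_super_imsm(st)            anchors written to every member
 *
 *   init_super_imsm(st, info, ...)       st->sb set -> init_super_imsm_volume
 *   add_to_super_imsm_volume(...)        promotes container spares to members
 *   write_init_super_imsm(st)            queued via st->update_tail when mdmon
 *                                        already owns the container
 */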
2434 | ||
2435 | #ifndef MDASSEMBLE | |
2436 | static int add_to_super_imsm_volume(struct supertype *st, mdu_disk_info_t *dk, | |
2437 | int fd, char *devname) | |
2438 | { | |
2439 | struct intel_super *super = st->sb; | |
2440 | struct imsm_super *mpb = super->anchor; | |
2441 | struct dl *dl; | |
2442 | struct imsm_dev *dev; | |
2443 | struct imsm_map *map; | |
2444 | ||
2445 | dev = get_imsm_dev(super, super->current_vol); | |
2446 | map = get_imsm_map(dev, 0); | |
2447 | ||
2448 | if (! (dk->state & (1<<MD_DISK_SYNC))) { | |
2449 | fprintf(stderr, Name ": %s: Cannot add spare devices to IMSM volume\n", | |
2450 | devname); | |
2451 | return 1; | |
2452 | } | |
2453 | ||
2454 | if (fd == -1) { | |
2455 | /* we're doing autolayout so grab the pre-marked (in | |
2456 | * validate_geometry) raid_disk | |
2457 | */ | |
2458 | for (dl = super->disks; dl; dl = dl->next) | |
2459 | if (dl->raiddisk == dk->raid_disk) | |
2460 | break; | |
2461 | } else { | |
2462 | for (dl = super->disks; dl ; dl = dl->next) | |
2463 | if (dl->major == dk->major && | |
2464 | dl->minor == dk->minor) | |
2465 | break; | |
2466 | } | |
2467 | ||
2468 | if (!dl) { | |
2469 | fprintf(stderr, Name ": %s is not a member of the same container\n", devname); | |
2470 | return 1; | |
2471 | } | |
2472 | ||
2473 | /* add a pristine spare to the metadata */ | |
2474 | if (dl->index < 0) { | |
2475 | dl->index = super->anchor->num_disks; | |
2476 | super->anchor->num_disks++; | |
2477 | } | |
2478 | set_imsm_ord_tbl_ent(map, dk->number, dl->index); | |
2479 | dl->disk.status = CONFIGURED_DISK | USABLE_DISK; | |
2480 | ||
2481 | /* if we are creating the first raid device update the family number */ | |
2482 | if (super->current_vol == 0) { | |
2483 | __u32 sum; | |
2484 | struct imsm_dev *_dev = __get_imsm_dev(mpb, 0); | |
2485 | struct imsm_disk *_disk = __get_imsm_disk(mpb, dl->index); | |
2486 | ||
2487 | *_dev = *dev; | |
2488 | *_disk = dl->disk; | |
2489 | sum = __gen_imsm_checksum(mpb); | |
2490 | mpb->family_num = __cpu_to_le32(sum); | |
2491 | } | |
2492 | ||
2493 | return 0; | |
2494 | } | |
2495 | ||
2496 | static int add_to_super_imsm(struct supertype *st, mdu_disk_info_t *dk, | |
2497 | int fd, char *devname) | |
2498 | { | |
2499 | struct intel_super *super = st->sb; | |
2500 | struct dl *dd; | |
2501 | unsigned long long size; | |
2502 | __u32 id; | |
2503 | int rv; | |
2504 | struct stat stb; | |
2505 | ||
2506 | /* if we are on a RAID enabled platform check that the disk is | |
2507 | * attached to the raid controller | |
2508 | */ | |
2509 | if (super->hba && !disk_attached_to_hba(fd, super->hba)) { | |
2510 | fprintf(stderr, | |
2511 | Name ": %s is not attached to the raid controller: %s\n", | |
2512 | devname ? : "disk", super->hba); | |
2513 | return 1; | |
2514 | } | |
2515 | ||
2516 | if (super->current_vol >= 0) | |
2517 | return add_to_super_imsm_volume(st, dk, fd, devname); | |
2518 | ||
2519 | fstat(fd, &stb); | |
2520 | dd = malloc(sizeof(*dd)); | |
2521 | if (!dd) { | |
2522 | fprintf(stderr, | |
2523 | Name ": malloc failed %s:%d.\n", __func__, __LINE__); | |
2524 | return 1; | |
2525 | } | |
2526 | memset(dd, 0, sizeof(*dd)); | |
2527 | dd->major = major(stb.st_rdev); | |
2528 | dd->minor = minor(stb.st_rdev); | |
2529 | dd->index = -1; | |
2530 | dd->devname = devname ? strdup(devname) : NULL; | |
2531 | dd->fd = fd; | |
2532 | dd->e = NULL; | |
2533 | rv = imsm_read_serial(fd, devname, dd->serial); | |
2534 | if (rv) { | |
2535 | fprintf(stderr, | |
2536 | Name ": failed to retrieve scsi serial, aborting\n"); | |
2537 | free(dd); | |
2538 | abort(); | |
2539 | } | |
2540 | ||
2541 | get_dev_size(fd, NULL, &size); | |
2542 | size /= 512; | |
2543 | serialcpy(dd->disk.serial, dd->serial); | |
2544 | dd->disk.total_blocks = __cpu_to_le32(size); | |
2545 | dd->disk.status = USABLE_DISK | SPARE_DISK; | |
2546 | if (sysfs_disk_to_scsi_id(fd, &id) == 0) | |
2547 | dd->disk.scsi_id = __cpu_to_le32(id); | |
2548 | else | |
2549 | dd->disk.scsi_id = __cpu_to_le32(0); | |
2550 | ||
2551 | if (st->update_tail) { | |
2552 | dd->next = super->add; | |
2553 | super->add = dd; | |
2554 | } else { | |
2555 | dd->next = super->disks; | |
2556 | super->disks = dd; | |
2557 | } | |
2558 | ||
2559 | return 0; | |
2560 | } | |
2561 | ||
2562 | static int store_imsm_mpb(int fd, struct intel_super *super); | |
2563 | ||
2564 | /* spare records have their own family number and do not have any defined raid | |
2565 | * devices | |
2566 | */ | |
2567 | static int write_super_imsm_spares(struct intel_super *super, int doclose) | |
2568 | { | |
2569 | struct imsm_super mpb_save; | |
2570 | struct imsm_super *mpb = super->anchor; | |
2571 | __u32 sum; | |
2572 | struct dl *d; | |
2573 | ||
2574 | mpb_save = *mpb; | |
2575 | mpb->num_raid_devs = 0; | |
2576 | mpb->num_disks = 1; | |
2577 | mpb->mpb_size = __cpu_to_le32(sizeof(struct imsm_super)); | |
2578 | mpb->generation_num = __cpu_to_le32(1UL); | |
2579 | ||
2580 | for (d = super->disks; d; d = d->next) { | |
2581 | if (d->index != -1) | |
2582 | continue; | |
2583 | ||
2584 | mpb->disk[0] = d->disk; | |
2585 | sum = __gen_imsm_checksum(mpb); | |
2586 | mpb->family_num = __cpu_to_le32(sum); | |
2587 | sum = __gen_imsm_checksum(mpb); | |
2588 | mpb->check_sum = __cpu_to_le32(sum); | |
2589 | ||
2590 | if (store_imsm_mpb(d->fd, super)) { | |
2591 | fprintf(stderr, "%s: failed for device %d:%d %s\n", | |
2592 | __func__, d->major, d->minor, strerror(errno)); | |
2593 | *mpb = mpb_save; | |
2594 | return 1; | |
2595 | } | |
2596 | if (doclose) { | |
2597 | close(d->fd); | |
2598 | d->fd = -1; | |
2599 | } | |
2600 | } | |
2601 | ||
2602 | *mpb = mpb_save; | |
2603 | return 0; | |
2604 | } | |
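/* The resulting spare record is intentionally minimal: num_disks = 1,
 * num_raid_devs = 0, generation_num = 1, and family_num is re-derived from
 * the record's own checksum, so an unassigned spare does not claim membership
 * in any existing family until it is activated.
 */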
2605 | ||
2606 | static int write_super_imsm(struct intel_super *super, int doclose) | |
2607 | { | |
2608 | struct imsm_super *mpb = super->anchor; | |
2609 | struct dl *d; | |
2610 | __u32 generation; | |
2611 | __u32 sum; | |
2612 | int spares = 0; | |
2613 | int i; | |
2614 | __u32 mpb_size = sizeof(struct imsm_super) - sizeof(struct imsm_disk); | |
2615 | ||
2616 | /* 'generation' is incremented every time the metadata is written */ | |
2617 | generation = __le32_to_cpu(mpb->generation_num); | |
2618 | generation++; | |
2619 | mpb->generation_num = __cpu_to_le32(generation); | |
2620 | ||
2621 | mpb_size += sizeof(struct imsm_disk) * mpb->num_disks; | |
2622 | for (d = super->disks; d; d = d->next) { | |
2623 | if (d->index == -1) | |
2624 | spares++; | |
2625 | else | |
2626 | mpb->disk[d->index] = d->disk; | |
2627 | } | |
2628 | for (d = super->missing; d; d = d->next) | |
2629 | mpb->disk[d->index] = d->disk; | |
2630 | ||
2631 | for (i = 0; i < mpb->num_raid_devs; i++) { | |
2632 | struct imsm_dev *dev = __get_imsm_dev(mpb, i); | |
2633 | ||
2634 | imsm_copy_dev(dev, get_imsm_dev(super, i)); | |
2635 | mpb_size += sizeof_imsm_dev(dev, 0); | |
2636 | } | |
2637 | mpb_size += __le32_to_cpu(mpb->bbm_log_size); | |
2638 | mpb->mpb_size = __cpu_to_le32(mpb_size); | |
2639 | ||
2640 | /* recalculate checksum */ | |
2641 | sum = __gen_imsm_checksum(mpb); | |
2642 | mpb->check_sum = __cpu_to_le32(sum); | |
2643 | ||
2644 | /* write the mpb for disks that compose raid devices */ | |
2645 | for (d = super->disks; d ; d = d->next) { | |
2646 | if (d->index < 0) | |
2647 | continue; | |
2648 | if (store_imsm_mpb(d->fd, super)) | |
2649 | fprintf(stderr, "%s: failed for device %d:%d %s\n", | |
2650 | __func__, d->major, d->minor, strerror(errno)); | |
2651 | if (doclose) { | |
2652 | close(d->fd); | |
2653 | d->fd = -1; | |
2654 | } | |
2655 | } | |
2656 | ||
2657 | if (spares) | |
2658 | return write_super_imsm_spares(super, doclose); | |
2659 | ||
2660 | return 0; | |
2661 | } | |
2662 | ||
2663 | ||
2664 | static int create_array(struct supertype *st) | |
2665 | { | |
2666 | size_t len; | |
2667 | struct imsm_update_create_array *u; | |
2668 | struct intel_super *super = st->sb; | |
2669 | struct imsm_dev *dev = get_imsm_dev(super, super->current_vol); | |
2670 | struct imsm_map *map = get_imsm_map(dev, 0); | |
2671 | struct disk_info *inf; | |
2672 | struct imsm_disk *disk; | |
2673 | int i; | |
2674 | int idx; | |
2675 | ||
2676 | len = sizeof(*u) - sizeof(*dev) + sizeof_imsm_dev(dev, 0) + | |
2677 | sizeof(*inf) * map->num_members; | |
2678 | u = malloc(len); | |
2679 | if (!u) { | |
2680 | fprintf(stderr, "%s: failed to allocate update buffer\n", | |
2681 | __func__); | |
2682 | return 1; | |
2683 | } | |
2684 | ||
2685 | u->type = update_create_array; | |
2686 | u->dev_idx = super->current_vol; | |
2687 | imsm_copy_dev(&u->dev, dev); | |
2688 | inf = get_disk_info(u); | |
2689 | for (i = 0; i < map->num_members; i++) { | |
2690 | idx = get_imsm_disk_idx(dev, i); | |
2691 | disk = get_imsm_disk(super, idx); | |
2692 | serialcpy(inf[i].serial, disk->serial); | |
2693 | } | |
2694 | append_metadata_update(st, u, len); | |
2695 | ||
2696 | return 0; | |
2697 | } | |
2698 | ||
2699 | static int _add_disk(struct supertype *st) | |
2700 | { | |
2701 | struct intel_super *super = st->sb; | |
2702 | size_t len; | |
2703 | struct imsm_update_add_disk *u; | |
2704 | ||
2705 | if (!super->add) | |
2706 | return 0; | |
2707 | ||
2708 | len = sizeof(*u); | |
2709 | u = malloc(len); | |
2710 | if (!u) { | |
2711 | fprintf(stderr, "%s: failed to allocate update buffer\n", | |
2712 | __func__); | |
2713 | return 1; | |
2714 | } | |
2715 | ||
2716 | u->type = update_add_disk; | |
2717 | append_metadata_update(st, u, len); | |
2718 | ||
2719 | return 0; | |
2720 | } | |
2721 | ||
2722 | static int write_init_super_imsm(struct supertype *st) | |
2723 | { | |
2724 | if (st->update_tail) { | |
2725 | /* queue the recently created array / added disk | |
2726 | * as a metadata update */ | |
2727 | struct intel_super *super = st->sb; | |
2728 | struct dl *d; | |
2729 | int rv; | |
2730 | ||
2731 | /* determine if we are creating a volume or adding a disk */ | |
2732 | if (super->current_vol < 0) { | |
2733 | /* in the add disk case we are running in mdmon | |
2734 | * context, so don't close fd's | |
2735 | */ | |
2736 | return _add_disk(st); | |
2737 | } else | |
2738 | rv = create_array(st); | |
2739 | ||
2740 | for (d = super->disks; d ; d = d->next) { | |
2741 | close(d->fd); | |
2742 | d->fd = -1; | |
2743 | } | |
2744 | ||
2745 | return rv; | |
2746 | } else | |
2747 | return write_super_imsm(st->sb, 1); | |
2748 | } | |
2749 | #endif | |
2750 | ||
2751 | static int store_zero_imsm(struct supertype *st, int fd) | |
2752 | { | |
2753 | unsigned long long dsize; | |
2754 | void *buf; | |
2755 | ||
2756 | get_dev_size(fd, NULL, &dsize); | |
2757 | ||
2758 | /* first block is stored on second to last sector of the disk */ | |
2759 | if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0) | |
2760 | return 1; | |
2761 | ||
2762 | if (posix_memalign(&buf, 512, 512) != 0) | |
2763 | return 1; | |
2764 | ||
2765 | memset(buf, 0, 512); | |
2766 | if (write(fd, buf, 512) != 512) | |
2767 | return 1; | |
2768 | return 0; | |
2769 | } | |
2770 | ||
2771 | static int imsm_bbm_log_size(struct imsm_super *mpb) | |
2772 | { | |
2773 | return __le32_to_cpu(mpb->bbm_log_size); | |
2774 | } | |
2775 | ||
2776 | #ifndef MDASSEMBLE | |
2777 | static int validate_geometry_imsm_container(struct supertype *st, int level, | |
2778 | int layout, int raiddisks, int chunk, | |
2779 | unsigned long long size, char *dev, | |
2780 | unsigned long long *freesize, | |
2781 | int verbose) | |
2782 | { | |
2783 | int fd; | |
2784 | unsigned long long ldsize; | |
2785 | const struct imsm_orom *orom; | |
2786 | ||
2787 | if (level != LEVEL_CONTAINER) | |
2788 | return 0; | |
2789 | if (!dev) | |
2790 | return 1; | |
2791 | ||
2792 | if (check_env("IMSM_NO_PLATFORM")) | |
2793 | orom = NULL; | |
2794 | else | |
2795 | orom = find_imsm_orom(); | |
2796 | if (orom && raiddisks > orom->tds) { | |
2797 | if (verbose) | |
2798 | fprintf(stderr, Name ": %d exceeds maximum number of" | |
2799 | " platform supported disks: %d\n", | |
2800 | raiddisks, orom->tds); | |
2801 | return 0; | |
2802 | } | |
2803 | ||
2804 | fd = open(dev, O_RDONLY|O_EXCL, 0); | |
2805 | if (fd < 0) { | |
2806 | if (verbose) | |
2807 | fprintf(stderr, Name ": imsm: Cannot open %s: %s\n", | |
2808 | dev, strerror(errno)); | |
2809 | return 0; | |
2810 | } | |
2811 | if (!get_dev_size(fd, dev, &ldsize)) { | |
2812 | close(fd); | |
2813 | return 0; | |
2814 | } | |
2815 | close(fd); | |
2816 | ||
2817 | *freesize = avail_size_imsm(st, ldsize >> 9); | |
2818 | ||
2819 | return 1; | |
2820 | } | |
2821 | ||
2822 | static unsigned long long find_size(struct extent *e, int *idx, int num_extents) | |
2823 | { | |
2824 | const unsigned long long base_start = e[*idx].start; | |
2825 | unsigned long long end = base_start + e[*idx].size; | |
2826 | int i; | |
2827 | ||
2828 | if (base_start == end) | |
2829 | return 0; | |
2830 | ||
2831 | *idx = *idx + 1; | |
2832 | for (i = *idx; i < num_extents; i++) { | |
2833 | /* extend overlapping extents */ | |
2834 | if (e[i].start >= base_start && | |
2835 | e[i].start <= end) { | |
2836 | if (e[i].size == 0) | |
2837 | return 0; | |
2838 | if (e[i].start + e[i].size > end) | |
2839 | end = e[i].start + e[i].size; | |
2840 | } else if (e[i].start > end) { | |
2841 | *idx = i; | |
2842 | break; | |
2843 | } | |
2844 | } | |
2845 | ||
2846 | return end - base_start; | |
2847 | } | |
2848 | ||
2849 | static unsigned long long merge_extents(struct intel_super *super, int sum_extents) | |
2850 | { | |
2851 | /* build a composite disk with all known extents and generate a new | |
2852 | * 'maxsize' given the "all disks in an array must share a common start | |
2853 | * offset" constraint | |
2854 | */ | |
2855 | struct extent *e = calloc(sum_extents, sizeof(*e)); | |
2856 | struct dl *dl; | |
2857 | int i, j; | |
2858 | int start_extent; | |
2859 | unsigned long long pos; | |
2860 | unsigned long long start = 0; | |
2861 | unsigned long long maxsize; | |
2862 | unsigned long reserve; | |
2863 | ||
2864 | if (!e) | |
2865 | return ~0ULL; /* error */ | |
2866 | ||
2867 | /* coalesce and sort all extents. also, check to see if we need to | |
2868 | * reserve space between member arrays | |
2869 | */ | |
2870 | j = 0; | |
2871 | for (dl = super->disks; dl; dl = dl->next) { | |
2872 | if (!dl->e) | |
2873 | continue; | |
2874 | for (i = 0; i < dl->extent_cnt; i++) | |
2875 | e[j++] = dl->e[i]; | |
2876 | } | |
2877 | qsort(e, sum_extents, sizeof(*e), cmp_extent); | |
2878 | ||
2879 | /* merge extents */ | |
2880 | i = 0; | |
2881 | j = 0; | |
2882 | while (i < sum_extents) { | |
2883 | e[j].start = e[i].start; | |
2884 | e[j].size = find_size(e, &i, sum_extents); | |
2885 | j++; | |
2886 | if (e[j-1].size == 0) | |
2887 | break; | |
2888 | } | |
2889 | ||
2890 | pos = 0; | |
2891 | maxsize = 0; | |
2892 | start_extent = 0; | |
2893 | i = 0; | |
2894 | do { | |
2895 | unsigned long long esize; | |
2896 | ||
2897 | esize = e[i].start - pos; | |
2898 | if (esize >= maxsize) { | |
2899 | maxsize = esize; | |
2900 | start = pos; | |
2901 | start_extent = i; | |
2902 | } | |
2903 | pos = e[i].start + e[i].size; | |
2904 | i++; | |
2905 | } while (e[i-1].size); | |
2906 | free(e); | |
2907 | ||
2908 | if (start_extent > 0) | |
2909 | reserve = IMSM_RESERVED_SECTORS; /* gap between raid regions */ | |
2910 | else | |
2911 | reserve = 0; | |
2912 | ||
2913 | if (maxsize < reserve) | |
2914 | return ~0ULL; | |
2915 | ||
2916 | super->create_offset = ~((__u32) 0); | |
2917 | if (start + reserve > super->create_offset) | |
2918 | return ~0ULL; /* start overflows create_offset */ | |
2919 | super->create_offset = start + reserve; | |
2920 | ||
2921 | return maxsize - reserve; | |
2922 | } | |
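/* Illustrative run of merge_extents() with made-up numbers: two disks each
 * contribute one in-use extent [0, 409600) from an existing volume plus a
 * zero-length terminating extent at 976773168.  After coalescing, the largest
 * free gap common to both disks starts at 409600; since start_extent > 0 an
 * IMSM_RESERVED_SECTORS (4096 sector) gap is kept between raid regions, so
 * create_offset becomes 413696 and the returned size is
 * 976773168 - 409600 - 4096 sectors.
 */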
2923 | ||
2924 | static int is_raid_level_supported(const struct imsm_orom *orom, int level, int raiddisks) | |
2925 | { | |
2926 | if (level < 0 || level == 6 || level == 4) | |
2927 | return 0; | |
2928 | ||
2929 | /* if we have an orom prevent invalid raid levels */ | |
2930 | if (orom) | |
2931 | switch (level) { | |
2932 | case 0: return imsm_orom_has_raid0(orom); | |
2933 | case 1: | |
2934 | if (raiddisks > 2) | |
2935 | return imsm_orom_has_raid1e(orom); | |
2936 | return imsm_orom_has_raid1(orom) && raiddisks == 2; | |
2937 | case 10: return imsm_orom_has_raid10(orom) && raiddisks == 4; | |
2938 | case 5: return imsm_orom_has_raid5(orom) && raiddisks > 2; | |
2939 | } | |
2940 | else | |
2941 | return 1; /* not on an Intel RAID platform so anything goes */ | |
2942 | ||
2943 | return 0; | |
2944 | } | |
2945 | ||
2946 | #define pr_vrb(fmt, arg...) (void) (verbose && fprintf(stderr, Name fmt, ##arg)) | |
2947 | /* validate_geometry_imsm_volume - lifted from validate_geometry_ddf_bvd | |
2948 | * FIX ME add ahci details | |
2949 | */ | |
2950 | static int validate_geometry_imsm_volume(struct supertype *st, int level, | |
2951 | int layout, int raiddisks, int chunk, | |
2952 | unsigned long long size, char *dev, | |
2953 | unsigned long long *freesize, | |
2954 | int verbose) | |
2955 | { | |
2956 | struct stat stb; | |
2957 | struct intel_super *super = st->sb; | |
2958 | struct imsm_super *mpb = super->anchor; | |
2959 | struct dl *dl; | |
2960 | unsigned long long pos = 0; | |
2961 | unsigned long long maxsize; | |
2962 | struct extent *e; | |
2963 | int i; | |
2964 | ||
2965 | /* We must have the container info already read in. */ | |
2966 | if (!super) | |
2967 | return 0; | |
2968 | ||
2969 | if (!is_raid_level_supported(super->orom, level, raiddisks)) { | |
2970 | pr_vrb(": platform does not support raid%d with %d disk%s\n", | |
2971 | level, raiddisks, raiddisks > 1 ? "s" : ""); | |
2972 | return 0; | |
2973 | } | |
2974 | if (super->orom && level != 1 && | |
2975 | !imsm_orom_has_chunk(super->orom, chunk)) { | |
2976 | pr_vrb(": platform does not support a chunk size of: %d\n", chunk); | |
2977 | return 0; | |
2978 | } | |
2979 | if (layout != imsm_level_to_layout(level)) { | |
2980 | if (level == 5) | |
2981 | pr_vrb(": imsm raid 5 only supports the left-asymmetric layout\n"); | |
2982 | else if (level == 10) | |
2983 | pr_vrb(": imsm raid 10 only supports the n2 layout\n"); | |
2984 | else | |
2985 | pr_vrb(": imsm unknown layout %#x for this raid level %d\n", | |
2986 | layout, level); | |
2987 | return 0; | |
2988 | } | |
2989 | ||
2990 | if (!dev) { | |
2991 | /* General test: make sure there is space for | |
2992 | * 'raiddisks' device extents of size 'size' at a given | |
2993 | * offset | |
2994 | */ | |
2995 | unsigned long long minsize = size; | |
2996 | unsigned long long start_offset = ~0ULL; | |
2997 | int dcnt = 0; | |
2998 | if (minsize == 0) | |
2999 | minsize = MPB_SECTOR_CNT + IMSM_RESERVED_SECTORS; | |
3000 | for (dl = super->disks; dl ; dl = dl->next) { | |
3001 | int found = 0; | |
3002 | ||
3003 | pos = 0; | |
3004 | i = 0; | |
3005 | e = get_extents(super, dl); | |
3006 | if (!e) continue; | |
3007 | do { | |
3008 | unsigned long long esize; | |
3009 | esize = e[i].start - pos; | |
3010 | if (esize >= minsize) | |
3011 | found = 1; | |
3012 | if (found && start_offset == ~0ULL) { | |
3013 | start_offset = pos; | |
3014 | break; | |
3015 | } else if (found && pos != start_offset) { | |
3016 | found = 0; | |
3017 | break; | |
3018 | } | |
3019 | pos = e[i].start + e[i].size; | |
3020 | i++; | |
3021 | } while (e[i-1].size); | |
3022 | if (found) | |
3023 | dcnt++; | |
3024 | free(e); | |
3025 | } | |
3026 | if (dcnt < raiddisks) { | |
3027 | if (verbose) | |
3028 | fprintf(stderr, Name ": imsm: Not enough " | |
3029 | "devices with space for this array " | |
3030 | "(%d < %d)\n", | |
3031 | dcnt, raiddisks); | |
3032 | return 0; | |
3033 | } | |
3034 | return 1; | |
3035 | } | |
3036 | ||
3037 | /* This device must be a member of the set */ | |
3038 | if (stat(dev, &stb) < 0) | |
3039 | return 0; | |
3040 | if ((S_IFMT & stb.st_mode) != S_IFBLK) | |
3041 | return 0; | |
3042 | for (dl = super->disks ; dl ; dl = dl->next) { | |
3043 | if (dl->major == major(stb.st_rdev) && | |
3044 | dl->minor == minor(stb.st_rdev)) | |
3045 | break; | |
3046 | } | |
3047 | if (!dl) { | |
3048 | if (verbose) | |
3049 | fprintf(stderr, Name ": %s is not in the " | |
3050 | "same imsm set\n", dev); | |
3051 | return 0; | |
3052 | } else if (super->orom && dl->index < 0 && mpb->num_raid_devs) { | |
3053 | /* If a volume is present then the current creation attempt | |
3054 | * cannot incorporate new spares because the orom may not | |
3055 | * understand this configuration (all member disks must be | |
3056 | * members of each array in the container). | |
3057 | */ | |
3058 | fprintf(stderr, Name ": %s is a spare and a volume" | |
3059 | " is already defined for this container\n", dev); | |
3060 | fprintf(stderr, Name ": The option-rom requires all member" | |
3061 | " disks to be a member of all volumes\n"); | |
3062 | return 0; | |
3063 | } | |
3064 | ||
3065 | /* retrieve the largest free space block */ | |
3066 | e = get_extents(super, dl); | |
3067 | maxsize = 0; | |
3068 | i = 0; | |
3069 | if (e) { | |
3070 | do { | |
3071 | unsigned long long esize; | |
3072 | ||
3073 | esize = e[i].start - pos; | |
3074 | if (esize >= maxsize) | |
3075 | maxsize = esize; | |
3076 | pos = e[i].start + e[i].size; | |
3077 | i++; | |
3078 | } while (e[i-1].size); | |
3079 | dl->e = e; | |
3080 | dl->extent_cnt = i; | |
3081 | } else { | |
3082 | if (verbose) | |
3083 | fprintf(stderr, Name ": unable to determine free space for: %s\n", | |
3084 | dev); | |
3085 | return 0; | |
3086 | } | |
3087 | if (maxsize < size) { | |
3088 | if (verbose) | |
3089 | fprintf(stderr, Name ": %s not enough space (%llu < %llu)\n", | |
3090 | dev, maxsize, size); | |
3091 | return 0; | |
3092 | } | |
3093 | ||
3094 | /* count total number of extents for merge */ | |
3095 | i = 0; | |
3096 | for (dl = super->disks; dl; dl = dl->next) | |
3097 | if (dl->e) | |
3098 | i += dl->extent_cnt; | |
3099 | ||
3100 | maxsize = merge_extents(super, i); | |
3101 | if (maxsize < size) { | |
3102 | if (verbose) | |
3103 | fprintf(stderr, Name ": not enough space after merge (%llu < %llu)\n", | |
3104 | maxsize, size); | |
3105 | return 0; | |
3106 | } else if (maxsize == ~0ULL) { | |
3107 | if (verbose) | |
3108 | fprintf(stderr, Name ": failed to merge %d extents\n", i); | |
3109 | return 0; | |
3110 | } | |
3111 | ||
3112 | *freesize = maxsize; | |
3113 | ||
3114 | return 1; | |
3115 | } | |
3116 | ||
3117 | static int reserve_space(struct supertype *st, int raiddisks, | |
3118 | unsigned long long size, int chunk, | |
3119 | unsigned long long *freesize) | |
3120 | { | |
3121 | struct intel_super *super = st->sb; | |
3122 | struct imsm_super *mpb = super->anchor; | |
3123 | struct dl *dl; | |
3124 | int i; | |
3125 | int extent_cnt; | |
3126 | struct extent *e; | |
3127 | unsigned long long maxsize; | |
3128 | unsigned long long minsize; | |
3129 | int cnt; | |
3130 | int used; | |
3131 | ||
3132 | /* find the largest common start free region of the possible disks */ | |
3133 | used = 0; | |
3134 | extent_cnt = 0; | |
3135 | cnt = 0; | |
3136 | for (dl = super->disks; dl; dl = dl->next) { | |
3137 | dl->raiddisk = -1; | |
3138 | ||
3139 | if (dl->index >= 0) | |
3140 | used++; | |
3141 | ||
3142 | /* don't activate new spares if we are orom constrained | |
3143 | * and there is already a volume active in the container | |
3144 | */ | |
3145 | if (super->orom && dl->index < 0 && mpb->num_raid_devs) | |
3146 | continue; | |
3147 | ||
3148 | e = get_extents(super, dl); | |
3149 | if (!e) | |
3150 | continue; | |
3151 | for (i = 1; e[i-1].size; i++) | |
3152 | ; | |
3153 | dl->e = e; | |
3154 | dl->extent_cnt = i; | |
3155 | extent_cnt += i; | |
3156 | cnt++; | |
3157 | } | |
3158 | ||
3159 | maxsize = merge_extents(super, extent_cnt); | |
3160 | minsize = size; | |
3161 | if (size == 0) | |
3162 | minsize = chunk; | |
3163 | ||
3164 | if (cnt < raiddisks || | |
3165 | (super->orom && used && used != raiddisks) || | |
3166 | maxsize < minsize) { | |
3167 | fprintf(stderr, Name ": not enough devices with space to create array.\n"); | |
3168 | return 0; /* not enough disks with sufficiently large free space */ | |
3169 | } | |
3170 | ||
3171 | if (size == 0) { | |
3172 | size = maxsize; | |
3173 | if (chunk) { | |
3174 | size /= chunk; | |
3175 | size *= chunk; | |
3176 | } | |
3177 | } | |
3178 | ||
3179 | cnt = 0; | |
3180 | for (dl = super->disks; dl; dl = dl->next) | |
3181 | if (dl->e) | |
3182 | dl->raiddisk = cnt++; | |
3183 | ||
3184 | *freesize = size; | |
3185 | ||
3186 | return 1; | |
3187 | } | |
3188 | ||
3189 | static int validate_geometry_imsm(struct supertype *st, int level, int layout, | |
3190 | int raiddisks, int chunk, unsigned long long size, | |
3191 | char *dev, unsigned long long *freesize, | |
3192 | int verbose) | |
3193 | { | |
3194 | int fd, cfd; | |
3195 | struct mdinfo *sra; | |
3196 | ||
3197 | /* if given unused devices create a container | |
3198 | * if given devices in a container create a member volume | |
3199 | */ | |
3200 | if (level == LEVEL_CONTAINER) { | |
3201 | /* Must be a fresh device to add to a container */ | |
3202 | return validate_geometry_imsm_container(st, level, layout, | |
3203 | raiddisks, chunk, size, | |
3204 | dev, freesize, | |
3205 | verbose); | |
3206 | } | |
3207 | ||
3208 | if (!dev) { | |
3209 | if (st->sb && freesize) { | |
3210 | /* we are being asked to automatically lay out a | |
3211 | * new volume based on the current contents of | |
3212 | * the container. If the parameters can be | |
3213 | * satisfied reserve_space will record the disks, | |
3214 | * start offset, and size of the volume to be | |
3215 | * created. add_to_super and getinfo_super | |
3216 | * detect when autolayout is in progress. | |
3217 | */ | |
3218 | return reserve_space(st, raiddisks, size, chunk, freesize); | |
3219 | } | |
3220 | return 1; | |
3221 | } | |
3222 | if (st->sb) { | |
3223 | /* creating in a given container */ | |
3224 | return validate_geometry_imsm_volume(st, level, layout, | |
3225 | raiddisks, chunk, size, | |
3226 | dev, freesize, verbose); | |
3227 | } | |
3228 | ||
3229 | /* limit creation to the following levels */ | |
3230 | if (!dev) | |
3231 | switch (level) { | |
3232 | case 0: | |
3233 | case 1: | |
3234 | case 10: | |
3235 | case 5: | |
3236 | break; | |
3237 | default: | |
3238 | return 1; | |
3239 | } | |
3240 | ||
3241 | /* This device needs to be a device in an 'imsm' container */ | |
3242 | fd = open(dev, O_RDONLY|O_EXCL, 0); | |
3243 | if (fd >= 0) { | |
3244 | if (verbose) | |
3245 | fprintf(stderr, | |
3246 | Name ": Cannot create this array on device %s\n", | |
3247 | dev); | |
3248 | close(fd); | |
3249 | return 0; | |
3250 | } | |
3251 | if (errno != EBUSY || (fd = open(dev, O_RDONLY, 0)) < 0) { | |
3252 | if (verbose) | |
3253 | fprintf(stderr, Name ": Cannot open %s: %s\n", | |
3254 | dev, strerror(errno)); | |
3255 | return 0; | |
3256 | } | |
3257 | /* Well, it is in use by someone, maybe an 'imsm' container. */ | |
3258 | cfd = open_container(fd); | |
3259 | if (cfd < 0) { | |
3260 | close(fd); | |
3261 | if (verbose) | |
3262 | fprintf(stderr, Name ": Cannot use %s: It is busy\n", | |
3263 | dev); | |
3264 | return 0; | |
3265 | } | |
3266 | sra = sysfs_read(cfd, 0, GET_VERSION); | |
3267 | close(fd); | |
3268 | if (sra && sra->array.major_version == -1 && | |
3269 | strcmp(sra->text_version, "imsm") == 0) { | |
3270 | /* This is a member of an imsm container. Load the container | |
3271 | * and try to create a volume | |
3272 | */ | |
3273 | struct intel_super *super; | |
3274 | ||
3275 | if (load_super_imsm_all(st, cfd, (void **) &super, NULL, 1) == 0) { | |
3276 | st->sb = super; | |
3277 | st->container_dev = fd2devnum(cfd); | |
3278 | close(cfd); | |
3279 | return validate_geometry_imsm_volume(st, level, layout, | |
3280 | raiddisks, chunk, | |
3281 | size, dev, | |
3282 | freesize, verbose); | |
3283 | } | |
3284 | close(cfd); | |
3285 | } else /* may belong to another container */ | |
3286 | return 0; | |
3287 | ||
3288 | return 1; | |
3289 | } | |
3290 | #endif /* MDASSEMBLE */ | |
3291 | ||
3292 | static struct mdinfo *container_content_imsm(struct supertype *st) | |
3293 | { | |
3294 | /* Given a container loaded by load_super_imsm_all, | |
3295 | * extract information about all the arrays into | |
3296 | * an mdinfo tree. | |
3297 | * | |
3298 | * For each imsm_dev create an mdinfo, fill it in, | |
3299 | * then look for matching devices in super->disks | |
3300 | * and create appropriate device mdinfo. | |
3301 | */ | |
3302 | struct intel_super *super = st->sb; | |
3303 | struct imsm_super *mpb = super->anchor; | |
3304 | struct mdinfo *rest = NULL; | |
3305 | int i; | |
3306 | ||
3307 | /* do not assemble arrays that might have bad blocks */ | |
3308 | if (imsm_bbm_log_size(super->anchor)) { | |
3309 | fprintf(stderr, Name ": BBM log found in metadata. " | |
3310 | "Cannot activate array(s).\n"); | |
3311 | return NULL; | |
3312 | } | |
3313 | ||
3314 | for (i = 0; i < mpb->num_raid_devs; i++) { | |
3315 | struct imsm_dev *dev = get_imsm_dev(super, i); | |
3316 | struct imsm_map *map = get_imsm_map(dev, 0); | |
3317 | struct mdinfo *this; | |
3318 | int slot; | |
3319 | ||
3320 | this = malloc(sizeof(*this)); | |
3321 | memset(this, 0, sizeof(*this)); | |
3322 | this->next = rest; | |
3323 | ||
3324 | super->current_vol = i; | |
3325 | getinfo_super_imsm_volume(st, this); | |
3326 | for (slot = 0 ; slot < map->num_members; slot++) { | |
3327 | struct mdinfo *info_d; | |
3328 | struct dl *d; | |
3329 | int idx; | |
3330 | int skip; | |
3331 | __u32 s; | |
3332 | __u32 ord; | |
3333 | ||
3334 | skip = 0; | |
3335 | idx = get_imsm_disk_idx(dev, slot); | |
3336 | ord = get_imsm_ord_tbl_ent(dev, slot); | |
3337 | for (d = super->disks; d ; d = d->next) | |
3338 | if (d->index == idx) | |
3339 | break; | |
3340 | ||
3341 | if (d == NULL) | |
3342 | skip = 1; | |
3343 | ||
3344 | s = d ? d->disk.status : 0; | |
3345 | if (s & FAILED_DISK) | |
3346 | skip = 1; | |
3347 | if (!(s & USABLE_DISK)) | |
3348 | skip = 1; | |
3349 | if (ord & IMSM_ORD_REBUILD) | |
3350 | skip = 1; | |
3351 | ||
3352 | /* | |
3353 | * if we skip some disks the array will be assembled degraded; | |
3354 | * reset resync start to avoid a dirty-degraded situation | |
3355 | * | |
3356 | * FIXME handle dirty degraded | |
3357 | */ | |
3358 | if (skip && !dev->vol.dirty) | |
3359 | this->resync_start = ~0ULL; | |
3360 | if (skip) | |
3361 | continue; | |
3362 | ||
3363 | info_d = malloc(sizeof(*info_d)); | |
3364 | if (!info_d) { | |
3365 | fprintf(stderr, Name ": failed to allocate disk" | |
3366 | " for volume %s\n", (char *) dev->volume); | |
3367 | free(this); | |
3368 | this = rest; | |
3369 | break; | |
3370 | } | |
3371 | memset(info_d, 0, sizeof(*info_d)); | |
3372 | info_d->next = this->devs; | |
3373 | this->devs = info_d; | |
3374 | ||
3375 | info_d->disk.number = d->index; | |
3376 | info_d->disk.major = d->major; | |
3377 | info_d->disk.minor = d->minor; | |
3378 | info_d->disk.raid_disk = slot; | |
3379 | ||
3380 | this->array.working_disks++; | |
3381 | ||
3382 | info_d->events = __le32_to_cpu(mpb->generation_num); | |
3383 | info_d->data_offset = __le32_to_cpu(map->pba_of_lba0); | |
3384 | info_d->component_size = __le32_to_cpu(map->blocks_per_member); | |
3385 | if (d->devname) | |
3386 | strcpy(info_d->name, d->devname); | |
3387 | } | |
3388 | rest = this; | |
3389 | } | |
3390 | ||
3391 | return rest; | |
3392 | } | |
3393 | ||
3394 | ||
3395 | #ifndef MDASSEMBLE | |
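/* mdmon callback: attach a newly monitored array to its subarray index
 * within the container metadata (range-checked against num_raid_devs)
 */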
3396 | static int imsm_open_new(struct supertype *c, struct active_array *a, | |
3397 | char *inst) | |
3398 | { | |
3399 | struct intel_super *super = c->sb; | |
3400 | struct imsm_super *mpb = super->anchor; | |
3401 | ||
3402 | if (atoi(inst) >= mpb->num_raid_devs) { | |
3403 | fprintf(stderr, "%s: subarry index %d, out of range\n", | |
3404 | __func__, atoi(inst)); | |
3405 | return -ENODEV; | |
3406 | } | |
3407 | ||
3408 | dprintf("imsm: open_new %s\n", inst); | |
3409 | a->info.container_member = atoi(inst); | |
3410 | return 0; | |
3411 | } | |
3412 | ||
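/* map a failed-disk count onto the imsm map state for this raid level:
 * NORMAL (or UNINITIALIZED) when nothing has failed, DEGRADED while the
 * remaining redundancy still covers the failures, FAILED once it does not
 */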
3413 | static __u8 imsm_check_degraded(struct intel_super *super, struct imsm_dev *dev, int failed) | |
3414 | { | |
3415 | struct imsm_map *map = get_imsm_map(dev, 0); | |
3416 | ||
3417 | if (!failed) | |
3418 | return map->map_state == IMSM_T_STATE_UNINITIALIZED ? | |
3419 | IMSM_T_STATE_UNINITIALIZED : IMSM_T_STATE_NORMAL; | |
3420 | ||
3421 | switch (get_imsm_raid_level(map)) { | |
3422 | case 0: | |
3423 | return IMSM_T_STATE_FAILED; | |
3424 | break; | |
3425 | case 1: | |
3426 | if (failed < map->num_members) | |
3427 | return IMSM_T_STATE_DEGRADED; | |
3428 | else | |
3429 | return IMSM_T_STATE_FAILED; | |
3430 | break; | |
3431 | case 10: | |
3432 | { | |
3433 | /** | |
3434 | * check whether any mirror pair has completely failed; if so | |
3435 | * the whole array has failed, otherwise it is at worst | |
3436 | * degraded. Even numbered slots are mirrored on slot+1 | |
3437 | */ | |
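/* e.g. in a 4-disk raid10 the pairs are (0,1) and (2,3): losing
 * disks 0 and 2 leaves the array degraded, losing 0 and 1 fails it
 */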
3438 | int i; | |
3439 | /* gcc -Os warns this may be used uninitialized; self-init quiets it */ | |
3440 | int insync = insync; | |
3441 | ||
3442 | for (i = 0; i < map->num_members; i++) { | |
3443 | __u32 ord = get_imsm_ord_tbl_ent(dev, i); | |
3444 | int idx = ord_to_idx(ord); | |
3445 | struct imsm_disk *disk; | |
3446 | ||
3447 | /* reset the potential in-sync count on even-numbered | |
3448 | * slots. num_copies is always 2 for imsm raid10 | |
3449 | */ | |
3450 | if ((i & 1) == 0) | |
3451 | insync = 2; | |
3452 | ||
3453 | disk = get_imsm_disk(super, idx); | |
3454 | if (!disk || disk->status & FAILED_DISK || | |
3455 | ord & IMSM_ORD_REBUILD) | |
3456 | insync--; | |
3457 | ||
3458 | /* no in-sync disks left in this mirror: the | |
3459 | * array has failed | |
3460 | */ | |
3461 | if (insync == 0) | |
3462 | return IMSM_T_STATE_FAILED; | |
3463 | } | |
3464 | ||
3465 | return IMSM_T_STATE_DEGRADED; | |
3466 | } | |
3467 | case 5: | |
3468 | if (failed < 2) | |
3469 | return IMSM_T_STATE_DEGRADED; | |
3470 | else | |
3471 | return IMSM_T_STATE_FAILED; | |
3472 | break; | |
3473 | default: | |
3474 | break; | |
3475 | } | |
3476 | ||
3477 | return map->map_state; | |
3478 | } | |
3479 | ||
3480 | static int imsm_count_failed(struct intel_super *super, struct imsm_dev *dev) | |
3481 | { | |
3482 | int i; | |
3483 | int failed = 0; | |
3484 | struct imsm_disk *disk; | |
3485 | struct imsm_map *map = get_imsm_map(dev, 0); | |
3486 | struct imsm_map *prev = get_imsm_map(dev, dev->vol.migr_state); | |
3487 | __u32 ord; | |
3488 | int idx; | |
3489 | ||
3490 | /* at the beginning of migration we set IMSM_ORD_REBUILD on | |
3491 | * disks that are being rebuilt. New failures are recorded to | |
3492 | * map[0]. So we look through all the disks we started with and | |
3493 | * see if any failures are still present, or if any new ones | |
3494 | * have arrived | |
3495 | * | |
3496 | * FIXME add support for online capacity expansion and | |
3497 | * raid-level-migration | |
3498 | */ | |
3499 | for (i = 0; i < prev->num_members; i++) { | |
3500 | ord = __le32_to_cpu(prev->disk_ord_tbl[i]); | |
3501 | ord |= __le32_to_cpu(map->disk_ord_tbl[i]); | |
3502 | idx = ord_to_idx(ord); | |
3503 | ||
3504 | disk = get_imsm_disk(super, idx); | |
3505 | if (!disk || disk->status & FAILED_DISK || | |
3506 | ord & IMSM_ORD_REBUILD) | |
3507 | failed++; | |
3508 | } | |
3509 | ||
3510 | return failed; | |
3511 | } | |
3512 | ||
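/* a volume counts as resyncing if it is under an initialization migration,
 * or any other migration whose migration map is still in the NORMAL state
 */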
3513 | static int is_resyncing(struct imsm_dev *dev) | |
3514 | { | |
3515 | struct imsm_map *migr_map; | |
3516 | ||
3517 | if (!dev->vol.migr_state) | |
3518 | return 0; | |
3519 | ||
3520 | if (dev->vol.migr_type == MIGR_INIT) | |
3521 | return 1; | |
3522 | ||
3523 | migr_map = get_imsm_map(dev, 1); | |
3524 | ||
3525 | if (migr_map->map_state == IMSM_T_STATE_NORMAL) | |
3526 | return 1; | |
3527 | else | |
3528 | return 0; | |
3529 | } | |
3530 | ||
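/* a volume counts as rebuilding if it is under a rebuild migration and the
 * migration map is still marked degraded
 */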
3531 | static int is_rebuilding(struct imsm_dev *dev) | |
3532 | { | |
3533 | struct imsm_map *migr_map; | |
3534 | ||
3535 | if (!dev->vol.migr_state) | |
3536 | return 0; | |
3537 | ||
3538 | if (dev->vol.migr_type != MIGR_REBUILD) | |
3539 | return 0; | |
3540 | ||
3541 | migr_map = get_imsm_map(dev, 1); | |
3542 | ||
3543 | if (migr_map->map_state == IMSM_T_STATE_DEGRADED) | |
3544 | return 1; | |
3545 | else | |
3546 | return 0; | |
3547 | } | |
3548 | ||
3549 | /* return true if we recorded new information */ | |
3550 | static int mark_failure(struct imsm_dev *dev, struct imsm_disk *disk, int idx) | |
3551 | { | |
3552 | __u32 ord; | |
3553 | int slot; | |
3554 | struct imsm_map *map; | |
3555 | ||
3556 | /* new failures are always set in map[0] */ | |
3557 | map = get_imsm_map(dev, 0); | |
3558 | ||
3559 | slot = get_imsm_disk_slot(map, idx); | |
3560 | if (slot < 0) | |
3561 | return 0; | |
3562 | ||
3563 | ord = __le32_to_cpu(map->disk_ord_tbl[slot]); | |
3564 | if ((disk->status & FAILED_DISK) && (ord & IMSM_ORD_REBUILD)) | |
3565 | return 0; | |
3566 | ||
3567 | disk->status |= FAILED_DISK; | |
3568 | set_imsm_ord_tbl_ent(map, slot, idx | IMSM_ORD_REBUILD); | |
3569 | if (map->failed_disk_num == ~0) | |
3570 | map->failed_disk_num = slot; | |
3571 | return 1; | |
3572 | } | |
3573 | ||
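/* mark a disk as both failed and missing: besides setting FAILED_DISK the
 * scsi_id is invalidated and the serial is shifted, presumably so the stale
 * record cannot match the physical device should it reappear
 */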
3574 | static void mark_missing(struct imsm_dev *dev, struct imsm_disk *disk, int idx) | |
3575 | { | |
3576 | mark_failure(dev, disk, idx); | |
3577 | ||
3578 | if (disk->scsi_id == __cpu_to_le32(~(__u32)0)) | |
3579 | return; | |
3580 | ||
3581 | disk->scsi_id = __cpu_to_le32(~(__u32)0); | |
3582 | memmove(&disk->serial[0], &disk->serial[1], MAX_RAID_SERIAL_LEN - 1); | |
3583 | } | |
3584 | ||
3585 | /* Handle dirty -> clean transitions and resync. Degraded and rebuild | |
3586 | * states are handled in imsm_set_disk() with one exception, when a | |
3587 | * resync is stopped due to a new failure this routine will set the | |
3588 | * 'degraded' state for the array. | |
3589 | */ | |
3590 | static int imsm_set_array_state(struct active_array *a, int consistent) | |
3591 | { | |
3592 | int inst = a->info.container_member; | |
3593 | struct intel_super *super = a->container->sb; | |
3594 | struct imsm_dev *dev = get_imsm_dev(super, inst); | |
3595 | struct imsm_map *map = get_imsm_map(dev, 0); | |
3596 | int failed = imsm_count_failed(super, dev); | |
3597 | __u8 map_state = imsm_check_degraded(super, dev, failed); | |
3598 | ||
3599 | /* before we activate this array handle any missing disks */ | |
3600 | if (consistent == 2 && super->missing) { | |
3601 | struct dl *dl; | |
3602 | ||
3603 | dprintf("imsm: mark missing\n"); | |
3604 | end_migration(dev, map_state); | |
3605 | for (dl = super->missing; dl; dl = dl->next) | |
3606 | mark_missing(dev, &dl->disk, dl->index); | |
3607 | super->updates_pending++; | |
3608 | } | |
3609 | ||
3610 | if (consistent == 2 && | |
3611 | (!is_resync_complete(a) || | |
3612 | map_state != IMSM_T_STATE_NORMAL || | |
3613 | dev->vol.migr_state)) | |
3614 | consistent = 0; | |
3615 | ||
3616 | if (is_resync_complete(a)) { | |
3617 | /* complete initialization / resync; | |
3618 | * recovery and interrupted recovery are completed in | |
3619 | * ->set_disk | |
3620 | */ | |
3621 | if (is_resyncing(dev)) { | |
3622 | dprintf("imsm: mark resync done\n"); | |
3623 | end_migration(dev, map_state); | |
3624 | super->updates_pending++; | |
3625 | } | |
3626 | } else if (!is_resyncing(dev) && !failed) { | |
3627 | /* mark the start of the init process if nothing is failed */ | |
3628 | dprintf("imsm: mark resync start (%llu)\n", a->resync_start); | |
3629 | if (map->map_state == IMSM_T_STATE_NORMAL) | |
3630 | migrate(dev, IMSM_T_STATE_NORMAL, MIGR_REBUILD); | |
3631 | else | |
3632 | migrate(dev, IMSM_T_STATE_NORMAL, MIGR_INIT); | |
3633 | super->updates_pending++; | |
3634 | } | |
3635 | ||
3636 | /* check if we can update the migration checkpoint */ | |
3637 | if (dev->vol.migr_state && | |
3638 | __le32_to_cpu(dev->vol.curr_migr_unit) != a->resync_start) { | |
3639 | dprintf("imsm: checkpoint migration (%llu)\n", a->resync_start); | |
3640 | dev->vol.curr_migr_unit = __cpu_to_le32(a->resync_start); | |
3641 | super->updates_pending++; | |
3642 | } | |
3643 | ||
3644 | /* mark dirty / clean */ | |
3645 | if (dev->vol.dirty != !consistent) { | |
3646 | dprintf("imsm: mark '%s' (%llu)\n", | |
3647 | consistent ? "clean" : "dirty", a->resync_start); | |
3648 | if (consistent) | |
3649 | dev->vol.dirty = 0; | |
3650 | else | |
3651 | dev->vol.dirty = 1; | |
3652 | super->updates_pending++; | |
3653 | } | |
3654 | return consistent; | |
3655 | } | |
3656 | ||
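/* mdmon callback: apply a per-disk state change (faulty, in-sync) to the
 * metadata and re-evaluate the map state of the member array
 */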
3657 | static void imsm_set_disk(struct active_array *a, int n, int state) | |
3658 | { | |
3659 | int inst = a->info.container_member; | |
3660 | struct intel_super *super = a->container->sb; | |
3661 | struct imsm_dev *dev = get_imsm_dev(super, inst); | |
3662 | struct imsm_map *map = get_imsm_map(dev, 0); | |
3663 | struct imsm_disk *disk; | |
3664 | int failed; | |
3665 | __u32 ord; | |
3666 | __u8 map_state; | |
3667 | ||
3668 | if (n >= map->num_members) | |
3669 | fprintf(stderr, "imsm: set_disk %d out of range 0..%d\n", | |
3670 | n, map->num_members - 1); | |
3671 | ||
3672 | if (n < 0) | |
3673 | return; | |
3674 | ||
3675 | dprintf("imsm: set_disk %d:%x\n", n, state); | |
3676 | ||
3677 | ord = get_imsm_ord_tbl_ent(dev, n); | |
3678 | disk = get_imsm_disk(super, ord_to_idx(ord)); | |
3679 | ||
3680 | /* check for new failures */ | |
3681 | if (state & DS_FAULTY) { | |
3682 | if (mark_failure(dev, disk, ord_to_idx(ord))) | |
3683 | super->updates_pending++; | |
3684 | } | |
3685 | ||
3686 | /* check if in_sync */ | |
3687 | if (state & DS_INSYNC && ord & IMSM_ORD_REBUILD && is_rebuilding(dev)) { | |
3688 | struct imsm_map *migr_map = get_imsm_map(dev, 1); | |
3689 | ||
3690 | set_imsm_ord_tbl_ent(migr_map, n, ord_to_idx(ord)); | |
3691 | super->updates_pending++; | |
3692 | } | |
3693 | ||
3694 | failed = imsm_count_failed(super, dev); | |
3695 | map_state = imsm_check_degraded(super, dev, failed); | |
3696 | ||
3697 | /* check if recovery complete, newly degraded, or failed */ | |
3698 | if (map_state == IMSM_T_STATE_NORMAL && is_rebuilding(dev)) { | |
3699 | end_migration(dev, map_state); | |
3700 | map = get_imsm_map(dev, 0); | |
3701 | map->failed_disk_num = ~0; | |
3702 | super->updates_pending++; | |
3703 | } else if (map_state == IMSM_T_STATE_DEGRADED && | |
3704 | map->map_state != map_state && | |
3705 | !dev->vol.migr_state) { | |
3706 | dprintf("imsm: mark degraded\n"); | |
3707 | map->map_state = map_state; | |
3708 | super->updates_pending++; | |
3709 | } else if (map_state == IMSM_T_STATE_FAILED && | |
3710 | map->map_state != map_state) { | |
3711 | dprintf("imsm: mark failed\n"); | |
3712 | end_migration(dev, map_state); | |
3713 | super->updates_pending++; | |
3714 | } | |
3715 | } | |
3716 | ||
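/* write the mpb to its reserved area at the end of the device: the 512-byte
 * anchor goes in the second-to-last sector, any extended mpb data in the
 * sectors immediately preceding it
 */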
3717 | static int store_imsm_mpb(int fd, struct intel_super *super) | |
3718 | { | |
3719 | struct imsm_super *mpb = super->anchor; | |
3720 | __u32 mpb_size = __le32_to_cpu(mpb->mpb_size); | |
3721 | unsigned long long dsize; | |
3722 | unsigned long long sectors; | |
3723 | ||
3724 | get_dev_size(fd, NULL, &dsize); | |
3725 | ||
3726 | if (mpb_size > 512) { | |
3727 | /* -1 to account for anchor */ | |
3728 | sectors = mpb_sectors(mpb) - 1; | |
3729 | ||
3730 | /* write the extended mpb to the sectors preceding the anchor */ | |
3731 | if (lseek64(fd, dsize - (512 * (2 + sectors)), SEEK_SET) < 0) | |
3732 | return 1; | |
3733 | ||
3734 | if (write(fd, super->buf + 512, 512 * sectors) != 512 * sectors) | |
3735 | return 1; | |
3736 | } | |
3737 | ||
3738 | /* first block is stored on second to last sector of the disk */ | |
3739 | if (lseek64(fd, dsize - (512 * 2), SEEK_SET) < 0) | |
3740 | return 1; | |
3741 | ||
3742 | if (write(fd, super->buf, 512) != 512) | |
3743 | return 1; | |
3744 | ||
3745 | return 0; | |
3746 | } | |
3747 | ||
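/* flush any pending metadata changes for this container out to disk */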
3748 | static void imsm_sync_metadata(struct supertype *container) | |
3749 | { | |
3750 | struct intel_super *super = container->sb; | |
3751 | ||
3752 | if (!super->updates_pending) | |
3753 | return; | |
3754 | ||
3755 | write_super_imsm(super, 0); | |
3756 | ||
3757 | super->updates_pending = 0; | |
3758 | } | |
3759 | ||
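/* find the disk that previously occupied slot 'idx' of this array so it can
 * be re-added; returns NULL if that disk is gone or marked failed
 */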
3760 | static struct dl *imsm_readd(struct intel_super *super, int idx, struct active_array *a) | |
3761 | { | |
3762 | struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member); | |
3763 | int i = get_imsm_disk_idx(dev, idx); | |
3764 | struct dl *dl; | |
3765 | ||
3766 | for (dl = super->disks; dl; dl = dl->next) | |
3767 | if (dl->index == i) | |
3768 | break; | |
3769 | ||
3770 | if (dl && dl->disk.status & FAILED_DISK) | |
3771 | dl = NULL; | |
3772 | ||
3773 | if (dl) | |
3774 | dprintf("%s: found %x:%x\n", __func__, dl->major, dl->minor); | |
3775 | ||
3776 | return dl; | |
3777 | } | |
3778 | ||
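/* scan the container for a disk that can take over slot 'slot' of array 'a':
 * disks already active in the array, failed or otherwise unusable disks and,
 * unless activate_new is set, pure spares are skipped; the candidate must
 * also have free space at the offset of every member volume it is not yet
 * part of
 */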
3779 | static struct dl *imsm_add_spare(struct intel_super *super, int slot, | |
3780 | struct active_array *a, int activate_new) | |
3781 | { | |
3782 | struct imsm_dev *dev = get_imsm_dev(super, a->info.container_member); | |
3783 | int idx = get_imsm_disk_idx(dev, slot); | |
3784 | struct imsm_super *mpb = super->anchor; | |
3785 | struct imsm_map *map; | |
3786 | unsigned long long esize; | |
3787 | unsigned long long pos; | |
3788 | struct mdinfo *d; | |
3789 | struct extent *ex; | |
3790 | int i, j; | |
3791 | int found; | |
3792 | __u32 array_start; | |
3793 | __u32 blocks; | |
3794 | struct dl *dl; | |
3795 | ||
3796 | for (dl = super->disks; dl; dl = dl->next) { | |
3797 | /* If in this array, skip */ | |
3798 | for (d = a->info.devs ; d ; d = d->next) | |
3799 | if (d->state_fd >= 0 && | |
3800 | d->disk.major == dl->major && | |
3801 | d->disk.minor == dl->minor) { | |
3802 | dprintf("%x:%x already in array\n", dl->major, dl->minor); | |
3803 | break; | |
3804 | } | |
3805 | if (d) | |
3806 | continue; | |
3807 | ||
3808 | /* skip in use or failed drives */ | |
3809 | if (dl->disk.status & FAILED_DISK || idx == dl->index || | |
3810 | dl->index == -2) { | |
3811 | dprintf("%x:%x status (failed: %d index: %d)\n", | |
3812 | dl->major, dl->minor, | |
3813 | (dl->disk.status & FAILED_DISK) == FAILED_DISK, idx); | |
3814 | continue; | |
3815 | } | |
3816 | ||
3817 | /* skip pure spares when we are looking for partially | |
3818 | * assimilated drives | |
3819 | */ | |
3820 | if (dl->index == -1 && !activate_new) | |
3821 | continue; | |
3822 | ||
3823 | /* Does this unused device have the requisite free space? | |
3824 | * It needs to be able to cover all member volumes | |
3825 | */ | |
3826 | ex = get_extents(super, dl); | |
3827 | if (!ex) { | |
3828 | dprintf("cannot get extents\n"); | |
3829 | continue; | |
3830 | } | |
3831 | for (i = 0; i < mpb->num_raid_devs; i++) { | |
3832 | dev = get_imsm_dev(super, i); | |
3833 | map = get_imsm_map(dev, 0); | |
3834 | ||
3835 | /* check if this disk is already a member of | |
3836 | * this array | |
3837 | */ | |
3838 | if (get_imsm_disk_slot(map, dl->index) >= 0) | |
3839 | continue; | |
3840 | ||
3841 | found = 0; | |
3842 | j = 0; | |
3843 | pos = 0; | |
3844 | array_start = __le32_to_cpu(map->pba_of_lba0); | |
3845 | blocks = __le32_to_cpu(map->blocks_per_member); | |
3846 | ||
3847 | do { | |
3848 | /* check that we can start at pba_of_lba0 with | |
3849 | * blocks_per_member of space | |
3850 | */ | |
3851 | esize = ex[j].start - pos; | |
3852 | if (array_start >= pos && | |
3853 | array_start + blocks < ex[j].start) { | |
3854 | found = 1; | |
3855 | break; | |
3856 | } | |
3857 | pos = ex[j].start + ex[j].size; | |
3858 | j++; | |
3859 | } while (ex[j-1].size); | |
3860 | ||
3861 | if (!found) | |
3862 | break; | |
3863 | } | |
3864 | ||
3865 | free(ex); | |
3866 | if (i < mpb->num_raid_devs) { | |
3867 | dprintf("%x:%x does not have %u at %u\n", | |
3868 | dl->major, dl->minor, | |
3869 | blocks, array_start); | |
3870 | /* No room */ | |
3871 | continue; | |
3872 | } | |
3873 | return dl; | |
3874 | } | |
3875 | ||
3876 | return dl; | |
3877 | } | |
3878 | ||
3879 | static struct mdinfo *imsm_activate_spare(struct active_array *a, | |
3880 | struct metadata_update **updates) | |
3881 | { | |
3882 | /** | |
3883 | * Find a device with unused free space and use it to replace a | |
3884 | * failed/vacant region in an array. We replace failed regions one | |
3885 | * array at a time. The result is that a new spare disk will be added | |
3886 | * to the first failed array and after the monitor has finished | |
3887 | * propagating failures the remainder will be consumed. | |
3888 | * | |
3889 | * FIXME add a capability for mdmon to request spares from another | |
3890 | * container. | |
3891 | */ | |
3892 | ||
3893 | struct intel_super *super = a->container->sb; | |
3894 | int inst = a->info.container_member; | |
3895 | struct imsm_dev *dev = get_imsm_dev(super, inst); | |
3896 | struct imsm_map *map = get_imsm_map(dev, 0); | |
3897 | int failed = a->info.array.raid_disks; | |
3898 | struct mdinfo *rv = NULL; | |
3899 | struct mdinfo *d; | |
3900 | struct mdinfo *di; | |
3901 | struct metadata_update *mu; | |
3902 | struct dl *dl; | |
3903 | struct imsm_update_activate_spare *u; | |
3904 | int num_spares = 0; | |
3905 | int i; | |
3906 | ||
3907 | for (d = a->info.devs ; d ; d = d->next) { | |
3908 | if ((d->curr_state & DS_FAULTY) && | |
3909 | d->state_fd >= 0) | |
3910 | /* wait for Removal to happen */ | |
3911 | return NULL; | |
3912 | if (d->state_fd >= 0) | |
3913 | failed--; | |
3914 | } | |
3915 | ||
3916 | dprintf("imsm: activate spare: inst=%d failed=%d (%d) level=%d\n", | |
3917 | inst, failed, a->info.array.raid_disks, a->info.array.level); | |
3918 | if (imsm_check_degraded(super, dev, failed) != IMSM_T_STATE_DEGRADED) | |
3919 | return NULL; | |
3920 | ||
3921 | /* For each slot, if it is not working, find a spare */ | |
3922 | for (i = 0; i < a->info.array.raid_disks; i++) { | |
3923 | for (d = a->info.devs ; d ; d = d->next) | |
3924 | if (d->disk.raid_disk == i) | |
3925 | break; | |
3926 | dprintf("found %d: %p %x\n", i, d, d?d->curr_state:0); | |
3927 | if (d && (d->state_fd >= 0)) | |
3928 | continue; | |
3929 | ||
3930 | /* | |
3931 | * OK, this device needs recovery. Try to re-add the | |
3932 | * previous occupant of this slot; if this fails, see if | |
3933 | * we can continue the assimilation of a partially | |
3934 | * assimilated spare; finally, try to activate a new | |
3935 | * spare. | |
3936 | */ | |
3937 | dl = imsm_readd(super, i, a); | |
3938 | if (!dl) | |
3939 | dl = imsm_add_spare(super, i, a, 0); | |
3940 | if (!dl) | |
3941 | dl = imsm_add_spare(super, i, a, 1); | |
3942 | if (!dl) | |
3943 | continue; | |
3944 | ||
3945 | /* found a usable disk with enough space */ | |
3946 | di = malloc(sizeof(*di)); | |
3947 | if (!di) | |
3948 | continue; | |
3949 | memset(di, 0, sizeof(*di)); | |
3950 | ||
3951 | /* dl->index will be -1 in the case we are activating a | |
3952 | * pristine spare. imsm_process_update() will create a | |
3953 | * new index in this case. Once a disk is found to be | |
3954 | * failed in all member arrays it is kicked from the | |
3955 | * metadata | |
3956 | */ | |
3957 | di->disk.number = dl->index; | |
3958 | ||
3959 | /* (ab)use di->devs to store a pointer to the device | |
3960 | * we chose | |
3961 | */ | |
3962 | di->devs = (struct mdinfo *) dl; | |
3963 | ||
3964 | di->disk.raid_disk = i; | |
3965 | di->disk.major = dl->major; | |
3966 | di->disk.minor = dl->minor; | |
3967 | di->disk.state = 0; | |
3968 | di->data_offset = __le32_to_cpu(map->pba_of_lba0); | |
3969 | di->component_size = a->info.component_size; | |
3970 | di->container_member = inst; | |
3971 | di->next = rv; | |
3972 | rv = di; | |
3973 | num_spares++; | |
3974 | dprintf("%x:%x to be %d at %llu\n", dl->major, dl->minor, | |
3975 | i, di->data_offset); | |
3976 | ||
3977 | break; | |
3978 | } | |
3979 | ||
3980 | if (!rv) | |
3981 | /* No spares found */ | |
3982 | return rv; | |
3983 | /* Now 'rv' has a list of devices to return. | |
3984 | * Create a metadata_update record to update the | |
3985 | * disk_ord_tbl for the array | |
3986 | */ | |
3987 | mu = malloc(sizeof(*mu)); | |
3988 | if (mu) { | |
3989 | mu->buf = malloc(sizeof(struct imsm_update_activate_spare) * num_spares); | |
3990 | if (mu->buf == NULL) { | |
3991 | free(mu); | |
3992 | mu = NULL; | |
3993 | } | |
3994 | } | |
3995 | if (!mu) { | |
3996 | while (rv) { | |
3997 | struct mdinfo *n = rv->next; | |
3998 | ||
3999 | free(rv); | |
4000 | rv = n; | |
4001 | } | |
4002 | return NULL; | |
4003 | } | |
4004 | ||
4005 | mu->space = NULL; | |
4006 | mu->len = sizeof(struct imsm_update_activate_spare) * num_spares; | |
4007 | mu->next = *updates; | |
4008 | u = (struct imsm_update_activate_spare *) mu->buf; | |
4009 | ||
4010 | for (di = rv ; di ; di = di->next) { | |
4011 | u->type = update_activate_spare; | |
4012 | u->dl = (struct dl *) di->devs; | |
4013 | di->devs = NULL; | |
4014 | u->slot = di->disk.raid_disk; | |
4015 | u->array = inst; | |
4016 | u->next = u + 1; | |
4017 | u++; | |
4018 | } | |
4019 | (u-1)->next = NULL; | |
4020 | *updates = mu; | |
4021 | ||
4022 | return rv; | |
4023 | } | |
4024 | ||
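/* check whether any member of existing array 'idx' is also named (by serial
 * number) in the pending create-array update
 */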
4025 | static int disks_overlap(struct intel_super *super, int idx, struct imsm_update_create_array *u) | |
4026 | { | |
4027 | struct imsm_dev *dev = get_imsm_dev(super, idx); | |
4028 | struct imsm_map *map = get_imsm_map(dev, 0); | |
4029 | struct imsm_map *new_map = get_imsm_map(&u->dev, 0); | |
4030 | struct disk_info *inf = get_disk_info(u); | |
4031 | struct imsm_disk *disk; | |
4032 | int i; | |
4033 | int j; | |
4034 | ||
4035 | for (i = 0; i < map->num_members; i++) { | |
4036 | disk = get_imsm_disk(super, get_imsm_disk_idx(dev, i)); | |
4037 | for (j = 0; j < new_map->num_members; j++) | |
4038 | if (serialcmp(disk->serial, inf[j].serial) == 0) | |
4039 | return 1; | |
4040 | } | |
4041 | ||
4042 | return 0; | |
4043 | } | |
4044 | ||
4045 | static void imsm_delete(struct intel_super *super, struct dl **dlp, int index); | |
4046 | ||
4047 | static void imsm_process_update(struct supertype *st, | |
4048 | struct metadata_update *update) | |
4049 | { | |
4050 | /** | |
4051 | * crack open the metadata_update envelope to find the update record | |
4052 | * update can be one of: | |
4053 | * update_activate_spare - a spare device has replaced a failed | |
4054 | * device in an array, update the disk_ord_tbl. If this disk is | |
4055 | * present in all member arrays then also clear the SPARE_DISK | |
4056 | * flag | |
4057 | */ | |
4058 | struct intel_super *super = st->sb; | |
4059 | struct imsm_super *mpb; | |
4060 | enum imsm_update_type type = *(enum imsm_update_type *) update->buf; | |
4061 | ||
4062 | /* update requires a larger buf but the allocation failed */ | |
4063 | if (super->next_len && !super->next_buf) { | |
4064 | super->next_len = 0; | |
4065 | return; | |
4066 | } | |
4067 | ||
4068 | if (super->next_buf) { | |
4069 | memcpy(super->next_buf, super->buf, super->len); | |
4070 | free(super->buf); | |
4071 | super->len = super->next_len; | |
4072 | super->buf = super->next_buf; | |
4073 | ||
4074 | super->next_len = 0; | |
4075 | super->next_buf = NULL; | |
4076 | } | |
4077 | ||
4078 | mpb = super->anchor; | |
4079 | ||
4080 | switch (type) { | |
4081 | case update_activate_spare: { | |
4082 | struct imsm_update_activate_spare *u = (void *) update->buf; | |
4083 | struct imsm_dev *dev = get_imsm_dev(super, u->array); | |
4084 | struct imsm_map *map = get_imsm_map(dev, 0); | |
4085 | struct imsm_map *migr_map; | |
4086 | struct active_array *a; | |
4087 | struct imsm_disk *disk; | |
4088 | __u8 to_state; | |
4089 | struct dl *dl; | |
4090 | unsigned int found; | |
4091 | int failed; | |
4092 | int victim = get_imsm_disk_idx(dev, u->slot); | |
4093 | int i; | |
4094 | ||
4095 | for (dl = super->disks; dl; dl = dl->next) | |
4096 | if (dl == u->dl) | |
4097 | break; | |
4098 | ||
4099 | if (!dl) { | |
4100 | fprintf(stderr, "error: imsm_activate_spare passed " | |
4101 | "an unknown disk (index: %d)\n", | |
4102 | u->dl->index); | |
4103 | return; | |
4104 | } | |
4105 | ||
4106 | super->updates_pending++; | |
4107 | ||
4108 | /* count failures (excluding rebuilds and the victim) | |
4109 | * to determine map[0] state | |
4110 | */ | |
4111 | failed = 0; | |
4112 | for (i = 0; i < map->num_members; i++) { | |
4113 | if (i == u->slot) | |
4114 | continue; | |
4115 | disk = get_imsm_disk(super, get_imsm_disk_idx(dev, i)); | |
4116 | if (!disk || disk->status & FAILED_DISK) | |
4117 | failed++; | |
4118 | } | |
4119 | ||
4120 | /* adding a pristine spare, assign a new index */ | |
4121 | if (dl->index < 0) { | |
4122 | dl->index = super->anchor->num_disks; | |
4123 | super->anchor->num_disks++; | |
4124 | } | |
4125 | disk = &dl->disk; | |
4126 | disk->status |= CONFIGURED_DISK; | |
4127 | disk->status &= ~SPARE_DISK; | |
4128 | ||
4129 | /* mark rebuild */ | |
4130 | to_state = imsm_check_degraded(super, dev, failed); | |
4131 | map->map_state = IMSM_T_STATE_DEGRADED; | |
4132 | migrate(dev, to_state, MIGR_REBUILD); | |
4133 | migr_map = get_imsm_map(dev, 1); | |
4134 | set_imsm_ord_tbl_ent(map, u->slot, dl->index); | |
4135 | set_imsm_ord_tbl_ent(migr_map, u->slot, dl->index | IMSM_ORD_REBUILD); | |
4136 | ||
4137 | /* count arrays using the victim in the metadata */ | |
4138 | found = 0; | |
4139 | for (a = st->arrays; a ; a = a->next) { | |
4140 | dev = get_imsm_dev(super, a->info.container_member); | |
4141 | map = get_imsm_map(dev, 0); | |
4142 | ||
4143 | if (get_imsm_disk_slot(map, victim) >= 0) | |
4144 | found++; | |
4145 | } | |
4146 | ||
4147 | /* delete the victim if it is no longer being | |
4148 | * utilized anywhere | |
4149 | */ | |
4150 | if (!found) { | |
4151 | struct dl **dlp; | |
4152 | ||
4153 | /* We know that 'manager' isn't touching anything, | |
4154 | * so it is safe to delete | |
4155 | */ | |
4156 | for (dlp = &super->disks; *dlp; dlp = &(*dlp)->next) | |
4157 | if ((*dlp)->index == victim) | |
4158 | break; | |
4159 | ||
4160 | /* victim may be on the missing list */ | |
4161 | if (!*dlp) | |
4162 | for (dlp = &super->missing; *dlp; dlp = &(*dlp)->next) | |
4163 | if ((*dlp)->index == victim) | |
4164 | break; | |
4165 | imsm_delete(super, dlp, victim); | |
4166 | } | |
4167 | break; | |
4168 | } | |
4169 | case update_create_array: { | |
4170 | /* someone wants to create a new array, we need to be aware of | |
4171 | * a few races/collisions: | |
4172 | * 1/ 'Create' called by two separate instances of mdadm | |
4173 | * 2/ 'Create' versus 'activate_spare': mdadm has chosen | |
4174 | * devices that have since been assimilated via | |
4175 | * activate_spare. | |
4176 | * In the event this update can not be carried out mdadm will | |
4177 | * (FIX ME) notice that its update did not take hold. | |
4178 | */ | |
4179 | struct imsm_update_create_array *u = (void *) update->buf; | |
4180 | struct intel_dev *dv; | |
4181 | struct imsm_dev *dev; | |
4182 | struct imsm_map *map, *new_map; | |
4183 | unsigned long long start, end; | |
4184 | unsigned long long new_start, new_end; | |
4185 | int i; | |
4186 | struct disk_info *inf; | |
4187 | struct dl *dl; | |
4188 | ||
4189 | /* handle racing creates: first come first serve */ | |
4190 | if (u->dev_idx < mpb->num_raid_devs) { | |
4191 | dprintf("%s: subarray %d already defined\n", | |
4192 | __func__, u->dev_idx); | |
4193 | goto create_error; | |
4194 | } | |
4195 | ||
4196 | /* check update is next in sequence */ | |
4197 | if (u->dev_idx != mpb->num_raid_devs) { | |
4198 | dprintf("%s: can not create array %d expected index %d\n", | |
4199 | __func__, u->dev_idx, mpb->num_raid_devs); | |
4200 | goto create_error; | |
4201 | } | |
4202 | ||
4203 | new_map = get_imsm_map(&u->dev, 0); | |
4204 | new_start = __le32_to_cpu(new_map->pba_of_lba0); | |
4205 | new_end = new_start + __le32_to_cpu(new_map->blocks_per_member); | |
4206 | inf = get_disk_info(u); | |
4207 | ||
4208 | /* handle activate_spare versus create race: | |
4209 | * check to make sure that overlapping arrays do not include | |
4210 | * overlapping disks | |
4211 | */ | |
4212 | for (i = 0; i < mpb->num_raid_devs; i++) { | |
4213 | dev = get_imsm_dev(super, i); | |
4214 | map = get_imsm_map(dev, 0); | |
4215 | start = __le32_to_cpu(map->pba_of_lba0); | |
4216 | end = start + __le32_to_cpu(map->blocks_per_member); | |
4217 | if ((new_start >= start && new_start <= end) || | |
4218 | (start >= new_start && start <= new_end)) | |
4219 | /* overlap */; | |
4220 | else | |
4221 | continue; | |
4222 | ||
4223 | if (disks_overlap(super, i, u)) { | |
4224 | dprintf("%s: arrays overlap\n", __func__); | |
4225 | goto create_error; | |
4226 | } | |
4227 | } | |
4228 | ||
4229 | /* check that prepare update was successful */ | |
4230 | if (!update->space) { | |
4231 | dprintf("%s: prepare update failed\n", __func__); | |
4232 | goto create_error; | |
4233 | } | |
4234 | ||
4235 | /* check that all disks are still active before committing | |
4236 | * changes. FIXME: could we instead handle this by creating a | |
4237 | * degraded array? That's probably not what the user expects, | |
4238 | * so better to drop this update on the floor. | |
4239 | */ | |
4240 | for (i = 0; i < new_map->num_members; i++) { | |
4241 | dl = serial_to_dl(inf[i].serial, super); | |
4242 | if (!dl) { | |
4243 | dprintf("%s: disk disappeared\n", __func__); | |
4244 | goto create_error; | |
4245 | } | |
4246 | } | |
4247 | ||
4248 | super->updates_pending++; | |
4249 | ||
4250 | /* convert spares to members and fixup ord_tbl */ | |
4251 | for (i = 0; i < new_map->num_members; i++) { | |
4252 | dl = serial_to_dl(inf[i].serial, super); | |
4253 | if (dl->index == -1) { | |
4254 | dl->index = mpb->num_disks; | |
4255 | mpb->num_disks++; | |
4256 | dl->disk.status |= CONFIGURED_DISK; | |
4257 | dl->disk.status &= ~SPARE_DISK; | |
4258 | } | |
4259 | set_imsm_ord_tbl_ent(new_map, i, dl->index); | |
4260 | } | |
4261 | ||
4262 | dv = update->space; | |
4263 | dev = dv->dev; | |
4264 | update->space = NULL; | |
4265 | imsm_copy_dev(dev, &u->dev); | |
4266 | dv->index = u->dev_idx; | |
4267 | dv->next = super->devlist; | |
4268 | super->devlist = dv; | |
4269 | mpb->num_raid_devs++; | |
4270 | ||
4271 | imsm_update_version_info(super); | |
4272 | break; | |
4273 | create_error: | |
4274 | /* mdmon knows how to release update->space, but not | |
4275 | * ((struct intel_dev *) update->space)->dev | |
4276 | */ | |
4277 | if (update->space) { | |
4278 | dv = update->space; | |
4279 | free(dv->dev); | |
4280 | } | |
4281 | break; | |
4282 | } | |
4283 | case update_add_disk: | |
4284 | ||
4285 | /* we may be able to repair some arrays if disks are | |
4286 | * being added */ | |
4287 | if (super->add) { | |
4288 | struct active_array *a; | |
4289 | ||
4290 | super->updates_pending++; | |
4291 | for (a = st->arrays; a; a = a->next) | |
4292 | a->check_degraded = 1; | |
4293 | } | |
4294 | /* add some spares to the metadata */ | |
4295 | while (super->add) { | |
4296 | struct dl *al; | |
4297 | ||
4298 | al = super->add; | |
4299 | super->add = al->next; | |
4300 | al->next = super->disks; | |
4301 | super->disks = al; | |
4302 | dprintf("%s: added %x:%x\n", | |
4303 | __func__, al->major, al->minor); | |
4304 | } | |
4305 | ||
4306 | break; | |
4307 | } | |
4308 | } | |
4309 | ||
4310 | static void imsm_prepare_update(struct supertype *st, | |
4311 | struct metadata_update *update) | |
4312 | { | |
4313 | /** | |
4314 | * Allocate space to hold new disk entries, raid-device entries or a new | |
4315 | * mpb if necessary. The manager synchronously waits for updates to | |
4316 | * complete in the monitor, so new mpb buffers allocated here can be | |
4317 | * integrated by the monitor thread without worrying about live pointers | |
4318 | * in the manager thread. | |
4319 | */ | |
4320 | enum imsm_update_type type = *(enum imsm_update_type *) update->buf; | |
4321 | struct intel_super *super = st->sb; | |
4322 | struct imsm_super *mpb = super->anchor; | |
4323 | size_t buf_len; | |
4324 | size_t len = 0; | |
4325 | ||
4326 | switch (type) { | |
4327 | case update_create_array: { | |
4328 | struct imsm_update_create_array *u = (void *) update->buf; | |
4329 | struct intel_dev *dv; | |
4330 | struct imsm_dev *dev = &u->dev; | |
4331 | struct imsm_map *map = get_imsm_map(dev, 0); | |
4332 | struct dl *dl; | |
4333 | struct disk_info *inf; | |
4334 | int i; | |
4335 | int activate = 0; | |
4336 | ||
4337 | inf = get_disk_info(u); | |
4338 | len = sizeof_imsm_dev(dev, 1); | |
4339 | /* allocate a new super->devlist entry */ | |
4340 | dv = malloc(sizeof(*dv)); | |
4341 | if (dv) { | |
4342 | dv->dev = malloc(len); | |
4343 | if (dv->dev) | |
4344 | update->space = dv; | |
4345 | else { | |
4346 | free(dv); | |
4347 | update->space = NULL; | |
4348 | } | |
4349 | } | |
4350 | ||
4351 | /* count how many spares will be converted to members */ | |
4352 | for (i = 0; i < map->num_members; i++) { | |
4353 | dl = serial_to_dl(inf[i].serial, super); | |
4354 | if (!dl) { | |
4355 | /* hmm, maybe it failed? nothing we can do about | |
4356 | * it here | |
4357 | */ | |
4358 | continue; | |
4359 | } | |
4360 | if (count_memberships(dl, super) == 0) | |
4361 | activate++; | |
4362 | } | |
4363 | len += activate * sizeof(struct imsm_disk); | |
4364 | break; | |
4365 | default: | |
4366 | break; | |
4367 | } | |
4368 | } | |
4369 | ||
4370 | /* check if we need a larger metadata buffer */ | |
4371 | if (super->next_buf) | |
4372 | buf_len = super->next_len; | |
4373 | else | |
4374 | buf_len = super->len; | |
4375 | ||
4376 | if (__le32_to_cpu(mpb->mpb_size) + len > buf_len) { | |
4377 | /* ok we need a larger buf than what is currently allocated | |
4378 | * if this allocation fails, process_update will notice that | |
4379 | * ->next_len is set and ->next_buf is NULL | |
4380 | */ | |
4381 | buf_len = ROUND_UP(__le32_to_cpu(mpb->mpb_size) + len, 512); | |
4382 | if (super->next_buf) | |
4383 | free(super->next_buf); | |
4384 | ||
4385 | super->next_len = buf_len; | |
4386 | if (posix_memalign(&super->next_buf, 512, buf_len) == 0) | |
4387 | memset(super->next_buf, 0, buf_len); | |
4388 | else | |
4389 | super->next_buf = NULL; | |
4390 | } | |
4391 | } | |
4392 | ||
4393 | /* must be called while manager is quiesced */ | |
4394 | static void imsm_delete(struct intel_super *super, struct dl **dlp, int index) | |
4395 | { | |
4396 | struct imsm_super *mpb = super->anchor; | |
4397 | struct dl *iter; | |
4398 | struct imsm_dev *dev; | |
4399 | struct imsm_map *map; | |
4400 | int i, j, num_members; | |
4401 | __u32 ord; | |
4402 | ||
4403 | dprintf("%s: deleting device[%d] from imsm_super\n", | |
4404 | __func__, index); | |
4405 | ||
4406 | /* shift all indexes down one */ | |
4407 | for (iter = super->disks; iter; iter = iter->next) | |
4408 | if (iter->index > index) | |
4409 | iter->index--; | |
4410 | for (iter = super->missing; iter; iter = iter->next) | |
4411 | if (iter->index > index) | |
4412 | iter->index--; | |
4413 | ||
4414 | for (i = 0; i < mpb->num_raid_devs; i++) { | |
4415 | dev = get_imsm_dev(super, i); | |
4416 | map = get_imsm_map(dev, 0); | |
4417 | num_members = map->num_members; | |
4418 | for (j = 0; j < num_members; j++) { | |
4419 | /* update ord entries being careful not to propagate | |
4420 | * ord-flags to the first map | |
4421 | */ | |
4422 | ord = get_imsm_ord_tbl_ent(dev, j); | |
4423 | ||
4424 | if (ord_to_idx(ord) <= index) | |
4425 | continue; | |
4426 | ||
4427 | map = get_imsm_map(dev, 0); | |
4428 | set_imsm_ord_tbl_ent(map, j, ord_to_idx(ord - 1)); | |
4429 | map = get_imsm_map(dev, 1); | |
4430 | if (map) | |
4431 | set_imsm_ord_tbl_ent(map, j, ord - 1); | |
4432 | } | |
4433 | } | |
4434 | ||
4435 | mpb->num_disks--; | |
4436 | super->updates_pending++; | |
4437 | if (*dlp) { | |
4438 | struct dl *dl = *dlp; | |
4439 | ||
4440 | *dlp = (*dlp)->next; | |
4441 | __free_imsm_disk(dl); | |
4442 | } | |
4443 | } | |
4444 | #endif /* MDASSEMBLE */ | |
4445 | ||
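/* method table for the imsm (Intel Matrix Storage Manager) metadata format,
 * used by the generic mdadm and mdmon code
 */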
4446 | struct superswitch super_imsm = { | |
4447 | #ifndef MDASSEMBLE | |
4448 | .examine_super = examine_super_imsm, | |
4449 | .brief_examine_super = brief_examine_super_imsm, | |
4450 | .export_examine_super = export_examine_super_imsm, | |
4451 | .detail_super = detail_super_imsm, | |
4452 | .brief_detail_super = brief_detail_super_imsm, | |
4453 | .write_init_super = write_init_super_imsm, | |
4454 | .validate_geometry = validate_geometry_imsm, | |
4455 | .add_to_super = add_to_super_imsm, | |
4456 | .detail_platform = detail_platform_imsm, | |
4457 | #endif | |
4458 | .match_home = match_home_imsm, | |
4459 | .uuid_from_super= uuid_from_super_imsm, | |
4460 | .getinfo_super = getinfo_super_imsm, | |
4461 | .update_super = update_super_imsm, | |
4462 | ||
4463 | .avail_size = avail_size_imsm, | |
4464 | ||
4465 | .compare_super = compare_super_imsm, | |
4466 | ||
4467 | .load_super = load_super_imsm, | |
4468 | .init_super = init_super_imsm, | |
4469 | .store_super = store_zero_imsm, | |
4470 | .free_super = free_super_imsm, | |
4471 | .match_metadata_desc = match_metadata_desc_imsm, | |
4472 | .container_content = container_content_imsm, | |
4473 | .default_layout = imsm_level_to_layout, | |
4474 | ||
4475 | .external = 1, | |
4476 | .name = "imsm", | |
4477 | ||
4478 | #ifndef MDASSEMBLE | |
4479 | /* for mdmon */ | |
4480 | .open_new = imsm_open_new, | |
4481 | .load_super = load_super_imsm, | |
4482 | .set_array_state= imsm_set_array_state, | |
4483 | .set_disk = imsm_set_disk, | |
4484 | .sync_metadata = imsm_sync_metadata, | |
4485 | .activate_spare = imsm_activate_spare, | |
4486 | .process_update = imsm_process_update, | |
4487 | .prepare_update = imsm_prepare_update, | |
4488 | #endif /* MDASSEMBLE */ | |
4489 | }; |