Changes Prior to this release
+ - Add "INSTALL" file.
+ - Fix some "i" variables that were not being set properly
+ - Initialise minsize and maxsize so that compilers don't complain.
+ - Tidy up Makefile and mdadm.spec installations
+ - Add "multipath" to documentation of valid levels
+
+Changes Prior to 0.8 release
- Fix another bug in Assemble.c due to confusing 'i' with 'j'
- Minimal, untested, support for multipath
- re-write of argument parsing to have more coherent modes,
* if runstop==run, or raiddisks disks were used,
* RUN_ARRAY
*/
- int minsize, maxsize;
+ int minsize=0, maxsize=0;
char *mindisc = NULL;
char *maxdisc = NULL;
- int i;
+ int dnum;
mddev_dev_t dv;
int fail=0, warn=0;
struct stat stb;
/* now look at the subdevs */
array.active_disks = 0;
array.working_disks = 0;
- for (dv=devlist; dv; dv=dv->next) {
+ dnum = 0;
+ for (dv=devlist; dv; dv=dv->next, dnum++) {
char *dname = dv->devname;
int dsize, freesize;
int fd;
if (strcasecmp(dname, "missing")==0) {
- if (first_missing > i)
- first_missing = i;
+ if (first_missing > dnum)
+ first_missing = dnum;
missing_disks ++;
continue;
}
array.working_disks++;
- if (i < raiddisks)
+ if (dnum < raiddisks)
array.active_disks++;
fd = open(dname, O_RDONLY, 0);
if (fd < 0) {
return 1;
}
- for (i=0, dv = devlist ; dv ; dv=dv->next, i++) {
+ for (dnum=0, dv = devlist ; dv ; dv=dv->next, dnum++) {
int fd;
struct stat stb;
mdu_disk_info_t disk;
- disk.number = i;
- if (i >= insert_point)
+ disk.number = dnum;
+ if (dnum >= insert_point)
disk.number++;
disk.raid_disk = disk.number;
if (disk.raid_disk < raiddisks)
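
The Create.c hunks above replace a stale index "i" (which was never set on these paths) with a counter that is advanced together with the list pointer, so every slot number used inside the loop is well defined. A minimal, self-contained sketch of that pattern (illustrative only, not taken from mdadm):

    /* Sketch only: walk a linked device list with a counter that is
     * stepped alongside the list pointer, so the index is always
     * defined inside the loop body. */
    #include <stdio.h>

    struct dev { const char *name; struct dev *next; };

    int main(void)
    {
            struct dev c = { "missing",   NULL };
            struct dev b = { "/dev/sdb1", &c };
            struct dev a = { "/dev/sda1", &b };
            struct dev *dv;
            int dnum;

            for (dnum = 0, dv = &a; dv; dv = dv->next, dnum++)
                    printf("slot %d: %s\n", dnum, dv->name);
            return 0;
    }
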
--- /dev/null
+
+To build mdadm, simply run:
+
+ make
+
+To install, run:
+
+ make install
+
+as root.
+
+
+No configuration is necessary.
# STRIP = -s
INSTALL = /usr/bin/install
-DESTDIR = /.
+DESTDIR =
BINDIR = /sbin
-MANDIR = /usr/share/man/man8
+MANDIR = /usr/share/man
+MAN4DIR = $(MANDIR)/man4
+MAN5DIR = $(MANDIR)/man5
+MAN8DIR = $(MANDIR)/man8
OBJS = mdadm.o config.o mdstat.o ReadMe.o util.o Manage.o Assemble.o Build.o Create.o Detail.o Examine.o Monitor.o dlink.o Kill.o Query.o
$(OBJS) : mdadm.h
-install : mdadm mdadm.8
- $(INSTALL) $(STRIP) -m 755 mdadm $(DESTDIR)/$(BINDIR)
- $(INSTALL) -m 644 mdadm.8 $(DESTDIR)/$(MANDIR)
+install : mdadm mdadm.8 md.4 mdadm.conf.5
+ $(INSTALL) -D $(STRIP) -m 755 mdadm $(DESTDIR)$(BINDIR)/mdadm
+ $(INSTALL) -D -m 644 mdadm.8 $(DESTDIR)$(MAN8DIR)/mdadm.8
+ $(INSTALL) -D -m 644 md.4 $(DESTDIR)$(MAN4DIR)/md.4
+ $(INSTALL) -D -m 644 mdadm.conf.5 $(DESTDIR)$(MAN5DIR)/mdadm.conf.5
clean :
- rm -f mdadm $(OBJS) core mdadm.man
+ rm -f mdadm $(OBJS) core *.man
dist : clean
./makedist
mdu_disk_info_t disc;
mddev_dev_t dv;
struct stat stb;
- int i,j;
+ int j;
int save_errno;
static char buf[4096];
}
switch(dv->disposition){
default:
- fprintf(stderr, Name ": internal error - devmode[%d]=%d\n",
- i, dv->disposition);
+ fprintf(stderr, Name ": internal error - devmode[%s]=%d\n",
+ dv->devname, dv->disposition);
return 1;
case 'a':
/* add the device - hot or cold */
#include "mdadm.h"
-char Version[] = Name " - v0.8 - 4 April 2002\n";
+char Version[] = Name " - v0.8.1 - 6 April 2002\n";
/*
* File: ReadMe.c
*
" For create or build:\n"
" --chunk= -c : chunk size of kibibytes\n"
" --rounding= : rounding factor for linear array (==chunck size)\n"
-" --level= -l : raid level: 0,1,4,5,linear. 0 or linear for build\n"
+" --level= -l : raid level: 0,1,4,5,linear,mp. 0 or linear for build\n"
" --paritiy= -p : raid5 parity algorith: {left,right}-{,a}symmetric\n"
" --layout= : same as --parity\n"
" --raid-disks= -n : number of active devices in array\n"
( cd .. ; ln -s mdadm mdadm-$version ; tar czhvf - --exclude='*,v' --exclude='*.o' --exclude mdadm --exclude=RCS mdadm-$version ; rm mdadm-$version ) > $target/$base
chmod a+r $target/$base
ls -l $target/$base
+
+rpm -ta $target/$base
+find /home/neilb/src/RPM -name "*mdadm-$version-*" \
+ -exec cp {} $target/RPM \;
striped array.
A RAID0 array is configured at creation with a
.B "Chunk Size"
-which must be a multiple of 4 kibibytes.
+which must be a power of two, and at least 4 kibibytes.
The RAID0 driver places the first chunk of the array to the first
device, the second chunk to the second device, and so on until all
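
The md.4 correction above tightens the chunk-size rule from "a multiple of 4 kibibytes" to "a power of two, and at least 4 kibibytes". A standalone sketch of that check (illustrative only, assuming the size is given in bytes; not mdadm code):

    /* Sketch only: accept a RAID0 chunk size when it is a power of two
     * and at least 4 KiB (4096 bytes). */
    #include <stdio.h>

    static int chunk_ok(unsigned long bytes)
    {
            return bytes >= 4096 && (bytes & (bytes - 1)) == 0;
    }

    int main(void)
    {
            unsigned long sizes[] = { 4096, 6144, 65536 };
            int i;

            for (i = 0; i < 3; i++)
                    printf("%lu bytes: %s\n", sizes[i],
                           chunk_ok(sizes[i]) ? "valid" : "invalid");
            return 0;
    }
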
+++ /dev/null
-MD(4) MD(4)
-
-
-
-N\bNA\bAM\bME\bE
- md - Multiple Device driver aka Linux Software Raid
-
-S\bSY\bYN\bNO\bOP\bPS\bSI\bIS\bS
- /\b/d\bde\bev\bv/\b/m\bmd\bd_\bn
- /\b/d\bde\bev\bv/\b/m\bmd\bd/\b/_\bn
-
-D\bDE\bES\bSC\bCR\bRI\bIP\bPT\bTI\bIO\bON\bN
- The m\bmd\bd driver provides virtual devices that are created
- from one or more independent underlying devices. This
- array of devices often contains redundancy, and hence the
- acronym RAID which stands for a Redundant Array of Inde-
- pendent Devices.
-
- m\bmd\bd support RAID levels 1 (mirroring) 4 (striped array with
- parity device) and 5 (striped array with distributed par-
- ity information. If a single underlying device fails
- while using one of these level, the array will continue to
- function.
-
- m\bmd\bd also supports a number of pseudo RAID (non-redundant)
- configurations including RAID0 (striped array), LINEAR
- (catenated array) and MULTIPATH (a set of different inter-
- faces to the same device).
-
-
- M\bMD\bD S\bSU\bUP\bPE\bER\bR B\bBL\bLO\bOC\bCK\bK
- With the exception of Legacy Arrays described below, each
- device that is incorporated into an MD array has a _\bs_\bu_\bp_\be_\br
- _\bb_\bl_\bo_\bc_\bk written towards the end of the device. This
- superblock records information about the structure and
- state of the array so that the array can be reliably re-
- assembled after a shutdown.
-
- The superblock is 4K long and is written into a 64K
- aligned block that starts at least 64K and less than 128K
- from the end of the device (i.e. to get the address of the
- superblock round the size of the device down to a multiple
- of 64K and then subtract 64K). The available size of each
- device is the amount of space before the super block, so
- between 64K and 128K is lost when a device in incorporated
- into an MD array.
-
- The superblock contains, among other things:
-
- LEVEL The manner in which the devices are arranged into
- the array (linear, raid0, raid1, raid4, raid5, mul-
- tipath).
-
- UUID a 128 bit Universally Unique Identifier that iden-
- tifies the array that this device is part of.
-
-
- L\bLE\bEG\bGA\bAC\bCY\bY A\bAR\bRR\bRA\bAY\bYS\bS
- Early versions of the m\bmd\bd driver only supported Linear and
- Raid0 configurations and so did not use an MD superblock
- (as there is not state that needs to be recorded). While
- it is strongly recommended that all newly created arrays
- utilise a superblock to help ensure that they are assem-
- bled properly, the m\bmd\bd driver still supports legacy linear
- and raid0 md arrays that do not have a superblock.
-
-
- L\bLI\bIN\bNE\bEA\bAR\bR
- A linear array simply catenates the available space on
- each drive together to form one large virtual drive.
-
- One advantage of this arrangement over the more common
- RAID0 arrangement is that the array may be reconfigured at
- a later time with an extra drive and so the array is made
- bigger without disturbing the data that is on the array.
- However this cannot be done on a live array.
-
-
-
- R\bRA\bAI\bID\bD0\b0
- A RAID0 array (which has zero redundancy) is also known as
- a striped array. A RAID0 array is configured at creation
- with a C\bCh\bhu\bun\bnk\bk S\bSi\biz\bze\be which must be a multiple of 4 kibibytes.
-
- The RAID0 driver places the first chunk of the array to
- the first device, the second chunk to the second device,
- and so on until all drives have been assigned one chuck.
- This collection of chunks forms a s\bst\btr\bri\bip\bpe\be. Further chunks
- are gathered into stripes in the same way which are
- assigned to the remaining space in the drives.
-
- If device in the array are not all the same size, then
- once the smallest devices has been exhausted, the RAID0
- driver starts collecting chunks into smaller stripes that
- only span the drives which still have remaining space.
-
-
-
- R\bRA\bAI\bID\bD1\b1
- A RAID1 array is also known as a mirrored set (though mir-
- rors tend to provide reflect images, which RAID1 does not)
- or a plex.
-
- Once initialised, each device in a RAID1 array contains
- exactly the same data. Changes are written to all devices
- in parallel. Data is read from any one device. The
- driver attempts to distribute read requests across all
- devices to maximise performance.
-
- All devices in a RAID1 array should be the same size. If
- they are not, then only the amount of space available on
- the smallest device is used. Any extra space on other
- devices is wasted.
-
-
- R\bRA\bAI\bID\bD4\b4
- A RAID4 array is like a RAID0 array with an extra device
- for storing parity. Unlike RAID0, RAID4 also requires
- that all stripes span all drives, so extra space on
- devices that are larger than the smallest is wasted.
-
- When any block in a RAID4 array is modified the parity
- block for that stripe (i.e. the block in the parity device
- at the same device offset as the stripe) is also modified
- so that the parity block always contains the "parity" for
- the whole stripe. i.e. its contents is equivalent to the
- result of performing an exclusive-or operation between all
- the data blocks in the stripe.
-
- This allows the array to continue to function if one
- device fails. The data that was on that device can be
- calculated as needed from the parity block and the other
- data blocks.
-
-
- R\bRA\bAI\bID\bD5\b5
- RAID5 is very similar to RAID4. The difference is that
- the parity blocks for each stripe, instead of being on a
- single device, are distributed across all devices. This
- allows more parallelism when writing as two different
- block updates will quite possibly affect parity blocks on
- different devices so there is less contention.
-
- This also allows more parallelism when reading as read
- requests are distributed over all the devices in the array
- instead of all but one.
-
-
- M\bMU\bUT\bTI\bIP\bPA\bAT\bTH\bH
- MULTIPATH is not really a RAID at all as there is only one
- real device in a MULTIPATH md array. However there are
- multiple access points (paths) to this device, and one of
- these paths might fail, so there are some similarities.
-
- A MULTIPATH array is composed of a number of different
- devices, often fibre channel interfaces, that all refer
- the the same real device. If one of these interfaces
- fails (e.g. due to cable problems), the multipath driver
- to attempt to redirect requests to another interface.
-
-
-
- U\bUN\bNC\bCL\bLE\bEA\bAN\bN S\bSH\bHU\bUT\bTD\bDO\bOW\bWN\bN
- When changes are made to an RAID1, RAID4, or RAID5 array
- there is a possibility of inconsistency for short periods
- of time as each update requires are least two block to be
- written to different devices, and these writes probably
- wont happen at exactly the same time. This is a system
- with one of these arrays is shutdown in the middle of a
- write operation (e.g. due to power failure), the array may
- not be consistent.
-
- The handle this situation, the md driver marks an array as
- "dirty" before writing any data to it, and marks it as
- "clean" when the array is being disabled, e.g. at shut-
- down. If the md driver finds an array to be dirty at
- startup, it proceeds to correct any possibly inconsis-
- tency. For RAID1, this involves copying the contents of
- the first drive onto all other drives. For RAID4 or RAID5
- this involves recalculating the parity for each stripe and
- making sure that the parity block has the correct data.
-
- If a RAID4 or RAID5 array is degraded (missing one drive)
- when it is restarted after an unclean shutdown, it cannot
- recalculate parity, and so it is possible that data might
- be undetectably corrupted. The md driver currently d\bdo\boe\bes\bs
- n\bno\bot\bt alert the operator to this condition. It should prob-
- ably fail to start an array in this condition without man-
- ual intervention.
-
-
- R\bRE\bEC\bCO\bOV\bVE\bER\bRY\bY
- If the md driver detects any error on a device in a RAID1,
- RAID4, or RAID5 array, it immediately disables that device
- (marking it as faulty) and continues operation on the
- remaining devices. If there is a spare drive, the driver
- will start recreating on one of the spare drives the data
- what was on that failed drive, either by copying a working
- drive in a RAID1 configuration, or by doing calculations
- with the parity block on RAID4 and RAID5.
-
- Why this recovery process is happening, the md driver will
- monitor accesses to the array and will slow down the rate
- of recovery if other activity is happening, so that normal
- access to the array will not be unduly affected. When no
- other activity is happening, the recovery process proceeds
- at full speed. The actual speed targets for the two dif-
- ferent situations can be controlled by the s\bsp\bpe\bee\bed\bd_\b_l\bli\bim\bmi\bit\bt_\b_m\bmi\bin\bn
- and s\bsp\bpe\bee\bed\bd_\b_l\bli\bim\bmi\bit\bt_\b_m\bma\bax\bx control files mentioned below.
-
-
-
-F\bFI\bIL\bLE\bES\bS
- /\b/p\bpr\bro\boc\bc/\b/m\bmd\bds\bst\bta\bat\bt
- Contains information about the status of currently
- running array.
-
- /\b/p\bpr\bro\boc\bc/\b/s\bsy\bys\bs/\b/d\bde\bev\bv/\b/r\bra\bai\bid\bd/\b/s\bsp\bpe\bee\bed\bd_\b_l\bli\bim\bmi\bit\bt_\b_m\bmi\bin\bn
- A readable and writable file that reflects the cur-
- rent goal rebuild speed for times when non-rebuild
- activity is current on an array. The speed is in
- Kibibytes per second, and is a per-device rate, not
- a per-array rate (which means that an array with
- more disc will shuffle more data for a given
- speed). The default is 100.
-
-
- /\b/p\bpr\bro\boc\bc/\b/s\bsy\bys\bs/\b/d\bde\bev\bv/\b/r\bra\bai\bid\bd/\b/s\bsp\bpe\bee\bed\bd_\b_l\bli\bim\bmi\bit\bt_\b_m\bma\bax\bx
- A readable and writable file that reflects the cur-
- rent goal rebuild speed for times when no non-
- rebuild activity is current on an array. The
- default is 100,000.
-
-
-S\bSE\bEE\bE A\bAL\bLS\bSO\bO
- m\bmd\bda\bad\bdm\bm(8), m\bmk\bkr\bra\bai\bid\bd(8).
-
-
-
- MD(4)
.TP
.BR -l ", " --level=
Set raid level. Options are: linear, raid0, 0, stripe, raid1, 1, mirror, raid4, 4,
-raid5, 5. Obviously some of these are synonymous.
+raid5, 5, multipath, mp. Obviously some of these are synonymous.
Only the first 4 are valid when Building.
.TP
if (devlist == NULL)
devlist = conf_get_devs(configfile);
if (devlist == NULL) {
- fprintf(stderr, Name ": No devices listed in %s\n", configfile);
+ fprintf(stderr, Name ": No devices listed in %s\n", configfile?configfile:DefaultConfFile);
exit(1);
}
rv = Examine(devlist, devlist?brief:!verbose, scan);
+++ /dev/null
-MDADM.CONF(5) MDADM.CONF(5)
-
-
-
-N\bNA\bAM\bME\bE
- mdadm.conf - configuration for management of Software Raid
- with mdadm
-
-S\bSY\bYN\bNO\bOP\bPS\bSI\bIS\bS
- /etc/mdadm.conf
-
-D\bDE\bES\bSC\bCR\bRI\bIP\bPT\bTI\bIO\bON\bN
- m\bmd\bda\bad\bdm\bm is a tool for creating, managing, and monitoring
- RAID devices using the m\bmd\bd driver in Linux.
-
- Some common tasks, such as assembling all arrays, can be
- simplified by describing the devices and array in this
- configuration file.
-
-
- S\bSY\bYN\bNT\bTA\bAX\bX
- The file should be seen as a collection of words separated
- by white space (space, tab, or newline). Any word that
- beings with a hash sign (#) starts a comment and that word
- together with the remainder of the line are ignored.
-
- Any line that start with white space (space or tab) is
- treated as though it were a continuation of the previous
- line.
-
- Empty lines are ignored, but otherwise each (non continua-
- tion) line must start with a keyword as listed below. The
- key words are case insensitive and can be abbreviated to 3
- characters.
-
- The keywords are:
-
- D\bDE\bEV\bVI\bIC\bCE\bE A d\bde\bev\bvi\bic\bce\be line lists the devices (whole devices or
- partitions) that might contain a component of an MD
- array. When looking for the components of an
- array, m\bmd\bda\bad\bdm\bm will scan these devices and no others.
-
- The d\bde\bev\bvi\bic\bce\be line may contain a number of different
- devices (separated by spaces) and each device name
- can contain wild cards as defined by g\bgl\blo\bob\bb(7).
-
- Also, there may be several device lines present in
- the file.
-
- For example:
-
- DEVICE /dev/hda* /dev/hdc*
- DEV /dev/sd*
- DEVICE /dev/discs/disc*/disc
-
-
- A\bAR\bRR\bRA\bAY\bY The ARRAY lines identify actual arrays. The second
- word on the line should be the name of the device
- where the array is normally assembled, such as
- /\b/d\bde\bev\bv/\b/m\bmd\bd1\b1. Subsequent words identify the array, or
- identify the array as a member of a group. If mul-
- tiple identities are given, then the array must
- match ALL identities to be considered a match.
- Each identity word has a tag, and equals sign, and
- some value. The options are:
-
-
- u\buu\bui\bid\bd=\b= The value should be a 128 bit uuid in hexadeci-
- mal, with punctuation interspersed if desired.
- This must match the uuid stored in the
- superblock.
-
- s\bsu\bup\bpe\ber\br-\b-m\bmi\bin\bno\bor\br=\b=
- The value is an integer which indicates the
- minor number that was stored in the superblock
- when the array was created. When an array is
- created as /dev/mdX, then the minor number X is
- stored.
-
- d\bde\bev\bvi\bic\bce\bes\bs=\b=
- The value is a comma separated list of device
- names. Precisely these devices will be used to
- assemble the array. Note that the devices
- listed there must also be listed on a DEVICE
- line.
-
- l\ble\bev\bve\bel\bl=\b= The value is a raid level. This is not nor-
- mally used to identify an array, but is sup-
- ported so that the output of
-
- m\bmd\bda\bad\bdm\bm -\b--\b-e\bex\bxa\bam\bmi\bin\bne\be -\b--\b-s\bsc\bca\ban\bn
-
- can be use directly in the configuration file.
-
- d\bdi\bis\bsk\bks\bs=\b= The value is the number of disks in a complete
- active array. As with l\ble\bev\bve\bel\bl=\b= this is mainly
- for compatibility with the output of
-
- m\bmd\bda\bad\bdm\bm -\b--\b-e\bex\bxa\bam\bmi\bin\bne\be -\b--\b-s\bsc\bca\ban\bn.
-
-
- s\bsp\bpa\bar\bre\be-\b-g\bgr\bro\bou\bup\bp=\b=
- The value is a textual name for a group of
- arrays. All arrays with the same s\bsp\bpa\bar\bre\be-\b-g\bgr\bro\bou\bup\bp
- name are considered to be part of the same
- group. The significance of a group of arrays
- is that m\bmd\bda\bad\bdm\bm will, when monitoring the arrays,
- move a spare drive from one array in a group to
- another array in that group if the first array
- had a failed or missing drive but no spare.
-
-
- M\bMA\bAI\bIL\bLA\bAD\bDD\bDR\bR
- The m\bma\bai\bil\bla\bad\bdd\bdr\br line gives an E-mail address that
- alerts should be sent to when is running in -\b--\b-m\bmo\bon\bni\bi-\b-
- t\bto\bor\br mode (and was given the -\b--\b-s\bsc\bca\ban\bn option). There
- should only be one M\bMA\bAI\bIL\bLA\bAD\bDD\bDR\bR line and it should have
- only one address.
-
-
-
- P\bPR\bRO\bOG\bGR\bRA\bAM\bM
- The p\bpr\bro\bog\bgr\bra\bam\bm line gives the name of a program to be
- run when m\bmd\bda\bad\bdm\bm -\b--\b-m\bmo\bon\bni\bit\bto\bor\br detects potentially inter-
- esting events on any of the arrays that it is moni-
- toring. This program gets run with two or three
- arguments, they being the Event, the md device, and
- possibly the related component device.
-
- There should only be one p\bpr\bro\bog\bgr\bra\bam\bm line and it should
- be give only one program.
-
-
-
-S\bSE\bEE\bE A\bAL\bLS\bSO\bO
- m\bmd\bda\bad\bdm\bm(8), m\bmd\bd(4).
-
-
-
-
- MDADM.CONF(5)
extern void put_md_name(char *name);
extern char *get_md_name(int dev);
+extern char DefaultConfFile[];
Summary: mdadm is used for controlling Linux md devices (aka RAID arrays)
Name: mdadm
-Version: 0.8
+Version: 0.8.1
Release: 1
Source: http://www.cse.unsw.edu.au/~neilb/source/mdadm/mdadm-%{version}.tgz
URL: http://www.cse.unsw.edu.au/~neilb/source/mdadm/
make CFLAGS="$RPM_OPT_FLAGS" SYSCONFDIR="%{_sysconfdir}"
%install
-#rm -rf $RPM_BUILD_ROOT
-mkdir -p $RPM_BUILD_ROOT/%{_sbindir}
-install -m755 mdadm $RPM_BUILD_ROOT/%{_sbindir}
-mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}
-install -m644 mdadm.conf-example $RPM_BUILD_ROOT/%{_sysconfdir}/mdadm.conf
-mkdir -p $RPM_BUILD_ROOT/%{_mandir}/man4
-mkdir -p $RPM_BUILD_ROOT/%{_mandir}/man5
-mkdir -p $RPM_BUILD_ROOT/%{_mandir}/man8
-install -m644 md.4 $RPM_BUILD_ROOT/%{_mandir}/man4/
-install -m644 mdadm.conf.5 $RPM_BUILD_ROOT/%{_mandir}/man5/
-install -m644 mdadm.8 $RPM_BUILD_ROOT/%{_mandir}/man8/
+make DESTDIR=$RPM_BUILD_ROOT MANDIR=%{_mandir} BINDIR=%{_sbindir} install
+install -D -m644 mdadm.conf-example $RPM_BUILD_ROOT/%{_sysconfdir}/mdadm.conf
%clean
rm -rf $RPM_BUILD_ROOT
%files
%defattr(-,root,root)
-%doc TODO ChangeLog mdadm.man mdadm.conf-example COPYING
+%doc TODO ChangeLog mdadm.conf-example COPYING
%{_sbindir}/mdadm
%config(noreplace,missingok)/%{_sysconfdir}/mdadm.conf
%{_mandir}/man*/md*
%changelog
+* Sat Apr 6 2002 <neilb@cse.unsw.edu.au>
+- change %install to use "make install"
+
* Fri Mar 15 2002 <gleblanc@localhost.localdomain>
- beautification
- made mdadm.conf non-replaceable config