+ /* Compute power-of-2 table (exponent) */
+ v = 1;
+ for (i = 0; i < 256; i++) {
+ raid6_gfexp[i] = v;
+ v = gfmul(v, 2);
+ if (v == 1)
+ v = 0; /* For entry 255, not a real entry */
+ }
+
+ /* Compute inverse table x^-1 == x^254 */
+ for (i = 0; i < 256; i++)
+ raid6_gfinv[i] = gfpow(i, 254);
+
+ /* Compute inv(2^x + 1) (exponent-xor-inverse) table */
+ for (i = 0; i < 256; i ++)
+ raid6_gfexi[i] = raid6_gfinv[raid6_gfexp[i] ^ 1];
+
+ /* Compute log and inverse log */
+ /* Modified code from:
+ * http://web.eecs.utk.edu/~plank/plank/papers/CS-96-332.html
+ */
+ b = 1;
+ raid6_gflog[0] = 0;
+ raid6_gfilog[255] = 0;
+
+ for (log = 0; log < 255; log++) {
+ raid6_gflog[b] = (uint8_t) log;
+ raid6_gfilog[log] = (uint8_t) b;
+ b = b << 1;
+ if (b & 256) b = b ^ 0435;
+ }
+
+ tables_ready = 1;
+}
+
/* Shared all-zero buffer, substituted for missing data pages during
 * syndrome computation.  Grown on demand by ensure_zero_has_size(). */
uint8_t *zero;
/* Current size of the zero buffer, in bytes (0 until first allocation). */
int zero_size;
+
+void ensure_zero_has_size(int chunk_size)
+{
+ if (zero == NULL || chunk_size > zero_size) {
+ if (zero)
+ free(zero);
+ zero = xcalloc(1, chunk_size);
+ zero_size = chunk_size;
+ }
+}
+
+/* Following was taken from linux/drivers/md/raid6recov.c */
+
+/* Recover two failed data blocks. */
+
+void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
+ uint8_t **ptrs, int neg_offset)
+{
+ uint8_t *p, *q, *dp, *dq;
+ uint8_t px, qx, db;
+ const uint8_t *pbmul; /* P multiplier table for B data */
+ const uint8_t *qmul; /* Q multiplier table (for both) */
+
+ if (faila > failb) {
+ int t = faila;
+ faila = failb;
+ failb = t;
+ }
+
+ if (neg_offset) {
+ p = ptrs[-1];
+ q = ptrs[-2];
+ } else {
+ p = ptrs[disks-2];
+ q = ptrs[disks-1];
+ }
+
+ /* Compute syndrome with zero for the missing data pages
+ Use the dead data pages as temporary storage for
+ delta p and delta q */
+ dp = ptrs[faila];
+ ptrs[faila] = zero;
+ dq = ptrs[failb];
+ ptrs[failb] = zero;
+
+ qsyndrome(dp, dq, ptrs, disks-2, bytes);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
+ qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];
+
+ /* Now do it... */
+ while ( bytes-- ) {
+ px = *p ^ *dp;
+ qx = qmul[*q ^ *dq];
+ *dq++ = db = pbmul[px] ^ qx; /* Reconstructed B */
+ *dp++ = db ^ px; /* Reconstructed A */
+ p++; q++;
+ }
+}
+
+/* Recover failure of one data block plus the P block */
+void raid6_datap_recov(int disks, size_t bytes, int faila, uint8_t **ptrs,
+ int neg_offset)
+{
+ uint8_t *p, *q, *dq;
+ const uint8_t *qmul; /* Q multiplier table */
+
+ if (neg_offset) {
+ p = ptrs[-1];
+ q = ptrs[-2];
+ } else {
+ p = ptrs[disks-2];
+ q = ptrs[disks-1];
+ }
+
+ /* Compute syndrome with zero for the missing data page
+ Use the dead data page as temporary storage for delta q */
+ dq = ptrs[faila];
+ ptrs[faila] = zero;
+
+ qsyndrome(p, dq, ptrs, disks-2, bytes);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ /* Now do it... */
+ while ( bytes-- ) {
+ *p++ ^= *dq = qmul[*q ^ *dq];
+ q++; dq++;
+ }
+}
+
+/* Try to find out if a specific disk has a problem */
/* Try to find out if a specific disk has a problem.
 *
 * Compares the stored P and Q chunks (stripes[diskP]/stripes[diskQ])
 * against freshly computed parity (p/q), byte by byte.  For each byte:
 *   - P differs, Q matches  -> the P disk is suspect
 *   - P matches, Q differs  -> the Q disk is suspect
 *   - both differ           -> the GF logs of the two deltas identify
 *     which data slot is wrong; geo_map() translates that slot into a
 *     physical disk for this stripe (start/chunk_size)
 *   - neither differs       -> carry forward the previous suspect
 *
 * broken_status state machine: 0 = no fault seen yet, 1 = exactly one
 * consistent suspect so far, 2 = conflicting suspects (unrecoverable
 * conclusion).
 *
 * Returns -1 if no disk looks broken, the disk number if every faulty
 * byte points at the same disk, or -2 if the evidence is inconsistent.
 */
int raid6_check_disks(int data_disks, int start, int chunk_size,
		int level, int layout, int diskP, int diskQ,
		uint8_t *p, uint8_t *q, char **stripes)
{
	int i;
	int data_id, diskD;
	uint8_t Px, Qx;		/* per-byte deltas against stored P and Q */
	int curr_broken_disk = -1;
	int prev_broken_disk = -1;
	int broken_status = 0;

	for(i = 0; i < chunk_size; i++) {
		Px = (uint8_t)stripes[diskP][i] ^ (uint8_t)p[i];
		Qx = (uint8_t)stripes[diskQ][i] ^ (uint8_t)q[i];

		if((Px != 0) && (Qx == 0))
			curr_broken_disk = diskP;

		if((Px == 0) && (Qx != 0))
			curr_broken_disk = diskQ;

		if((Px != 0) && (Qx != 0)) {
			/* data slot = log(Qx) - log(Px) mod 255 */
			data_id = (raid6_gflog[Qx] - raid6_gflog[Px]);
			if(data_id < 0) data_id += 255;
			diskD = geo_map(data_id, start/chunk_size,
					data_disks + 2, level, layout);
			curr_broken_disk = diskD;
		}

		if((Px == 0) && (Qx == 0))
			curr_broken_disk = prev_broken_disk;

		/* A slot beyond the array means the math is inconsistent */
		if(curr_broken_disk >= data_disks + 2)
			broken_status = 2;

		switch(broken_status) {
		case 0:
			if(curr_broken_disk != -1) {
				prev_broken_disk = curr_broken_disk;
				broken_status = 1;
			}
			break;

		case 1:
			if(curr_broken_disk != prev_broken_disk)
				broken_status = 2;
			break;

		case 2:
		default:
			curr_broken_disk = prev_broken_disk = -2;
			break;
		}
	}

	return curr_broken_disk;
}
+
+/*******************************************************************************
+ * Function: save_stripes
+ * Description:
 *	Function reads data (only the data, without P and Q) from the array
 *	and writes it to buf and optionally to backup files
+ * Parameters:
+ * source : A list of 'fds' of the active disks.
+ * Some may be absent
+ * offsets : A list of offsets on disk belonging
+ * to the array [bytes]
+ * raid_disks : geometry: number of disks in the array
+ * chunk_size : geometry: chunk size [bytes]
+ * level : geometry: RAID level
+ * layout : geometry: layout
+ * nwrites : number of backup files
+ * dest : A list of 'fds' for mirrored targets
 *			(e.g. backup files). They are already seeked to the
 *			right (write) location. If NULL, data will be written
 *			to buf only
+ * start : start address of data to read (must be stripe-aligned)
+ * [bytes]
 *	length		: length of data to read (must be stripe-aligned)
 *			  [bytes]
+ * buf : buffer for data. It is large enough to hold
+ * one stripe. It is stripe aligned
+ * Returns:
+ * 0 : success
+ * -1 : fail
+ ******************************************************************************/