Annotation of sys/dev/raidframe/rf_parityscan.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: rf_parityscan.c,v 1.7 2002/12/16 07:01:04 tdeval Exp $ */
2: /* $NetBSD: rf_parityscan.c,v 1.9 2000/05/28 03:00:31 oster Exp $ */
3:
4: /*
5: * Copyright (c) 1995 Carnegie-Mellon University.
6: * All rights reserved.
7: *
8: * Author: Mark Holland
9: *
10: * Permission to use, copy, modify and distribute this software and
11: * its documentation is hereby granted, provided that both the copyright
12: * notice and this permission notice appear in all copies of the
13: * software, derivative works or modified versions, and any portions
14: * thereof, and that both notices appear in supporting documentation.
15: *
16: * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
17: * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
18: * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
19: *
20: * Carnegie Mellon requests users of this software to return to
21: *
22: * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
23: * School of Computer Science
24: * Carnegie Mellon University
25: * Pittsburgh PA 15213-3890
26: *
27: * any improvements or extensions that they make and grant Carnegie the
28: * rights to redistribute these changes.
29: */
30:
31: /*****************************************************************************
32: *
33: * rf_parityscan.c -- Misc utilities related to parity verification.
34: *
35: *****************************************************************************/
36:
37: #include "rf_types.h"
38: #include "rf_raid.h"
39: #include "rf_dag.h"
40: #include "rf_dagfuncs.h"
41: #include "rf_dagutils.h"
42: #include "rf_mcpair.h"
43: #include "rf_general.h"
44: #include "rf_engine.h"
45: #include "rf_parityscan.h"
46: #include "rf_map.h"
47:
48:
49: /*****************************************************************************
50: *
51:  * Walk through the entire array and write new parity.
52: * This works by creating two DAGs, one to read a stripe of data and one to
53: * write new parity. The first is executed, the data is xored together, and
54: * then the second is executed. To avoid constantly building and tearing down
55: * the DAGs, we create them a priori and fill them in with the mapping
56: * information as we go along.
57: *
58: * There should never be more than one thread running this.
59: *
60: *****************************************************************************/
61: int
62: rf_RewriteParity(RF_Raid_t *raidPtr)
63: {
64: RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
65: RF_AccessStripeMapHeader_t *asm_h;
66: int ret_val;
67: int rc;
68: RF_PhysDiskAddr_t pda;
69: RF_SectorNum_t i;
70:
71: if (raidPtr->Layout.map->faultsTolerated == 0) {
72: /* There isn't any parity. Call it "okay." */
73: return (RF_PARITY_OKAY);
74: }
75: if (raidPtr->status[0] != rf_rs_optimal) {
76: /*
77: * We're in degraded mode. Don't try to verify parity now !
78: * XXX: This should be a "we don't want to", not a
79: * "we can't" error.
80: */
81: return (RF_PARITY_COULD_NOT_VERIFY);
82: }
83:
84: ret_val = 0;
85:
86: pda.startSector = 0;
87: pda.numSector = raidPtr->Layout.sectorsPerStripeUnit;
88: rc = RF_PARITY_OKAY;
89:
90: for (i = 0; i < raidPtr->totalSectors && rc <= RF_PARITY_CORRECTED;
91: i += layoutPtr->dataSectorsPerStripe) {
92: if (raidPtr->waitShutdown) {
93: /*
94: * Someone is pulling the plug on this set...
95: * Abort the re-write.
96: */
97: return (1);
98: }
99: asm_h = rf_MapAccess(raidPtr, i,
100: layoutPtr->dataSectorsPerStripe, NULL, RF_DONT_REMAP);
101: raidPtr->parity_rewrite_stripes_done =
102: i / layoutPtr->dataSectorsPerStripe ;
103: rc = rf_VerifyParity(raidPtr, asm_h->stripeMap, 1, 0);
104: switch (rc) {
105: case RF_PARITY_OKAY:
106: case RF_PARITY_CORRECTED:
107: break;
108: case RF_PARITY_BAD:
109: printf("Parity bad during correction.\n");
110: ret_val = 1;
111: break;
112: case RF_PARITY_COULD_NOT_CORRECT:
113: printf("Could not correct bad parity.\n");
114: ret_val = 1;
115: break;
116: case RF_PARITY_COULD_NOT_VERIFY:
117: printf("Could not verify parity.\n");
118: ret_val = 1;
119: break;
120: default:
121: printf("Bad rc=%d from VerifyParity in"
122: " RewriteParity.\n", rc);
123: ret_val = 1;
124: }
125: rf_FreeAccessStripeMap(asm_h);
126: }
127: return (ret_val);
128: }
129:
130:
131: /*****************************************************************************
132: *
133: * Verify that the parity in a particular stripe is correct.
134: * We validate only the range of parity defined by parityPDA, since
135: * this is all we have locked. The way we do this is to create an asm
136: * that maps the whole stripe and then range-restrict it to the parity
137: * region defined by the parityPDA.
138: *
139: *****************************************************************************/
140: int
141: rf_VerifyParity(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *aasm, int correct_it,
142: RF_RaidAccessFlags_t flags)
143: {
144: RF_PhysDiskAddr_t *parityPDA;
145: RF_AccessStripeMap_t *doasm;
146: RF_LayoutSW_t *lp;
147: int lrc, rc;
148:
149: lp = raidPtr->Layout.map;
150: if (lp->faultsTolerated == 0) {
151: /*
152: * There isn't any parity. Call it "okay."
153: */
154: return (RF_PARITY_OKAY);
155: }
156: rc = RF_PARITY_OKAY;
157: if (lp->VerifyParity) {
158: for (doasm = aasm; doasm; doasm = doasm->next) {
159: for (parityPDA = doasm->parityInfo; parityPDA;
160: parityPDA = parityPDA->next) {
161: lrc = lp->VerifyParity(raidPtr,
162: doasm->raidAddress, parityPDA, correct_it,
163: flags);
164: if (lrc > rc) {
165: /*
166: * see rf_parityscan.h for why this
167: * works.
168: */
169: rc = lrc;
170: }
171: }
172: }
173: } else {
174: rc = RF_PARITY_COULD_NOT_VERIFY;
175: }
176: return (rc);
177: }
178:
/*
 * Verify (and optionally correct) the parity covering the range given
 * by parityPDA within the stripe containing raidAddr.
 *
 * Strategy: build a read DAG that pulls every column of the stripe
 * (numDataCol data units plus numParityCol parity units) into one
 * buffer, XOR the data portions together into pbuf, and compare that
 * recomputed parity byte-for-byte against the parity that was read.
 * If they differ and correct_it is set, a one-node write DAG pushes
 * the recomputed parity back to disk.
 *
 * Returns one of the RF_PARITY_* codes.  A dead disk in the stripe
 * makes verification impossible; that path returns RF_PARITY_OKAY
 * (deliberate -- see the comment at the redirect check).
 */
int
rf_VerifyParityBasic(RF_Raid_t *raidPtr, RF_RaidAddr_t raidAddr,
    RF_PhysDiskAddr_t *parityPDA, int correct_it, RF_RaidAccessFlags_t flags)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_RaidAddr_t startAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr,
	    raidAddr);
	RF_SectorCount_t numsector = parityPDA->numSector;
	int numbytes = rf_RaidAddressToByte(raidPtr, numsector);
	int bytesPerStripe = numbytes * layoutPtr->numDataCol;
	RF_DagHeader_t *rd_dag_h, *wr_dag_h;	/* Read, write dag. */
	RF_DagNode_t *blockNode, *unblockNode, *wrBlock, *wrUnblock;
	RF_AccessStripeMapHeader_t *asm_h;
	RF_AccessStripeMap_t *asmap;
	RF_AllocListElem_t *alloclist;
	RF_PhysDiskAddr_t *pda;
	char *pbuf, *buf, *end_p, *p;
	int i, retcode;
	RF_ReconUnitNum_t which_ru;
	RF_StripeNum_t psID = rf_RaidAddressToParityStripeID(layoutPtr,
	    raidAddr, &which_ru);
	int stripeWidth = layoutPtr->numDataCol + layoutPtr->numParityCol;
	RF_AccTraceEntry_t tracerec;
	RF_MCPair_t *mcpair;

	retcode = RF_PARITY_OKAY;

	mcpair = rf_AllocMCPair();
	rf_MakeAllocList(alloclist);
	/* One numbytes-sized slot per column: data first, then parity. */
	RF_MallocAndAdd(buf, numbytes * (layoutPtr->numDataCol +
	    layoutPtr->numParityCol), (char *), alloclist);
	/* Use calloc to make sure buffer is zeroed. */
	RF_CallocAndAdd(pbuf, 1, numbytes, (char *), alloclist);
	/* end_p marks the end of the data columns; parity follows it. */
	end_p = buf + bytesPerStripe;

	rd_dag_h = rf_MakeSimpleDAG(raidPtr, stripeWidth, numbytes, buf,
	    rf_DiskReadFunc, rf_DiskReadUndoFunc,
	    "Rod", alloclist, flags, RF_IO_NORMAL_PRIORITY);
	/*
	 * rf_MakeSimpleDAG wires header -> block -> workers -> unblock,
	 * so the worker nodes are reachable as blockNode->succedents[].
	 */
	blockNode = rd_dag_h->succedents[0];
	unblockNode = blockNode->succedents[0]->succedents[0];

	/* Map the stripe and fill in the PDAs in the dag. */
	asm_h = rf_MapAccess(raidPtr, startAddr,
	    layoutPtr->dataSectorsPerStripe, buf, RF_DONT_REMAP);
	asmap = asm_h->stripeMap;

	/* Hook one (range-restricted) data PDA into each read node. */
	for (pda = asmap->physInfo, i = 0; i < layoutPtr->numDataCol;
	    i++, pda = pda->next) {
		RF_ASSERT(pda);
		rf_RangeRestrictPDA(raidPtr, parityPDA, pda, 0, 1);
		RF_ASSERT(pda->numSector != 0);
		if (rf_TryToRedirectPDA(raidPtr, pda, 0))
			goto out;	/*
					 * No way to verify parity if disk is
					 * dead. Return w/ good status.
					 */
		blockNode->succedents[i]->params[0].p = pda;
		blockNode->succedents[i]->params[2].v = psID;
		blockNode->succedents[i]->params[3].v =
		    RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
	}

	/* The last read node gets the (single) parity PDA. */
	RF_ASSERT(!asmap->parityInfo->next);
	rf_RangeRestrictPDA(raidPtr, parityPDA, asmap->parityInfo, 0, 1);
	RF_ASSERT(asmap->parityInfo->numSector != 0);
	if (rf_TryToRedirectPDA(raidPtr, asmap->parityInfo, 1))
		goto out;
	blockNode->succedents[layoutPtr->numDataCol]->params[0].p =
	    asmap->parityInfo;

	/* Fire off the DAG. */
	bzero((char *) &tracerec, sizeof(tracerec));
	rd_dag_h->tracerec = &tracerec;

	if (rf_verifyParityDebug) {
		printf("Parity verify read dag:\n");
		rf_PrintDAGList(rd_dag_h);
	}
	/* Dispatch and sleep on the mcpair until the DAG completes. */
	RF_LOCK_MUTEX(mcpair->mutex);
	mcpair->flag = 0;
	rf_DispatchDAG(rd_dag_h, (void (*) (void *)) rf_MCPairWakeupFunc,
	    (void *) mcpair);
	while (!mcpair->flag)
		RF_WAIT_COND(mcpair->cond, mcpair->mutex);
	RF_UNLOCK_MUTEX(mcpair->mutex);
	if (rd_dag_h->status != rf_enable) {
		RF_ERRORMSG("Unable to verify parity: can't read the"
		    " stripe.\n");
		retcode = RF_PARITY_COULD_NOT_VERIFY;
		goto out;
	}
	/* XOR all data columns together; pbuf ends up as the expected parity. */
	for (p = buf; p < end_p; p += numbytes) {
		rf_bxor(p, pbuf, numbytes, NULL);
	}
	/* Compare recomputed parity against what was read from disk. */
	for (i = 0; i < numbytes; i++) {
#if 0
		if (pbuf[i] != 0 || buf[bytesPerStripe + i] != 0) {
			printf("Bytes: %d %d %d\n", i, pbuf[i],
			    buf[bytesPerStripe + i]);
		}
#endif
		if (pbuf[i] != buf[bytesPerStripe + i]) {
			if (!correct_it)
				RF_ERRORMSG3("Parity verify error: byte %d of"
				    " parity is 0x%x should be 0x%x.\n", i,
				    (u_char) buf[bytesPerStripe + i],
				    (u_char) pbuf[i]);
			retcode = RF_PARITY_BAD;
			break;
		}
	}

	/* Parity was wrong and we were asked to fix it: write pbuf back. */
	if (retcode && correct_it) {
		wr_dag_h = rf_MakeSimpleDAG(raidPtr, 1, numbytes, pbuf,
		    rf_DiskWriteFunc, rf_DiskWriteUndoFunc,
		    "Wnp", alloclist, flags, RF_IO_NORMAL_PRIORITY);
		wrBlock = wr_dag_h->succedents[0];
		wrUnblock = wrBlock->succedents[0]->succedents[0];
		wrBlock->succedents[0]->params[0].p = asmap->parityInfo;
		wrBlock->succedents[0]->params[2].v = psID;
		wrBlock->succedents[0]->params[3].v =
		    RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		bzero((char *) &tracerec, sizeof(tracerec));
		wr_dag_h->tracerec = &tracerec;
		if (rf_verifyParityDebug) {
			printf("Parity verify write dag:\n");
			rf_PrintDAGList(wr_dag_h);
		}
		RF_LOCK_MUTEX(mcpair->mutex);
		mcpair->flag = 0;
		rf_DispatchDAG(wr_dag_h, (void (*) (void *))
		    rf_MCPairWakeupFunc, (void *) mcpair);
		while (!mcpair->flag)
			RF_WAIT_COND(mcpair->cond, mcpair->mutex);
		RF_UNLOCK_MUTEX(mcpair->mutex);
		if (wr_dag_h->status != rf_enable) {
			RF_ERRORMSG("Unable to correct parity in VerifyParity:"
			    " can't write the stripe.\n");
			retcode = RF_PARITY_COULD_NOT_CORRECT;
		}
		rf_FreeDAG(wr_dag_h);
		if (retcode == RF_PARITY_BAD)
			retcode = RF_PARITY_CORRECTED;
	}
out:
	/* Common cleanup for success, dead-disk, and error paths. */
	rf_FreeAccessStripeMap(asm_h);
	rf_FreeAllocList(alloclist);
	rf_FreeDAG(rd_dag_h);
	rf_FreeMCPair(mcpair);
	return (retcode);
}
330:
331: int
332: rf_TryToRedirectPDA(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *pda, int parity)
333: {
334: if (raidPtr->Disks[pda->row][pda->col].status == rf_ds_reconstructing) {
335: if (rf_CheckRUReconstructed(raidPtr->reconControl[pda->row]
336: ->reconMap, pda->startSector)) {
337: if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
338: RF_RowCol_t or = pda->row, oc = pda->col;
339: RF_SectorNum_t os = pda->startSector;
340: if (parity) {
341: (raidPtr->Layout.map->MapParity)
342: (raidPtr, pda->raidAddress,
343: &pda->row, &pda->col,
344: &pda->startSector, RF_REMAP);
345: if (rf_verifyParityDebug)
346: printf("VerifyParity: Redir P"
347: " r %d c %d sect %ld ->"
348: " r %d c %d sect %ld.\n",
349: or, oc, (long) os,
350: pda->row, pda->col,
351: (long) pda->startSector);
352: } else {
353: (raidPtr->Layout.map->MapSector)
354: (raidPtr, pda->raidAddress,
355: &pda->row, &pda->col,
356: &pda->startSector, RF_REMAP);
357: if (rf_verifyParityDebug)
358: printf("VerifyParity: Redir D"
359: " r %d c %d sect %ld ->"
360: " r %d c %d sect %ld.\n",
361: or, oc, (long) os,
362: pda->row, pda->col,
363: (long) pda->startSector);
364: }
365: } else {
366: RF_RowCol_t spRow =
367: raidPtr->Disks[pda->row][pda->col].spareRow;
368: RF_RowCol_t spCol =
369: raidPtr->Disks[pda->row][pda->col].spareCol;
370: pda->row = spRow;
371: pda->col = spCol;
372: }
373: }
374: }
375: if (RF_DEAD_DISK(raidPtr->Disks[pda->row][pda->col].status))
376: return (1);
377: return (0);
378: }
379:
380:
381: /*****************************************************************************
382: *
383: * Currently a stub.
384: *
385: * Takes as input an ASM describing a write operation and containing one
386: * failure, and verifies that the parity was correctly updated to reflect the
387: * write.
388: *
389: * If it's a data unit that has failed, we read the other data units in the
390: * stripe and the parity unit, XOR them together, and verify that we get the
391: * data intended for the failed disk. Since it's easy, we also validate that
392: * the right data got written to the surviving data disks.
393: *
394: * If it's the parity that failed, there's really no validation we can do
395: * except the above verification that the right data got written to all disks.
396: * This is because the new data intended for the failed disk is supplied in
397: * the ASM, but this is of course not the case for the new parity.
398: *
399: *****************************************************************************/
int
rf_VerifyDegrModeWrite(RF_Raid_t *raidPtr, RF_AccessStripeMapHeader_t *asmh)
{
	/* Stub (see banner above): no validation yet, report success. */
	return (0);
}
405:
406:
407: /*
408: * Creates a simple DAG with a header, a block-recon node at level 1,
409: * nNodes nodes at level 2, an unblock-recon node at level 3, and
410: * a terminator node at level 4. The stripe address field in
411: * the block and unblock nodes are not touched, nor are the pda
412: * fields in the second-level nodes, so they must be filled in later.
413: *
414: * Commit point is established at unblock node - this means that any
415: * failure during dag execution causes the dag to fail.
416: */
417: RF_DagHeader_t *
418: rf_MakeSimpleDAG(RF_Raid_t *raidPtr, int nNodes, int bytesPerSU, char *databuf,
419: int (*doFunc) (RF_DagNode_t * node), int (*undoFunc) (RF_DagNode_t * node),
420: char *name /* Node names at the second level. */,
421: RF_AllocListElem_t *alloclist, RF_RaidAccessFlags_t flags, int priority)
422: {
423: RF_DagHeader_t *dag_h;
424: RF_DagNode_t *nodes, *termNode, *blockNode, *unblockNode;
425: int i;
426:
427: /*
428: * Create the nodes, the block & unblock nodes, and the terminator
429: * node.
430: */
431: RF_CallocAndAdd(nodes, nNodes + 3, sizeof(RF_DagNode_t),
432: (RF_DagNode_t *), alloclist);
433: blockNode = &nodes[nNodes];
434: unblockNode = blockNode + 1;
435: termNode = unblockNode + 1;
436:
437: dag_h = rf_AllocDAGHeader();
438: dag_h->raidPtr = (void *) raidPtr;
439: dag_h->allocList = NULL; /* We won't use this alloc list. */
440: dag_h->status = rf_enable;
441: dag_h->numSuccedents = 1;
442: dag_h->creator = "SimpleDAG";
443:
444: /*
445: * This dag can not commit until the unblock node is reached.
446: * Errors prior to the commit point imply the dag has failed.
447: */
448: dag_h->numCommitNodes = 1;
449: dag_h->numCommits = 0;
450:
451: dag_h->succedents[0] = blockNode;
452: rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc,
453: rf_NullNodeUndoFunc, NULL, nNodes, 0, 0, 0, dag_h,
454: "Nil", alloclist);
455: rf_InitNode(unblockNode, rf_wait, RF_TRUE, rf_NullNodeFunc,
456: rf_NullNodeUndoFunc, NULL, 1, nNodes, 0, 0, dag_h,
457: "Nil", alloclist);
458: unblockNode->succedents[0] = termNode;
459: for (i = 0; i < nNodes; i++) {
460: blockNode->succedents[i] = unblockNode->antecedents[i]
461: = &nodes[i];
462: unblockNode->antType[i] = rf_control;
463: rf_InitNode(&nodes[i], rf_wait, RF_FALSE, doFunc, undoFunc,
464: rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, name, alloclist);
465: nodes[i].succedents[0] = unblockNode;
466: nodes[i].antecedents[0] = blockNode;
467: nodes[i].antType[0] = rf_control;
468: nodes[i].params[1].p = (databuf + (i * bytesPerSU));
469: }
470: rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc,
471: rf_TerminateUndoFunc, NULL, 0, 1, 0, 0, dag_h, "Trm", alloclist);
472: termNode->antecedents[0] = unblockNode;
473: termNode->antType[0] = rf_control;
474: return (dag_h);
475: }
CVSweb