/*	$OpenBSD: rf_diskqueue.c,v 1.7 2002/12/16 07:01:03 tdeval Exp $	*/
/*	$NetBSD: rf_diskqueue.c,v 1.13 2000/03/04 04:22:34 oster Exp $	*/

/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*****************************************************************************
 *
 * rf_diskqueue.c -- Higher-level disk queue code.
 *
 * The routines here are a generic wrapper around the actual queueing
 * routines. The code here implements thread scheduling, synchronization,
 * and locking ops (see below) on top of the lower-level queueing code.
 *
 * To support atomic RMW, we implement "locking operations". When a
 * locking op is dispatched to the lower levels of the driver, the
 * queue is locked, and no further I/Os are dispatched until the queue
 * receives & completes a corresponding "unlocking operation". This
 * code relies on the higher layers to guarantee that a locking op
 * will always eventually be followed by an unlocking op. The model
 * is that the higher layers are structured so that locking and
 * unlocking ops occur in pairs, i.e. an unlocking op cannot be
 * generated until after a locking op reports completion. There is no
 * good way to check that an unlocking op "corresponds" to the op that
 * currently has the queue locked, so we make no such attempt. Since
 * by definition there can be only one locking op outstanding on a
 * disk, this should not be a problem.
 *
 * In the kernel, we allow multiple I/Os to be concurrently dispatched
 * to the disk driver. In order to support locking ops in this
 * environment, when we decide to do a locking op, we stop dispatching
 * new I/Os and wait until all dispatched I/Os have completed before
 * dispatching the locking op.
 *
 * Unfortunately, the code differs in the three environments (user
 * level, kernel, simulator). In the kernel, I/O is non-blocking, and
 * we have no disk threads to dispatch for us. Therefore, we have to
 * dispatch new I/Os to the scsi driver at the time of enqueue, and
 * also at the time of completion. At user level, I/O is blocking, and
 * so only the disk threads may dispatch I/Os. Thus at user level, all
 * we can do at enqueue time is enqueue and wake up the disk thread to
 * do the dispatch.
 *
 *****************************************************************************/
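
/*
 * Illustrative sketch (not part of the original file): how a caller
 * pairs a locking op with an unlocking op to get an atomic RMW. All
 * local variables here are hypothetical, and the flag names
 * RF_LOCK_DISK_QUEUE / RF_UNLOCK_DISK_QUEUE are our assumption about
 * rf_diskqueue.h, not code quoted from RAIDframe.
 */
#if 0
	/* Read phase: dispatching this op locks the queue. */
	rd = rf_CreateDiskQueueDataFull(RF_IO_TYPE_READ, sect, nsect,
	    buf, psid, which_ru, rmw_read_done, ctx, NULL, NULL,
	    RF_IO_NORMAL_PRIORITY, NULL, NULL, raidPtr,
	    RF_LOCK_DISK_QUEUE, kb_proc);
	rf_DiskIOEnqueue(queue, rd, RF_IO_NORMAL_PRIORITY);

	/*
	 * Later, from rmw_read_done(): the write phase carries the
	 * unlocking flag, and its completion unlocks the queue.
	 */
	wr = rf_CreateDiskQueueDataFull(RF_IO_TYPE_WRITE, sect, nsect,
	    buf, psid, which_ru, rmw_write_done, ctx, NULL, NULL,
	    RF_IO_NORMAL_PRIORITY, NULL, NULL, raidPtr,
	    RF_UNLOCK_DISK_QUEUE, kb_proc);
	rf_DiskIOEnqueue(queue, wr, RF_IO_NORMAL_PRIORITY);
#endif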

#include "rf_types.h"
#include "rf_threadstuff.h"
#include "rf_raid.h"
#include "rf_diskqueue.h"
#include "rf_alloclist.h"
#include "rf_acctrace.h"
#include "rf_etimer.h"
#include "rf_configure.h"
#include "rf_general.h"
#include "rf_freelist.h"
#include "rf_debugprint.h"
#include "rf_shutdown.h"
#include "rf_cvscan.h"
#include "rf_sstf.h"
#include "rf_fifo.h"
#include "rf_kintf.h"

int  rf_init_dqd(RF_DiskQueueData_t *);
void rf_clean_dqd(RF_DiskQueueData_t *);
void rf_ShutdownDiskQueueSystem(void *);

/*
 * Debug printf wrappers. The do { } while (0) wrapping makes each
 * macro expand to a single statement, so it remains safe inside an
 * unbraced if/else at the call site.
 */
#define Dprintf1(s,a) do { \
	if (rf_queueDebug) \
		rf_debug_printf(s,(void *)((unsigned long)a), \
		    NULL,NULL,NULL,NULL,NULL,NULL,NULL); \
} while (0)
#define Dprintf2(s,a,b) do { \
	if (rf_queueDebug) \
		rf_debug_printf(s,(void *)((unsigned long)a), \
		    (void *)((unsigned long)b), \
		    NULL,NULL,NULL,NULL,NULL,NULL); \
} while (0)
#define Dprintf3(s,a,b,c) do { \
	if (rf_queueDebug) \
		rf_debug_printf(s,(void *)((unsigned long)a), \
		    (void *)((unsigned long)b), \
		    (void *)((unsigned long)c), \
		    NULL,NULL,NULL,NULL,NULL); \
} while (0)

/*****************************************************************************
 *
 * The disk queue switch defines all the functions used by the
 * different queueing disciplines: the queue type name, plus the
 * create, enqueue, dequeue, peek, and promote routines.
 *
 *****************************************************************************/

static RF_DiskQueueSW_t diskqueuesw[] = {
	{"fifo",		/* FIFO */
	 rf_FifoCreate,
	 rf_FifoEnqueue,
	 rf_FifoDequeue,
	 rf_FifoPeek,
	 rf_FifoPromote},

	{"cvscan",		/* cvscan */
	 rf_CvscanCreate,
	 rf_CvscanEnqueue,
	 rf_CvscanDequeue,
	 rf_CvscanPeek,
	 rf_CvscanPromote},

	{"sstf",		/* shortest seek time first */
	 rf_SstfCreate,
	 rf_SstfEnqueue,
	 rf_SstfDequeue,
	 rf_SstfPeek,
	 rf_SstfPromote},

	{"scan",		/* SCAN (two-way elevator) */
	 rf_ScanCreate,
	 rf_SstfEnqueue,
	 rf_ScanDequeue,
	 rf_ScanPeek,
	 rf_SstfPromote},

	{"cscan",		/* CSCAN (one-way elevator) */
	 rf_CscanCreate,
	 rf_SstfEnqueue,
	 rf_CscanDequeue,
	 rf_CscanPeek,
	 rf_SstfPromote},

};
#define	NUM_DISK_QUEUE_TYPES	(sizeof(diskqueuesw)/sizeof(RF_DiskQueueSW_t))
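
/*
 * A minimal sketch of how one more discipline would be registered:
 * append an entry with the five routines before the closing brace of
 * diskqueuesw above. The "lifo" name and the rf_Lifo* routines below
 * are hypothetical, purely to show the shape of an entry.
 */
#if 0
	{"lifo",		/* last in, first out */
	 rf_LifoCreate,
	 rf_LifoEnqueue,
	 rf_LifoDequeue,
	 rf_LifoPeek,
	 rf_LifoPromote},
#endif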

static RF_FreeList_t *rf_dqd_freelist;

#define	RF_MAX_FREE_DQD	256
#define	RF_DQD_INC	 16
#define	RF_DQD_INITIAL	 64

#include <sys/buf.h>

int
rf_init_dqd(RF_DiskQueueData_t *dqd)
{

	dqd->bp = (struct buf *) malloc(sizeof(struct buf), M_RAIDFRAME,
	    M_NOWAIT);
	if (dqd->bp == NULL) {
		return (ENOMEM);
	}
	/* If you don't do it, nobody else will... */
	memset(dqd->bp, 0, sizeof(struct buf));

	return (0);
}

void
rf_clean_dqd(RF_DiskQueueData_t *dqd)
{
	free(dqd->bp, M_RAIDFRAME);
}

/* Configure a single disk queue. */
int
rf_ConfigureDiskQueue(
	RF_Raid_t *raidPtr,
	RF_DiskQueue_t *diskqueue,
	/* row & col -- Debug only. BZZT not any more... */
	RF_RowCol_t r,
	RF_RowCol_t c,
	RF_DiskQueueSW_t *p,
	RF_SectorCount_t sectPerDisk,
	dev_t dev,
	int maxOutstanding,
	RF_ShutdownList_t **listp,
	RF_AllocListElem_t *clList
)
{
	int rc;

	diskqueue->row = r;
	diskqueue->col = c;
	diskqueue->qPtr = p;
	diskqueue->qHdr = (p->Create) (sectPerDisk, clList, listp);
	diskqueue->dev = dev;
	diskqueue->numOutstanding = 0;
	diskqueue->queueLength = 0;
	diskqueue->maxOutstanding = maxOutstanding;
	diskqueue->curPriority = RF_IO_NORMAL_PRIORITY;
	diskqueue->nextLockingOp = NULL;
	diskqueue->unlockingOp = NULL;
	diskqueue->numWaiting = 0;
	diskqueue->flags = 0;
	diskqueue->raidPtr = raidPtr;
	diskqueue->rf_cinfo = &raidPtr->raid_cinfo[r][c];
	rc = rf_create_managed_mutex(listp, &diskqueue->mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		return (rc);
	}
	rc = rf_create_managed_cond(listp, &diskqueue->cond);
	if (rc) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		return (rc);
	}
	return (0);
}

void
rf_ShutdownDiskQueueSystem(void *ignored)
{
	RF_FREELIST_DESTROY_CLEAN(rf_dqd_freelist, next,
	    (RF_DiskQueueData_t *), rf_clean_dqd);
}

int
rf_ConfigureDiskQueueSystem(RF_ShutdownList_t **listp)
{
	int rc;

	RF_FREELIST_CREATE(rf_dqd_freelist, RF_MAX_FREE_DQD, RF_DQD_INC,
	    sizeof(RF_DiskQueueData_t));
	if (rf_dqd_freelist == NULL)
		return (ENOMEM);
	rc = rf_ShutdownCreate(listp, rf_ShutdownDiskQueueSystem, NULL);
	if (rc) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d"
		    " rc=%d\n", __FILE__, __LINE__, rc);
		rf_ShutdownDiskQueueSystem(NULL);
		return (rc);
	}
	RF_FREELIST_PRIME_INIT(rf_dqd_freelist, RF_DQD_INITIAL, next,
	    (RF_DiskQueueData_t *), rf_init_dqd);
	return (0);
}

int
rf_ConfigureDiskQueues(RF_ShutdownList_t **listp, RF_Raid_t *raidPtr,
    RF_Config_t *cfgPtr)
{
	RF_DiskQueue_t **diskQueues, *spareQueues;
	RF_DiskQueueSW_t *p;
	RF_RowCol_t r, c;
	int rc, i;

	raidPtr->maxQueueDepth = cfgPtr->maxOutstandingDiskReqs;

	for (p = NULL, i = 0; i < NUM_DISK_QUEUE_TYPES; i++) {
		if (!strcmp(diskqueuesw[i].queueType, cfgPtr->diskQueueType)) {
			p = &diskqueuesw[i];
			break;
		}
	}
	if (p == NULL) {
		RF_ERRORMSG2("Unknown queue type \"%s\". Using %s\n",
		    cfgPtr->diskQueueType, diskqueuesw[0].queueType);
		p = &diskqueuesw[0];
	}
	raidPtr->qType = p;
	RF_CallocAndAdd(diskQueues, raidPtr->numRow, sizeof(RF_DiskQueue_t *),
	    (RF_DiskQueue_t **), raidPtr->cleanupList);
	if (diskQueues == NULL) {
		return (ENOMEM);
	}
	raidPtr->Queues = diskQueues;
	for (r = 0; r < raidPtr->numRow; r++) {
		/* Row 0 gets RF_MAXSPARE extra slots for the spare queues. */
		RF_CallocAndAdd(diskQueues[r], raidPtr->numCol +
		    ((r == 0) ? RF_MAXSPARE : 0),
		    sizeof(RF_DiskQueue_t), (RF_DiskQueue_t *),
		    raidPtr->cleanupList);
		if (diskQueues[r] == NULL)
			return (ENOMEM);
		for (c = 0; c < raidPtr->numCol; c++) {
			rc = rf_ConfigureDiskQueue(raidPtr, &diskQueues[r][c],
			    r, c, p, raidPtr->sectorsPerDisk,
			    raidPtr->Disks[r][c].dev,
			    cfgPtr->maxOutstandingDiskReqs, listp,
			    raidPtr->cleanupList);
			if (rc)
				return (rc);
		}
	}

	/* The spare queues live in row 0, after the regular columns. */
	spareQueues = &raidPtr->Queues[0][raidPtr->numCol];
	for (r = 0; r < raidPtr->numSpare; r++) {
		rc = rf_ConfigureDiskQueue(raidPtr, &spareQueues[r], 0,
		    raidPtr->numCol + r, p, raidPtr->sectorsPerDisk,
		    raidPtr->Disks[0][raidPtr->numCol + r].dev,
		    cfgPtr->maxOutstandingDiskReqs, listp,
		    raidPtr->cleanupList);
		if (rc)
			return (rc);
	}
	return (0);
}

/*
 * Enqueue a disk I/O.
 *
 * Unfortunately, we have to do things differently in the different
 * environments (simulator, user-level, kernel).
 * At user level, all I/O is blocking, so we have one or more threads
 * per disk, and the thread that enqueues is different from the thread
 * that dequeues.
 * In the kernel, I/O is non-blocking, and so we'd like to have
 * multiple I/Os outstanding on the physical disks when possible.
 *
 * When any request arrives at a queue, we have two choices:
 *    dispatch it to the lower levels
 *    queue it up
 *
 * Kernel rules for when to do what:
 *    locking request:  Queue empty => dispatch and lock queue,
 *                      else queue it.
 *    unlocking req  :  Always dispatch it.
 *    normal req     :  Queue empty => dispatch it & set priority.
 *                      Queue not full & priority is ok => dispatch it,
 *                      else queue it.
 *
 * User-level rules:
 *    Always enqueue. In the special case of an unlocking op, enqueue
 *    in a special way that will cause the unlocking op to be the next
 *    thing dequeued.
 *
 * Simulator rules:
 *    Do the same as at user level, with the sleeps and wakeups
 *    suppressed.
 */
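/*
 * The dispatch decisions below hinge on three tests from
 * rf_diskqueue.h. A sketch of their likely shape (our reading of the
 * header, stated as an assumption rather than a quotation):
 *
 *	RF_QUEUE_EMPTY(q)	numOutstanding == 0
 *	RF_QUEUE_FULL(q)	numOutstanding == maxOutstanding
 *	RF_OK_TO_DISPATCH(q, r)	RF_QUEUE_EMPTY(q), or the queue is not
 *				full and r->priority >= q->curPriority
 */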
void
rf_DiskIOEnqueue(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req, int pri)
{
	RF_ETIMER_START(req->qtime);
	RF_ASSERT(req->type == RF_IO_TYPE_NOP || req->numSector);
	req->priority = pri;

	if (rf_queueDebug && (req->numSector == 0)) {
		printf("Warning: Enqueueing zero-sector access\n");
	}
	/*
	 * Kernel.
	 */
	RF_LOCK_QUEUE_MUTEX(queue, "DiskIOEnqueue");
	/* Locking request. */
	if (RF_LOCKING_REQ(req)) {
		if (RF_QUEUE_EMPTY(queue)) {
			Dprintf3("Dispatching pri %d locking op to r %d c %d"
			    " (queue empty)\n", pri, queue->row, queue->col);
			RF_LOCK_QUEUE(queue);
			rf_DispatchKernelIO(queue, req);
		} else {
			/*
			 * Increment count of number of requests waiting
			 * in this queue.
			 */
			queue->queueLength++;
			Dprintf3("Enqueueing pri %d locking op to r %d c %d"
			    " (queue not empty)\n", pri, queue->row,
			    queue->col);
			req->queue = (void *) queue;
			(queue->qPtr->Enqueue) (queue->qHdr, req, pri);
		}
	} else {
		/* Unlocking request. */
		if (RF_UNLOCKING_REQ(req)) {
			/*
			 * We'll do the actual unlock when this
			 * I/O completes.
			 */
			Dprintf3("Dispatching pri %d unlocking op to r %d"
			    " c %d\n", pri, queue->row, queue->col);
			RF_ASSERT(RF_QUEUE_LOCKED(queue));
			rf_DispatchKernelIO(queue, req);
		} else {
			/* Normal request. */
			if (RF_OK_TO_DISPATCH(queue, req)) {
				Dprintf3("Dispatching pri %d regular op to"
				    " r %d c %d (ok to dispatch)\n", pri,
				    queue->row, queue->col);
				rf_DispatchKernelIO(queue, req);
			} else {
				/*
				 * Increment count of number of requests
				 * waiting in this queue.
				 */
				queue->queueLength++;
				Dprintf3("Enqueueing pri %d regular op to"
				    " r %d c %d (not ok to dispatch)\n", pri,
				    queue->row, queue->col);
				req->queue = (void *) queue;
				(queue->qPtr->Enqueue) (queue->qHdr, req, pri);
			}
		}
	}
	RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOEnqueue");
}


/* Get the next set of I/Os started, kernel version only. */
void
rf_DiskIOComplete(RF_DiskQueue_t *queue, RF_DiskQueueData_t *req, int status)
{
	int done = 0;

	RF_LOCK_QUEUE_MUTEX(queue, "DiskIOComplete");

	/*
	 * Unlock the queue:
	 * (1) after an unlocking req completes.
	 * (2) after a locking req fails.
	 */
	if (RF_UNLOCKING_REQ(req) || (RF_LOCKING_REQ(req) && status)) {
		Dprintf2("DiskIOComplete: unlocking queue at r %d c %d\n",
		    queue->row, queue->col);
		RF_ASSERT(RF_QUEUE_LOCKED(queue) &&
		    (queue->unlockingOp == NULL));
		RF_UNLOCK_QUEUE(queue);
	}
	queue->numOutstanding--;
	RF_ASSERT(queue->numOutstanding >= 0);

	/*
	 * Dispatch requests to the disk until we find one that we can't.
	 * No reason to continue once we've filled up the queue.
	 * No reason to even start if the queue is locked.
	 */

	while (!done && !RF_QUEUE_FULL(queue) && !RF_QUEUE_LOCKED(queue)) {
		if (queue->nextLockingOp) {
			req = queue->nextLockingOp;
			queue->nextLockingOp = NULL;
			Dprintf3("DiskIOComplete: a pri %d locking req was"
			    " pending at r %d c %d\n", req->priority,
			    queue->row, queue->col);
		} else {
			req = (queue->qPtr->Dequeue) (queue->qHdr);
			if (req != NULL) {
				Dprintf3("DiskIOComplete: extracting pri %d"
				    " req from queue at r %d c %d\n",
				    req->priority, queue->row, queue->col);
			} else {
				Dprintf1("DiskIOComplete: no more requests"
				    " to extract.\n", "");
			}
		}
		if (req) {
			/*
			 * Decrement count of number of requests waiting
			 * in this queue.
			 */
			queue->queueLength--;
			RF_ASSERT(queue->queueLength >= 0);
		}
		if (!req)
			done = 1;
		else {
			if (RF_LOCKING_REQ(req)) {
				if (RF_QUEUE_EMPTY(queue)) {
					/* Dispatch it. */
					Dprintf3("DiskIOComplete: dispatching"
					    " pri %d locking req to r %d c %d"
					    " (queue empty)\n", req->priority,
					    queue->row, queue->col);
					RF_LOCK_QUEUE(queue);
					rf_DispatchKernelIO(queue, req);
					done = 1;
				} else {
					/*
					 * Put it aside to wait for
					 * the queue to drain.
					 */
					Dprintf3("DiskIOComplete: postponing"
					    " pri %d locking req to r %d"
					    " c %d\n", req->priority,
					    queue->row, queue->col);
					RF_ASSERT(queue->nextLockingOp == NULL);
					queue->nextLockingOp = req;
					done = 1;
				}
			} else {
				if (RF_UNLOCKING_REQ(req)) {
					/*
					 * Should not happen:
					 * Unlocking ops should not get queued.
					 */
					/* Support it anyway for the future. */
					RF_ASSERT(RF_QUEUE_LOCKED(queue));
					Dprintf3("DiskIOComplete: dispatching"
					    " pri %d unl req to r %d c %d"
					    " (SHOULD NOT SEE THIS)\n",
					    req->priority, queue->row,
					    queue->col);
					rf_DispatchKernelIO(queue, req);
					done = 1;
				} else {
					if (RF_OK_TO_DISPATCH(queue, req)) {
						Dprintf3("DiskIOComplete:"
						    " dispatching pri %d"
						    " regular req to r %d"
						    " c %d (ok to dispatch)\n",
						    req->priority, queue->row,
						    queue->col);
						rf_DispatchKernelIO(queue, req);
					} else {
						/*
						 * We can't dispatch it,
						 * so just re-enqueue it.
						 */
						/*
						 * Potential trouble here if
						 * disk queues batch reqs.
						 */
						Dprintf3("DiskIOComplete:"
						    " re-enqueueing pri %d"
						    " regular req to r %d"
						    " c %d\n", req->priority,
						    queue->row, queue->col);
						queue->queueLength++;
						(queue->qPtr->Enqueue)
						    (queue->qHdr, req,
						    req->priority);
						done = 1;
					}
				}
			}
		}
	}

	RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOComplete");
}

/*
 * Promote accesses tagged with the given parityStripeID from low priority
 * to normal priority. This promotion is optional, meaning that a queue
 * need not implement it. If there is no promotion routine associated with
 * a queue, this routine does nothing and returns -1.
 */
int
rf_DiskIOPromote(RF_DiskQueue_t *queue, RF_StripeNum_t parityStripeID,
    RF_ReconUnitNum_t which_ru)
{
	int retval;

	if (!queue->qPtr->Promote)
		return (-1);
	RF_LOCK_QUEUE_MUTEX(queue, "DiskIOPromote");
	retval = (queue->qPtr->Promote) (queue->qHdr, parityStripeID, which_ru);
	RF_UNLOCK_QUEUE_MUTEX(queue, "DiskIOPromote");
	return (retval);
}
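
/*
 * Illustrative sketch (not part of the original file): a caller, such
 * as reconstruction code, might promote the queued accesses for one
 * parity stripe across every column of a row. The helper name is
 * hypothetical; the fields and loop bounds follow this file.
 */
#if 0
static void
example_promote_stripe(RF_Raid_t *raidPtr, RF_RowCol_t row,
    RF_StripeNum_t psid, RF_ReconUnitNum_t which_ru)
{
	RF_RowCol_t c;

	for (c = 0; c < raidPtr->numCol; c++) {
		/* Returns -1 when the discipline has no Promote routine. */
		(void) rf_DiskIOPromote(&raidPtr->Queues[row][c], psid,
		    which_ru);
	}
}
#endif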

RF_DiskQueueData_t *
rf_CreateDiskQueueData(
	RF_IoType_t typ,
	RF_SectorNum_t ssect,
	RF_SectorCount_t nsect,
	caddr_t buf,
	RF_StripeNum_t parityStripeID,
	RF_ReconUnitNum_t which_ru,
	int (*wakeF) (void *, int),
	void *arg,
	RF_DiskQueueData_t *next,
	RF_AccTraceEntry_t *tracerec,
	void *raidPtr,
	RF_DiskQueueDataFlags_t flags,
	void *kb_proc
)
{
	RF_DiskQueueData_t *p;

	RF_FREELIST_GET_INIT(rf_dqd_freelist, p, next, (RF_DiskQueueData_t *),
	    rf_init_dqd);

	p->sectorOffset = ssect + rf_protectedSectors;
	p->numSector = nsect;
	p->type = typ;
	p->buf = buf;
	p->parityStripeID = parityStripeID;
	p->which_ru = which_ru;
	p->CompleteFunc = wakeF;
	p->argument = arg;
	p->next = next;
	p->tracerec = tracerec;
	p->priority = RF_IO_NORMAL_PRIORITY;
	p->AuxFunc = NULL;
	p->buf2 = NULL;
	p->raidPtr = raidPtr;
	p->flags = flags;
	p->b_proc = kb_proc;
	return (p);
}

RF_DiskQueueData_t *
rf_CreateDiskQueueDataFull(
	RF_IoType_t typ,
	RF_SectorNum_t ssect,
	RF_SectorCount_t nsect,
	caddr_t buf,
	RF_StripeNum_t parityStripeID,
	RF_ReconUnitNum_t which_ru,
	int (*wakeF) (void *, int),
	void *arg,
	RF_DiskQueueData_t *next,
	RF_AccTraceEntry_t *tracerec,
	int priority,
	int (*AuxFunc) (void *,...),
	caddr_t buf2,
	void *raidPtr,
	RF_DiskQueueDataFlags_t flags,
	void *kb_proc
)
{
	RF_DiskQueueData_t *p;

	RF_FREELIST_GET_INIT(rf_dqd_freelist, p, next, (RF_DiskQueueData_t *),
	    rf_init_dqd);

	p->sectorOffset = ssect + rf_protectedSectors;
	p->numSector = nsect;
	p->type = typ;
	p->buf = buf;
	p->parityStripeID = parityStripeID;
	p->which_ru = which_ru;
	p->CompleteFunc = wakeF;
	p->argument = arg;
	p->next = next;
	p->tracerec = tracerec;
	p->priority = priority;
	p->AuxFunc = AuxFunc;
	p->buf2 = buf2;
	p->raidPtr = raidPtr;
	p->flags = flags;
	p->b_proc = kb_proc;
	return (p);
}

void
rf_FreeDiskQueueData(RF_DiskQueueData_t *p)
{
	RF_FREELIST_FREE_CLEAN(rf_dqd_freelist, p, next, rf_clean_dqd);
}