Annotation of sys/arch/sparc/dev/xd.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: xd.c,v 1.41 2007/07/01 19:06:57 miod Exp $ */
2: /* $NetBSD: xd.c,v 1.37 1997/07/29 09:58:16 fair Exp $ */
3:
4: /*
5: *
6: * Copyright (c) 1995 Charles D. Cranor
7: * All rights reserved.
8: *
9: * Redistribution and use in source and binary forms, with or without
10: * modification, are permitted provided that the following conditions
11: * are met:
12: * 1. Redistributions of source code must retain the above copyright
13: * notice, this list of conditions and the following disclaimer.
14: * 2. Redistributions in binary form must reproduce the above copyright
15: * notice, this list of conditions and the following disclaimer in the
16: * documentation and/or other materials provided with the distribution.
17: * 3. All advertising materials mentioning features or use of this software
18: * must display the following acknowledgement:
19: * This product includes software developed by Charles D. Cranor.
20: * 4. The name of the author may not be used to endorse or promote products
21: * derived from this software without specific prior written permission.
22: *
23: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33: */
34:
35: /*
36: *
37: * x d . c x y l o g i c s 7 5 3 / 7 0 5 3 v m e / s m d d r i v e r
38: *
39: * author: Chuck Cranor <chuck@ccrc.wustl.edu>
40: * id: $NetBSD: xd.c,v 1.37 1997/07/29 09:58:16 fair Exp $
41: * started: 27-Feb-95
42: * references: [1] Xylogics Model 753 User's Manual
43: * part number: 166-753-001, Revision B, May 21, 1988.
44: * "Your Partner For Performance"
45: * [2] other NetBSD disk device drivers
46: *
47: * Special thanks go to Scott E. Campbell of Xylogics, Inc. for taking
48: * the time to answer some of my questions about the 753/7053.
49: *
50: * note: the 753 and the 7053 are programmed the same way, but are
51: * different sizes. the 753 is a 6U VME card, while the 7053 is a 9U
52: * VME card (found in many VME based suns).
53: */
54:
55: #undef XDC_DEBUG /* full debug */
56: #define XDC_DIAG /* extra sanity checks */
57: #if defined(DIAGNOSTIC) && !defined(XDC_DIAG)
58: #define XDC_DIAG /* link in with master DIAG option */
59: #endif
60:
61: #include <sys/param.h>
62: #include <sys/proc.h>
63: #include <sys/systm.h>
64: #include <sys/kernel.h>
65: #include <sys/file.h>
66: #include <sys/stat.h>
67: #include <sys/ioctl.h>
68: #include <sys/buf.h>
69: #include <sys/uio.h>
70: #include <sys/malloc.h>
71: #include <sys/device.h>
72: #include <sys/disklabel.h>
73: #include <sys/disk.h>
74: #include <sys/syslog.h>
75: #include <sys/dkbad.h>
76: #include <sys/conf.h>
77: #include <sys/timeout.h>
78:
79: #include <uvm/uvm_extern.h>
80:
81: #include <machine/autoconf.h>
82: #include <dev/sun/disklabel.h>
83: #include <machine/conf.h>
84:
85: #include <sparc/dev/xdreg.h>
86: #include <sparc/dev/xdvar.h>
87: #include <sparc/dev/xio.h>
88: #include <sparc/sparc/vaddrs.h>
89: #include <sparc/sparc/cpuvar.h>
90:
91: /*
92: * macros
93: */
94:
/*
 * XDC_TWAIT: add iorq "N" to tail of SC's wait queue
 *
 * waitq[] is a circular buffer of XDC_MAXIOPB slots; waithead indexes
 * the oldest entry, waitend the next free slot, and nwait counts the
 * entries currently queued.  Caller must hold the proper spl.
 */
#define XDC_TWAIT(SC, N) { \
	(SC)->waitq[(SC)->waitend] = (N); \
	(SC)->waitend = ((SC)->waitend + 1) % XDC_MAXIOPB; \
	(SC)->nwait++; \
}

/*
 * XDC_HWAIT: add iorq "N" to head of SC's wait queue
 *
 * Backs waithead up one slot (wrapping) so "N" is dequeued before
 * anything that was queued with XDC_TWAIT.
 */
#define XDC_HWAIT(SC, N) { \
	(SC)->waithead = ((SC)->waithead == 0) ? \
		(XDC_MAXIOPB - 1) : ((SC)->waithead - 1); \
	(SC)->waitq[(SC)->waithead] = (N); \
	(SC)->nwait++; \
}
113:
/*
 * XDC_GET_WAITER: gets the first request waiting on the waitq
 * and removes it (so it can be submitted)
 *
 * fix: the nwait decrement now uses the macro argument.  It used to
 * read "xdcsc->nwait--", which silently depended on every caller
 * having a local variable named "xdcsc" and would fail to compile
 * (or worse, touch the wrong softc) for any other caller.
 */
#define XDC_GET_WAITER(XDCSC, RQ) { \
	(RQ) = (XDCSC)->waitq[(XDCSC)->waithead]; \
	(XDCSC)->waithead = ((XDCSC)->waithead + 1) % XDC_MAXIOPB; \
	(XDCSC)->nwait--; \
}
123:
/*
 * XDC_FREE: add iorq "N" to SC's free list
 *
 * Also clears the iorq's mode.  The wakeup fires only on the 0 -> 1
 * transition of nfree, since sleepers block on &nfree only when the
 * list is empty (see note [2] below).
 */
#define XDC_FREE(SC, N) { \
	(SC)->freereq[(SC)->nfree++] = (N); \
	(SC)->reqs[N].mode = 0; \
	if ((SC)->nfree == 1) wakeup(&(SC)->nfree); \
}


/*
 * XDC_RQALLOC: allocate an iorq off the free list (assume nfree > 0).
 */
#define XDC_RQALLOC(XDCSC) (XDCSC)->freereq[--((XDCSC)->nfree)]

/*
 * XDC_GO: start iopb ADDR (DVMA addr in a u_long) on XDC
 *
 * Loads the address into the controller one byte at a time, low byte
 * first, then sets the address modifier and pokes the csr to start.
 * NOTE: ADDR is consumed (shifted to zero) by the expansion, so the
 * caller must not reuse it afterwards.
 */
#define XDC_GO(XDC, ADDR) { \
	(XDC)->xdc_iopbaddr0 = ((ADDR) & 0xff); \
	(ADDR) = ((ADDR) >> 8); \
	(XDC)->xdc_iopbaddr1 = ((ADDR) & 0xff); \
	(ADDR) = ((ADDR) >> 8); \
	(XDC)->xdc_iopbaddr2 = ((ADDR) & 0xff); \
	(ADDR) = ((ADDR) >> 8); \
	(XDC)->xdc_iopbaddr3 = (ADDR); \
	(XDC)->xdc_iopbamod = XDC_ADDRMOD; \
	(XDC)->xdc_csr = XDC_ADDIOPB; /* go! */ \
}

/*
 * XDC_WAIT: wait for XDC's csr "BITS" to come on in "TIME".
 * LCV is a counter.  If it goes to zero then we timed out.
 *
 * Busy-waits, sleeping DELAY(1) microsecond per iteration; on success
 * LCV is left > 0, on timeout it is 0.
 */
#define XDC_WAIT(XDC, LCV, TIME, BITS) { \
	(LCV) = (TIME); \
	while ((LCV) > 0) { \
		if ((XDC)->xdc_csr & (BITS)) break; \
		(LCV) = (LCV) - 1; \
		DELAY(1); \
	} \
}

/*
 * XDC_DONE: don't need IORQ, get error code and free (done after xdc_cmd)
 *
 * If RQ is XD_ERR_FAIL there is no iorq to free and ER is just the
 * failure code.  Otherwise the iorq's errno is extracted, the iorq is
 * returned to the free list, and anyone sleeping on &ndone (waiting
 * for the done count to drop past XDC_SUBWAITLIM, see note [3]) is
 * woken.
 */
#define XDC_DONE(SC,RQ,ER) { \
	if ((RQ) == XD_ERR_FAIL) { \
		(ER) = (RQ); \
	} else { \
		if ((SC)->ndone-- == XDC_SUBWAITLIM) \
			wakeup(&(SC)->ndone); \
		(ER) = (SC)->reqs[RQ].errno; \
		XDC_FREE((SC), (RQ)); \
	} \
}

/*
 * XDC_ADVANCE: advance iorq's pointers by a number of sectors
 *
 * Used after a partial transfer: N sectors are subtracted from the
 * remaining count and the block number and data buffer pointer are
 * moved forward past them (XDFM_BPS bytes per sector).
 */
#define XDC_ADVANCE(IORQ, N) { \
	if (N) { \
		(IORQ)->sectcnt -= (N); \
		(IORQ)->blockno += (N); \
		(IORQ)->dbuf += ((N)*XDFM_BPS); \
	} \
}
191:
192: /*
193: * note - addresses you can sleep on:
194: * [1] & of xd_softc's "state" (waiting for a chance to attach a drive)
195: * [2] & of xdc_softc's "nfree" (waiting for a free iorq/iopb)
196: * [3] & of xdc_softc's "ndone" (waiting for number of done iorq/iopb's
197: * to drop below XDC_SUBWAITLIM)
198: * [4] & an iorq (waiting for an XD_SUB_WAIT iorq to finish)
199: */
200:
201:
202: /*
203: * function prototypes
204: * "xdc_*" functions are internal, all others are external interfaces
205: */
206:
207: extern int pil_to_vme[]; /* from obio.c */
208:
209: /* internals */
210: int xdc_cmd(struct xdc_softc *, int, int, int, int, int, char *, int);
211: char *xdc_e2str(int);
212: int xdc_error(struct xdc_softc *, struct xd_iorq *,
213: struct xd_iopb *, int, int);
214: int xdc_ioctlcmd(struct xd_softc *, dev_t dev, struct xd_iocmd *);
215: void xdc_perror(struct xd_iorq *, struct xd_iopb *, int);
216: int xdc_piodriver(struct xdc_softc *, int, int);
217: int xdc_remove_iorq(struct xdc_softc *);
218: int xdc_reset(struct xdc_softc *, int, int, int, struct xd_softc *);
219: inline void xdc_rqinit(struct xd_iorq *, struct xdc_softc *,
220: struct xd_softc *, int, u_long, int,
221: caddr_t, struct buf *);
222: void xdc_rqtopb(struct xd_iorq *, struct xd_iopb *, int, int);
223: void xdc_start(struct xdc_softc *, int);
224: int xdc_startbuf(struct xdc_softc *, struct xd_softc *, struct buf *);
225: int xdc_submit_iorq(struct xdc_softc *, int, int);
226: void xdc_tick(void *);
227: void xdc_xdreset(struct xdc_softc *, struct xd_softc *);
228:
229: /* machine interrupt hook */
230: int xdcintr(void *);
231:
232: /* autoconf */
233: int xdcmatch(struct device *, void *, void *);
234: void xdcattach(struct device *, struct device *, void *);
235: int xdmatch(struct device *, void *, void *);
236: void xdattach(struct device *, struct device *, void *);
237:
238: static void xddummystrat(struct buf *);
239: int xdgetdisklabel(struct xd_softc *, void *);
240:
/*
 * cfdrivers: device driver interface to autoconfig
 */

/* controller (xdc): matched/attached on the VME bus */
struct cfattach xdc_ca = {
	sizeof(struct xdc_softc), xdcmatch, xdcattach
};


struct cfdriver xdc_cd = {
	NULL, "xdc", DV_DULL
};

/* disk (xd): one instance per drive hanging off a controller */
struct cfattach xd_ca = {
	sizeof(struct xd_softc), xdmatch, xdattach
};

struct cfdriver xd_cd = {
	NULL, "xd", DV_DISK
};

struct xdc_attach_args {	/* this is the "aux" args to xdattach */
	int	driveno;	/* unit number */
	char	*buf;		/* scratch buffer for reading disk label */
	char	*dvmabuf;	/* DVMA address of above */
	int	fullmode;	/* submit mode (XD_SUB_POLL or XD_SUB_WAIT) */
	int	booting;	/* are we booting or not? */
};

/*
 * dkdriver: only the strategy entry is used by the disk subsystem here
 */

struct dkdriver xddkdriver = {xdstrategy};
275:
276: /*
277: * start: disk label fix code (XXX)
278: */
279:
280: static void *xd_labeldata;
281:
282: static void
283: xddummystrat(bp)
284: struct buf *bp;
285: {
286: if (bp->b_bcount != XDFM_BPS)
287: panic("xddummystrat");
288: bcopy(xd_labeldata, bp->b_data, XDFM_BPS);
289: bp->b_flags |= B_DONE;
290: bp->b_flags &= ~B_BUSY;
291: }
292:
/*
 * xdgetdisklabel: build the in-core disklabel for drive `xd'.
 *
 * `b' holds the raw label sector (XDFM_BPS bytes) that the caller has
 * already read off the disk.  readdisklabel() is pointed at
 * xddummystrat, which serves the sector back out of `b' instead of
 * doing real I/O.  Returns XD_ERR_AOK on success, XD_ERR_FAIL on error.
 */
int
xdgetdisklabel(xd, b)
	struct xd_softc *xd;
	void *b;
{
	struct disklabel *lp = xd->sc_dk.dk_label;
	struct sun_disklabel *sl = b;
	char *err;

	bzero(lp, sizeof(struct disklabel));
	/* Required parameters for readdisklabel() */
	lp->d_secsize = XDFM_BPS;
	if (sl->sl_magic == SUN_DKMAGIC) {
		/* SunOS label present: take geometry straight from it */
		lp->d_secpercyl = sl->sl_nsectors * sl->sl_ntracks;
		DL_SETDSIZE(lp, (daddr64_t)lp->d_secpercyl * sl->sl_ncylinders);
	} else {
		lp->d_secpercyl = 1;
	}

	/* We already have the label data in `b'; setup for dummy strategy */
	xd_labeldata = b;

	err = readdisklabel(MAKEDISKDEV(0, xd->sc_dev.dv_unit, RAW_PART),
	    xddummystrat, lp, 0);
	if (err) {
		/*printf("%s: %s\n", xd->sc_dev.dv_xname, err);*/
		return (XD_ERR_FAIL);
	}

	/* Ok, we have the label; fill in `pcyl' if there's SunOS magic */
	sl = b;
	if (sl->sl_magic == SUN_DKMAGIC)
		xd->pcyl = sl->sl_pcylinders;
	else {
		/* no SunOS label: guess physical cyls as data + alternates */
		printf("%s: WARNING: no `pcyl' in disk label.\n",
			xd->sc_dev.dv_xname);
		xd->pcyl = lp->d_ncylinders +
			lp->d_acylinders;
		printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n",
			xd->sc_dev.dv_xname, xd->pcyl);
	}

	/* cache the final geometry in the softc */
	xd->ncyl = lp->d_ncylinders;
	xd->acyl = lp->d_acylinders;
	xd->nhead = lp->d_ntracks;
	xd->nsect = lp->d_nsectors;
	xd->sectpercyl = lp->d_secpercyl;
	return (XD_ERR_AOK);
}
342:
343: /*
344: * end: disk label fix code (XXX)
345: */
346:
347: /*
348: * a u t o c o n f i g f u n c t i o n s
349: */
350:
351: /*
352: * xdcmatch: determine if xdc is present or not. we do a
353: * soft reset to detect the xdc.
354: */
355:
356: int xdcmatch(parent, vcf, aux)
357: struct device *parent;
358: void *vcf, *aux;
359: {
360: struct cfdata *cf = vcf;
361: struct confargs *ca = aux;
362: struct romaux *ra = &ca->ca_ra;
363: struct xdc *xdc;
364: int del = 0;
365:
366: if (strcmp(cf->cf_driver->cd_name, ra->ra_name))
367: return (0);
368:
369: switch (ca->ca_bustype) {
370: case BUS_OBIO:
371: case BUS_SBUS:
372: case BUS_VME16:
373: default:
374: return (0);
375: case BUS_VME32:
376: xdc = (struct xdc *) ra->ra_vaddr;
377: if (probeget((caddr_t) &xdc->xdc_csr, 1) == -1)
378: return (0);
379: xdc->xdc_csr = XDC_RESET;
380: XDC_WAIT(xdc, del, XDC_RESETUSEC, XDC_RESET);
381: if (del <= 0)
382: return (0);
383: return (1);
384: }
385: }
386:
387: /*
388: * xdcattach: attach controller
389: */
void
xdcattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;

{
	struct xdc_softc *xdc = (void *) self;
	struct confargs *ca = aux;
	struct xdc_attach_args xa;
	int lcv, rqno, err, pri;
	struct xd_iopb_ctrl *ctl;

	/* get addressing and intr level stuff from autoconfig and load it
	 * into our xdc_softc. */

	ca->ca_ra.ra_vaddr = mapiodev(ca->ca_ra.ra_reg, 0, sizeof(struct xdc));

	xdc->xdc = (struct xdc *) ca->ca_ra.ra_vaddr;
	pri = ca->ca_ra.ra_intr[0].int_pri;
	xdc->ipl = pil_to_vme[pri];	/* map sparc pil to VME irq level */
	xdc->vector = ca->ca_ra.ra_intr[0].int_vec;
	printf(" pri %d", pri);

	/* no drives known yet; xdattach() registers them in sc_drives[] */
	for (lcv = 0; lcv < XDC_MAXDEV; lcv++)
		xdc->sc_drives[lcv] = (struct xd_softc *) 0;

	/* allocate and zero buffers
	 *
	 * note: we simplify the code by allocating the max number of iopbs and
	 * iorq's up front.  thus, we avoid linked lists and the costs
	 * associated with them in exchange for wasting a little memory. */

	xdc->dvmaiopb = (struct xd_iopb *)
		dvma_malloc(XDC_MAXIOPB * sizeof(struct xd_iopb), &xdc->iopbase,
		M_NOWAIT);
	xdc->iopbase = xdc->dvmaiopb;	/* XXX TMP HACK */
	bzero(xdc->iopbase, XDC_MAXIOPB * sizeof(struct xd_iopb));
	/* Setup device view of DVMA address */
	xdc->dvmaiopb = (struct xd_iopb *) ((u_long) xdc->iopbase - DVMA_BASE);

	xdc->reqs = (struct xd_iorq *)
	    malloc(XDC_MAXIOPB * sizeof(struct xd_iorq), M_DEVBUF, M_NOWAIT);
	if (xdc->reqs == NULL)
		panic("xdc malloc");
	bzero(xdc->reqs, XDC_MAXIOPB * sizeof(struct xd_iorq));

	/* init free list, iorq to iopb pointers, and non-zero fields in the
	 * iopb which never change. */

	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
		xdc->reqs[lcv].iopb = &xdc->iopbase[lcv];
		xdc->freereq[lcv] = lcv;
		xdc->iopbase[lcv].fixd = 1;	/* always the same */
		xdc->iopbase[lcv].naddrmod = XDC_ADDRMOD;	/* always the same */
		xdc->iopbase[lcv].intr_vec = xdc->vector;	/* always the same */
	}
	/* all iorqs start free; queue/done counters start empty */
	xdc->nfree = XDC_MAXIOPB;
	xdc->nrun = 0;
	xdc->waithead = xdc->waitend = xdc->nwait = 0;
	xdc->ndone = 0;

	/* init queue of waiting bufs */

	xdc->sc_wq.b_active = 0;
	xdc->sc_wq.b_actf = 0;
	xdc->sc_wq.b_actb = &xdc->sc_wq.b_actf;

	/*
	 * section 7 of the manual tells us how to init the controller:
	 *    - read controller parameters (6/0)
	 *    - write controller parameters (5/0)
	 */

	/* read controller parameters and insure we have a 753/7053 */

	rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
	if (rqno == XD_ERR_FAIL) {
		printf(": couldn't read controller params\n");
		return;		/* shouldn't ever happen */
	}
	ctl = (struct xd_iopb_ctrl *) & xdc->iopbase[rqno];
	if (ctl->ctype != XDCT_753) {
		if (xdc->reqs[rqno].errno)
			printf(": %s: ", xdc_e2str(xdc->reqs[rqno].errno));
		printf(": doesn't identify as a 753/7053\n");
		XDC_DONE(xdc, rqno, err);
		return;
	}
	printf(": Xylogics 753/7053, PROM=0x%x.%02x.%02x\n",
	    ctl->eprom_partno, ctl->eprom_lvl, ctl->eprom_rev);
	XDC_DONE(xdc, rqno, err);

	/* now write controller parameters (xdc_cmd sets all params for us) */

	rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
	XDC_DONE(xdc, rqno, err);
	if (err) {
		printf("%s: controller config error: %s\n",
			xdc->sc_dev.dv_xname, xdc_e2str(err));
		return;
	}
	/* link in interrupt with higher level software */

	xdc->sc_ih.ih_fun = xdcintr;
	xdc->sc_ih.ih_arg = xdc;
	vmeintr_establish(ca->ca_ra.ra_intr[0].int_vec,
	    ca->ca_ra.ra_intr[0].int_pri, &xdc->sc_ih, IPL_BIO,
	    self->dv_xname);

	/* now we must look for disks using autoconfig */
	xa.dvmabuf = (char *)dvma_malloc(XDFM_BPS, &xa.buf, M_NOWAIT);
	xa.fullmode = XD_SUB_POLL;	/* polled I/O: interrupts not usable yet */
	xa.booting = 1;

	/* if the boot device hangs off this controller, advance the
	 * bootpath so the drives can match themselves against it */
	if (ca->ca_ra.ra_bp && ca->ca_ra.ra_bp->val[0] == -1 &&
	    ca->ca_ra.ra_bp->val[1] == xdc->sc_dev.dv_unit) {
		bootpath_store(1, ca->ca_ra.ra_bp + 1);	/* advance bootpath */
	}

	/* probe every possible drive slot on this controller */
	for (xa.driveno = 0; xa.driveno < XDC_MAXDEV; xa.driveno++)
		(void) config_found(self, (void *) &xa, NULL);

	dvma_free(xa.dvmabuf, XDFM_BPS, &xa.buf);
	bootpath_store(1, NULL);

	/* start the watchdog clock */
	timeout_set(&xdc->xdc_tick_tmo, xdc_tick, xdc);
	timeout_add(&xdc->xdc_tick_tmo, XDC_TICKCNT);

}
520:
521: /*
522: * xdmatch: probe for disk.
523: *
524: * note: we almost always say disk is present. this allows us to
525: * spin up and configure a disk after the system is booted (we can
526: * call xdattach!).
527: */
528: int
529: xdmatch(parent, vcf, aux)
530: struct device *parent;
531: void *vcf, *aux;
532: {
533: struct cfdata *cf = vcf;
534: struct xdc_attach_args *xa = aux;
535:
536: /* looking for autoconf wildcard or exact match */
537:
538: if (cf->cf_loc[0] != -1 && cf->cf_loc[0] != xa->driveno)
539: return 0;
540:
541: return 1;
542:
543: }
544:
545: /*
546: * xdattach: attach a disk. this can be called from autoconf and also
547: * from xdopen/xdstrategy.
548: */
void
xdattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;

{
	struct xd_softc *xd = (void *) self;
	struct xdc_softc *xdc = (void *) parent;
	struct xdc_attach_args *xa = aux;
	int rqno, err, spt = 0, mb, blk, lcv, fmode, s = 0, newstate;
	struct xd_iopb_drive *driopb;
	struct dkbad *dkb;
	struct bootpath *bp;

	/*
	 * Always re-initialize the disk structure.  We want statistics
	 * to start with a clean slate.
	 */
	bzero(&xd->sc_dk, sizeof(xd->sc_dk));
	xd->sc_dk.dk_driver = &xddkdriver;
	xd->sc_dk.dk_name = xd->sc_dev.dv_xname;

	/* if booting, init the xd_softc */

	if (xa->booting) {
		xd->state = XD_DRIVE_UNKNOWN;	/* to start */
		xd->flags = 0;
		xd->parent = xdc;
	}
	xd->xd_drive = xa->driveno;
	fmode = xa->fullmode;
	xdc->sc_drives[xa->driveno] = xd;

	/* if not booting, make sure we are the only process in the attach for
	 * this drive.  if locked out, sleep on it. */

	if (!xa->booting) {
		s = splbio();
		while (xd->state == XD_DRIVE_ATTACHING) {
			/* interrupted sleep: give up on the attach */
			if (tsleep(&xd->state, PRIBIO, "xdattach", 0)) {
				splx(s);
				return;
			}
		}
		printf("%s at %s",
			xd->sc_dev.dv_xname, xd->parent->sc_dev.dv_xname);
	}
	/* we now have control */

	xd->state = XD_DRIVE_ATTACHING;
	newstate = XD_DRIVE_UNKNOWN;	/* pessimistic until proven otherwise */

	/* first try and reset the drive */

	rqno = xdc_cmd(xdc, XDCMD_RST, 0, xd->xd_drive, 0, 0, 0, fmode);
	XDC_DONE(xdc, rqno, err);
	if (err == XD_ERR_NRDY) {
		printf(" drive %d: off-line\n", xa->driveno);
		goto done;
	}
	if (err) {
		printf(": ERROR 0x%02x (%s)\n", err, xdc_e2str(err));
		goto done;
	}
	printf(" drive %d: ready\n", xa->driveno);

	/* now set format parameters */

	rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_FMT, xd->xd_drive, 0, 0, 0, fmode);
	XDC_DONE(xdc, rqno, err);
	if (err) {
		printf("%s: write format parameters failed: %s\n",
			xd->sc_dev.dv_xname, xdc_e2str(err));
		goto done;
	}

	/* get drive parameters (hardware sectors-per-track, saved in spt) */
	rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode);
	if (rqno != XD_ERR_FAIL) {
		driopb = (struct xd_iopb_drive *) & xdc->iopbase[rqno];
		spt = driopb->sectpertrk;
	}
	XDC_DONE(xdc, rqno, err);
	if (err) {
		printf("%s: read drive parameters failed: %s\n",
			xd->sc_dev.dv_xname, xdc_e2str(err));
		goto done;
	}

	/*
	 * now set drive parameters (to semi-bogus values) so we can read the
	 * disk label.
	 */
	xd->pcyl = xd->ncyl = 1;
	xd->acyl = 0;
	xd->nhead = 1;
	xd->nsect = 1;
	xd->sectpercyl = 1;
	for (lcv = 0; lcv < NBT_BAD; lcv++)	/* init empty bad144 table */
		xd->dkb.bt_bad[lcv].bt_cyl = xd->dkb.bt_bad[lcv].bt_trksec = 0xffff;
	rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode);
	XDC_DONE(xdc, rqno, err);
	if (err) {
		printf("%s: write drive parameters failed: %s\n",
			xd->sc_dev.dv_xname, xdc_e2str(err));
		goto done;
	}

	/* read disk label (sector 0) into the scratch DVMA buffer */
	rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, 0, 1, xa->dvmabuf, fmode);
	XDC_DONE(xdc, rqno, err);
	if (err) {
		printf("%s: reading disk label failed: %s\n",
			xd->sc_dev.dv_xname, xdc_e2str(err));
		goto done;
	}
	newstate = XD_DRIVE_NOLABEL;	/* drive works even if label is bad */

	xd->hw_spt = spt;
	/* Attach the disk: must be before getdisklabel to malloc label */
	disk_attach(&xd->sc_dk);

	if (xdgetdisklabel(xd, xa->buf) != XD_ERR_AOK)
		goto done;

	/* inform the user of what is up */
	printf("%s: <%s>, pcyl %d, hw_spt %d\n", xd->sc_dev.dv_xname,
		xa->buf, xd->pcyl, spt);
	mb = xd->ncyl * (xd->nhead * xd->nsect) / (1048576 / XDFM_BPS);
	printf("%s: %dMB, %d cyl, %d head, %d sec, %d bytes/sec\n",
		xd->sc_dev.dv_xname, mb, xd->ncyl, xd->nhead, xd->nsect,
		XDFM_BPS);

	/* now set the real drive parameters! */

	rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode);
	XDC_DONE(xdc, rqno, err);
	if (err) {
		printf("%s: write real drive parameters failed: %s\n",
			xd->sc_dev.dv_xname, xdc_e2str(err));
		goto done;
	}
	newstate = XD_DRIVE_ONLINE;

	/*
	 * read bad144 table. this table resides on the first sector of the
	 * last track of the disk (i.e. second cyl of "acyl" area).
	 */

	blk = (xd->ncyl + xd->acyl - 1) * (xd->nhead * xd->nsect) +	/* last cyl */
	    (xd->nhead - 1) * xd->nsect;	/* last head */
	rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, blk, 1, xa->dvmabuf, fmode);
	XDC_DONE(xdc, rqno, err);
	if (err) {
		printf("%s: reading bad144 failed: %s\n",
			xd->sc_dev.dv_xname, xdc_e2str(err));
		goto done;
	}

	/* check dkbad for sanity: every entry must be blank or describe a
	 * sector inside the drive's geometry */
	dkb = (struct dkbad *) xa->buf;
	for (lcv = 0; lcv < NBT_BAD; lcv++) {
		if ((dkb->bt_bad[lcv].bt_cyl == 0xffff ||
		     dkb->bt_bad[lcv].bt_cyl == 0) &&
		    dkb->bt_bad[lcv].bt_trksec == 0xffff)
			continue;	/* blank */
		if (dkb->bt_bad[lcv].bt_cyl >= xd->ncyl)
			break;
		if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xd->nhead)
			break;
		if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xd->nsect)
			break;
	}
	if (lcv != NBT_BAD) {
		/* bailed out of the loop: table is bogus, keep the empty one */
		printf("%s: warning: invalid bad144 sector!\n",
			xd->sc_dev.dv_xname);
	} else {
		bcopy(xa->buf, &xd->dkb, XDFM_BPS);
	}

	if (xa->booting) {
		/* restore bootpath! (do this via attach_args again?)*/
		bp = bootpath_store(0, NULL);
		if (bp && strcmp("xd", bp->name) == 0 &&
		    xd->xd_drive == bp->val[0])
			bp->dev = &xd->sc_dev;
	}

done:
	/* publish the final state and let any waiters in */
	xd->state = newstate;
	if (!xa->booting) {
		wakeup(&xd->state);
		splx(s);
	}
}
744:
745: /*
746: * end of autoconfig functions
747: */
748:
749: /*
750: * { b , c } d e v s w f u n c t i o n s
751: */
752:
753: /*
754: * xdclose: close device
755: */
756: int
757: xdclose(dev, flag, fmt, p)
758: dev_t dev;
759: int flag, fmt;
760: struct proc *p;
761: {
762: struct xd_softc *xd = xd_cd.cd_devs[DISKUNIT(dev)];
763: int part = DISKPART(dev);
764:
765: /* clear mask bits */
766:
767: switch (fmt) {
768: case S_IFCHR:
769: xd->sc_dk.dk_copenmask &= ~(1 << part);
770: break;
771: case S_IFBLK:
772: xd->sc_dk.dk_bopenmask &= ~(1 << part);
773: break;
774: }
775: xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
776:
777: return 0;
778: }
779:
780: /*
781: * xddump: crash dump system
782: */
783: int
784: xddump(dev, blkno, va, size)
785: dev_t dev;
786: daddr64_t blkno;
787: caddr_t va;
788: size_t size;
789: {
790: int unit, part;
791: struct xd_softc *xd;
792:
793: unit = DISKUNIT(dev);
794: if (unit >= xd_cd.cd_ndevs)
795: return ENXIO;
796: part = DISKPART(dev);
797:
798: xd = xd_cd.cd_devs[unit];
799:
800: printf("%s%c: crash dump not supported (yet)\n", xd->sc_dev.dv_xname,
801: 'a' + part);
802:
803: return ENXIO;
804:
805: /* outline: globals: "dumplo" == sector number of partition to start
806: * dump at (convert to physical sector with partition table)
807: * "dumpsize" == size of dump in clicks "physmem" == size of physical
808: * memory (clicks, ctob() to get bytes) (normal case: dumpsize ==
809: * physmem)
810: *
811: * dump a copy of physical memory to the dump device starting at sector
812: * "dumplo" in the swap partition (make sure > 0). map in pages as
813: * we go. use polled I/O.
814: *
815: * XXX how to handle NON_CONTIG? */
816:
817: }
818:
819: /*
820: * xdioctl: ioctls on XD drives. based on ioctl's of other netbsd disks.
821: */
int
xdioctl(dev, command, addr, flag, p)
	dev_t dev;
	u_long command;
	caddr_t addr;
	int flag;
	struct proc *p;

{
	struct xd_softc *xd;
	struct xd_iocmd *xio;
	int error, s, unit;

	unit = DISKUNIT(dev);

	/* unit must be configured */
	if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL)
		return (ENXIO);

	/* switch on ioctl type */

	switch (command) {
	case DIOCSBAD:		/* set bad144 info */
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* copy the new table in at splbio so I/O doesn't see a
		 * half-updated table */
		s = splbio();
		bcopy(addr, &xd->dkb, sizeof(xd->dkb));
		splx(s);
		return 0;

	case DIOCGDINFO:	/* get disk label */
		bcopy(xd->sc_dk.dk_label, addr, sizeof(struct disklabel));
		return 0;

	case DIOCGPART:	/* get partition info */
		((struct partinfo *) addr)->disklab = xd->sc_dk.dk_label;
		((struct partinfo *) addr)->part =
		    &xd->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		return 0;

	case DIOCSDINFO:	/* set disk label (in core only) */
		if ((flag & FWRITE) == 0)
			return EBADF;
		error = setdisklabel(xd->sc_dk.dk_label,
		    (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0);
		if (error == 0) {
			/* a valid label promotes an unlabeled drive */
			if (xd->state == XD_DRIVE_NOLABEL)
				xd->state = XD_DRIVE_ONLINE;
		}
		return error;

	case DIOCWLABEL:	/* change write status of disk label */
		if ((flag & FWRITE) == 0)
			return EBADF;
		if (*(int *) addr)
			xd->flags |= XD_WLABEL;
		else
			xd->flags &= ~XD_WLABEL;
		return 0;

	case DIOCWDINFO:	/* write disk label to disk as well */
		if ((flag & FWRITE) == 0)
			return EBADF;
		error = setdisklabel(xd->sc_dk.dk_label,
		    (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0);
		if (error == 0) {
			if (xd->state == XD_DRIVE_NOLABEL)
				xd->state = XD_DRIVE_ONLINE;

			/* Simulate opening partition 0 so write succeeds. */
			xd->sc_dk.dk_openmask |= (1 << 0);
			error = writedisklabel(DISKLABELDEV(dev), xdstrategy,
			    xd->sc_dk.dk_label);
			/* recompute the real open mask afterwards */
			xd->sc_dk.dk_openmask =
			    xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
		}
		return error;

	case DIOSXDCMD:		/* raw controller command; root only */
		xio = (struct xd_iocmd *) addr;
		if ((error = suser(p, 0)) != 0)
			return (error);
		return (xdc_ioctlcmd(xd, dev, xio));

	default:
		return ENOTTY;
	}
}
909: /*
910: * xdopen: open drive
911: */
912:
913: int
914: xdopen(dev, flag, fmt, p)
915: dev_t dev;
916: int flag, fmt;
917: struct proc *p;
918: {
919: int unit, part;
920: struct xd_softc *xd;
921: struct xdc_attach_args xa;
922:
923: /* first, could it be a valid target? */
924:
925: unit = DISKUNIT(dev);
926: if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL)
927: return (ENXIO);
928: part = DISKPART(dev);
929:
930: /* do we need to attach the drive? */
931:
932: if (xd->state == XD_DRIVE_UNKNOWN) {
933: xa.driveno = xd->xd_drive;
934: xa.dvmabuf = (char *)dvma_malloc(XDFM_BPS, &xa.buf, M_NOWAIT);
935: xa.fullmode = XD_SUB_WAIT;
936: xa.booting = 0;
937: xdattach((struct device *) xd->parent, (struct device *) xd, &xa);
938: dvma_free(xa.dvmabuf, XDFM_BPS, &xa.buf);
939: if (xd->state == XD_DRIVE_UNKNOWN) {
940: return (EIO);
941: }
942: }
943: /* check for partition */
944:
945: if (part != RAW_PART &&
946: (part >= xd->sc_dk.dk_label->d_npartitions ||
947: xd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
948: return (ENXIO);
949: }
950: /* set open masks */
951:
952: switch (fmt) {
953: case S_IFCHR:
954: xd->sc_dk.dk_copenmask |= (1 << part);
955: break;
956: case S_IFBLK:
957: xd->sc_dk.dk_bopenmask |= (1 << part);
958: break;
959: }
960: xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask;
961:
962: return 0;
963: }
964:
965: int
966: xdread(dev, uio, flags)
967: dev_t dev;
968: struct uio *uio;
969: int flags;
970: {
971:
972: return (physio(xdstrategy, NULL, dev, B_READ, minphys, uio));
973: }
974:
975: int
976: xdwrite(dev, uio, flags)
977: dev_t dev;
978: struct uio *uio;
979: int flags;
980: {
981:
982: return (physio(xdstrategy, NULL, dev, B_WRITE, minphys, uio));
983: }
984:
985:
986: /*
987: * xdsize: return size of a partition for a dump
988: */
989:
990: daddr64_t
991: xdsize(dev)
992: dev_t dev;
993:
994: {
995: struct xd_softc *xdsc;
996: int unit, part, size, omask;
997:
998: /* valid unit? */
999: unit = DISKUNIT(dev);
1000: if (unit >= xd_cd.cd_ndevs || (xdsc = xd_cd.cd_devs[unit]) == NULL)
1001: return (-1);
1002:
1003: part = DISKPART(dev);
1004: omask = xdsc->sc_dk.dk_openmask & (1 << part);
1005:
1006: if (omask == 0 && xdopen(dev, 0, S_IFBLK, NULL) != 0)
1007: return (-1);
1008:
1009: /* do it */
1010: if (xdsc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1011: size = -1; /* only give valid size for swap partitions */
1012: else
1013: size = DL_GETPSIZE(&xdsc->sc_dk.dk_label->d_partitions[part]) *
1014: (xdsc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1015: if (omask == 0 && xdclose(dev, 0, S_IFBLK, NULL) != 0)
1016: return (-1);
1017: return (size);
1018: }
1019: /*
1020: * xdstrategy: buffering system interface to xd.
1021: */
1022:
void
xdstrategy(bp)
	struct buf *bp;

{
	struct xd_softc *xd;
	struct xdc_softc *parent;
	struct buf *wq;
	int s, unit;
	struct xdc_attach_args xa;

	unit = DISKUNIT(bp->b_dev);

	/* check for live device: configured unit, non-negative block,
	 * byte count a multiple of the sector size */

	if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == 0 ||
	    bp->b_blkno < 0 ||
	    (bp->b_bcount % xd->sc_dk.dk_label->d_secsize) != 0) {
		bp->b_error = EINVAL;
		goto bad;
	}
	/* do we need to attach the drive? (drive spun up after boot) */

	if (xd->state == XD_DRIVE_UNKNOWN) {
		xa.driveno = xd->xd_drive;
		xa.dvmabuf = (char *)dvma_malloc(XDFM_BPS, &xa.buf, M_NOWAIT);
		xa.fullmode = XD_SUB_WAIT;
		xa.booting = 0;
		xdattach((struct device *)xd->parent, (struct device *)xd, &xa);
		dvma_free(xa.dvmabuf, XDFM_BPS, &xa.buf);
		if (xd->state == XD_DRIVE_UNKNOWN) {
			bp->b_error = EIO;
			goto bad;
		}
	}
	if (xd->state != XD_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) {
		/* no I/O to unlabeled disks, unless raw partition */
		bp->b_error = EIO;
		goto bad;
	}
	/* short circuit zero length request */

	if (bp->b_bcount == 0)
		goto done;

	/* check bounds with label (disksubr.c).  Determine the size of the
	 * transfer, and make sure it is within the boundaries of the
	 * partition.  Adjust transfer if needed, and signal errors or early
	 * completion. */

	if (bounds_check_with_label(bp, xd->sc_dk.dk_label,
	    (xd->flags & XD_WLABEL) != 0) <= 0)
		goto done;

	/*
	 * now we know we have a valid buf structure that we need to do I/O
	 * on.
	 *
	 * note that we don't disksort because the controller has a sorting
	 * algorithm built into the hardware.
	 */

	s = splbio();		/* protect the queues */

	/* first, give jobs in front of us a chance */
	parent = xd->parent;
	while (parent->nfree > 0 && parent->sc_wq.b_actf)
		if (xdc_startbuf(parent, NULL, NULL) != XD_ERR_AOK)
			break;

	/* if there are no free iorq's, then we just queue and return.  the
	 * buffs will get picked up later by xdcintr().
	 */

	if (parent->nfree == 0) {
		/* tail-insert bp into the sc_wq list: b_actf is the forward
		 * link, b_actb points at the previous node's b_actf slot */
		wq = &xd->parent->sc_wq;
		bp->b_actf = 0;
		bp->b_actb = wq->b_actb;
		*wq->b_actb = bp;
		wq->b_actb = &bp->b_actf;
		splx(s);
		return;
	}

	/* now we have free iopb's and we are at splbio... start 'em up */
	if (xdc_startbuf(parent, xd, bp) != XD_ERR_AOK) {
		splx(s);
		return;
	}

	/* done! */

	splx(s);
	return;

bad:				/* tells upper layers we have an error */
	bp->b_flags |= B_ERROR;
done:				/* tells upper layers we are done with this
				 * buf */
	bp->b_resid = bp->b_bcount;
	s = splbio();
	biodone(bp);
	splx(s);
}
1127: /*
1128: * end of {b,c}devsw functions
1129: */
1130:
1131: /*
1132: * i n t e r r u p t f u n c t i o n
1133: *
1134: * xdcintr: hardware interrupt.
1135: */
1136: int
1137: xdcintr(v)
1138: void *v;
1139:
1140: {
1141: struct xdc_softc *xdcsc = v;
1142:
1143: /* remove as many done IOPBs as possible */
1144:
1145: xdc_remove_iorq(xdcsc);
1146:
1147: /* start any iorq's already waiting */
1148:
1149: xdc_start(xdcsc, XDC_MAXIOPB);
1150:
1151: /* fill up any remaining iorq's with queue'd buffers */
1152:
1153: while (xdcsc->nfree > 0 && xdcsc->sc_wq.b_actf)
1154: if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK)
1155: break;
1156:
1157: return (1);
1158: }
1159: /*
1160: * end of interrupt function
1161: */
1162:
1163: /*
1164: * i n t e r n a l f u n c t i o n s
1165: */
1166:
1167: /*
1168: * xdc_rqinit: fill out the fields of an I/O request
1169: */
1170:
1171: inline void
1172: xdc_rqinit(rq, xdc, xd, md, blk, cnt, db, bp)
1173: struct xd_iorq *rq;
1174: struct xdc_softc *xdc;
1175: struct xd_softc *xd;
1176: int md;
1177: u_long blk;
1178: int cnt;
1179: caddr_t db;
1180: struct buf *bp;
1181: {
1182: rq->xdc = xdc;
1183: rq->xd = xd;
1184: rq->ttl = XDC_MAXTTL + 10;
1185: rq->mode = md;
1186: rq->tries = rq->errno = rq->lasterror = 0;
1187: rq->blockno = blk;
1188: rq->sectcnt = cnt;
1189: rq->dbuf = rq->dbufbase = db;
1190: rq->buf = bp;
1191: }
1192: /*
1193: * xdc_rqtopb: load up an IOPB based on an iorq
1194: */
1195:
void
xdc_rqtopb(iorq, iopb, cmd, subfun)
	struct xd_iorq *iorq;
	struct xd_iopb *iopb;
	int cmd, subfun;

{
	u_long block, dp;

	/* standard stuff: clear status bits, set command/subfunction/unit */

	iopb->errs = iopb->done = 0;
	iopb->comm = cmd;
	iopb->errno = iopb->status = 0;
	iopb->subfun = subfun;
	if (iorq->xd)
		iopb->unit = iorq->xd->xd_drive;
	else
		iopb->unit = 0;

	/*
	 * check for alternate IOPB format: write-parameter commands reuse
	 * the iopb with a per-subfunction layout (ctrl/drive/format).
	 */

	if (cmd == XDCMD_WRP) {
		switch (subfun) {
		case XDFUN_CTL:{
			/* controller parameters */
			struct xd_iopb_ctrl *ctrl =
				(struct xd_iopb_ctrl *) iopb;
			iopb->lll = 0;
			/* no interrupt level for polled requests */
			iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL)
					? 0
					: iorq->xdc->ipl;
			ctrl->param_a = XDPA_TMOD | XDPA_DACF;
			ctrl->param_b = XDPB_ROR | XDPB_TDT_3_2USEC;
			ctrl->param_c = XDPC_OVS | XDPC_COP | XDPC_ASR |
					XDPC_RBC | XDPC_ECC2;
			ctrl->throttle = XDC_THROTTLE;
#ifdef __sparc__
			/* sun4/300 needs a different bus-delay value */
			if (CPU_ISSUN4 && cpuinfo.cpu_type == CPUTYP_4_300)
				ctrl->delay = XDC_DELAY_4_300;
			else
				ctrl->delay = XDC_DELAY_SPARC;
#endif
#ifdef sun3
			ctrl->delay = XDC_DELAY_SUN3;
#endif
			break;
		}
		case XDFUN_DRV:{
			/* drive geometry parameters */
			struct xd_iopb_drive *drv =
				(struct xd_iopb_drive *)iopb;
			/* we assume that the disk label has the right
			 * info */
			if (XD_STATE(iorq->mode) == XD_SUB_POLL)
				drv->dparam_ipl = (XDC_DPARAM << 3);
			else
				drv->dparam_ipl = (XDC_DPARAM << 3) |
					iorq->xdc->ipl;
			drv->maxsect = iorq->xd->nsect - 1;
			drv->maxsector = drv->maxsect;
			/* note: maxsector != maxsect only if you are
			 * doing cyl sparing */
			drv->headoff = 0;
			drv->maxcyl = iorq->xd->pcyl - 1;
			drv->maxhead = iorq->xd->nhead - 1;
			break;
		}
		case XDFUN_FMT:{
			/* format parameters */
			struct xd_iopb_format *form =
				(struct xd_iopb_format *) iopb;
			if (XD_STATE(iorq->mode) == XD_SUB_POLL)
				form->interleave_ipl = (XDC_INTERLEAVE << 3);
			else
				form->interleave_ipl = (XDC_INTERLEAVE << 3) |
					iorq->xdc->ipl;
			form->field1 = XDFM_FIELD1;
			form->field2 = XDFM_FIELD2;
			form->field3 = XDFM_FIELD3;
			form->field4 = XDFM_FIELD4;
			form->bytespersec = XDFM_BPS;
			form->field6 = XDFM_FIELD6;
			form->field7 = XDFM_FIELD7;
			break;
		}
		}
	} else {

		/* normal IOPB case (harmless to RDP command) */

		iopb->lll = 0;
		iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL)
				? 0
				: iorq->xdc->ipl;
		iopb->sectcnt = iorq->sectcnt;
		/* decompose the absolute block number into cyl/head/sector */
		block = iorq->blockno;
		if (iorq->xd == NULL || block == 0) {
			iopb->sectno = iopb->headno = iopb->cylno = 0;
		} else {
			iopb->sectno = block % iorq->xd->nsect;
			block = block / iorq->xd->nsect;
			iopb->headno = block % iorq->xd->nhead;
			block = block / iorq->xd->nhead;
			iopb->cylno = block;
		}
		/* DVMA address of the data buffer (0 if no buffer) */
		dp = (u_long) iorq->dbuf - DVMA_BASE;
		dp = iopb->daddr = (iorq->dbuf == NULL) ? 0 : dp;
		/*
		 * pick the VME address modifier based on whether the
		 * transfer's end falls above 16MB; presumably A32 vs A24
		 * addressing — confirm against the 753/7053 manual.
		 */
		iopb->addrmod = ((dp + (XDFM_BPS * iorq->sectcnt)) > 0x1000000)
			? XDC_ADDRMOD32
			: XDC_ADDRMOD;
	}
}
1306:
1307: /*
1308: * xdc_cmd: front end for POLL'd and WAIT'd commands. Returns rqno.
1309: * If you've already got an IORQ, you can call submit directly (currently
1310: * there is no need to do this). NORM requests are handled separately.
1311: */
int
xdc_cmd(xdcsc, cmd, subfn, unit, block, scnt, dptr, fullmode)
	struct xdc_softc *xdcsc;
	int cmd, subfn, unit, block, scnt;
	char *dptr;
	int fullmode;

{
	int rqno, submode = XD_STATE(fullmode), retry;
	struct xd_iorq *iorq;
	struct xd_iopb *iopb;

	/*
	 * get iorq/iopb: how we wait for a free slot depends on the
	 * submission mode.  POLL spins via the pio driver; WAIT sleeps.
	 */
	switch (submode) {
	case XD_SUB_POLL:
		/* drain the controller until an iorq frees up */
		while (xdcsc->nfree == 0) {
			if (xdc_piodriver(xdcsc, 0, 1) != XD_ERR_AOK)
				return (XD_ERR_FAIL);
		}
		break;
	case XD_SUB_WAIT:
		retry = 1;
		while (retry) {
			/* sleep until a free iorq exists (woken by
			 * xdc_reset / XDC_FREE users via &nfree) */
			while (xdcsc->nfree == 0) {
				if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0))
					return (XD_ERR_FAIL);
			}
			/* also throttle on too many "done" (unreaped)
			 * requests */
			while (xdcsc->ndone > XDC_SUBWAITLIM) {
				if (tsleep(&xdcsc->ndone, PRIBIO, "xdsubwait", 0))
					return (XD_ERR_FAIL);
			}
			/* recheck: we may have lost the race while asleep */
			if (xdcsc->nfree)
				retry = 0;	/* got it */
		}
		break;
	default:
		return (XD_ERR_FAIL);	/* illegal */
	}
	if (xdcsc->nfree == 0)
		panic("xdcmd nfree");
	rqno = XDC_RQALLOC(xdcsc);
	iorq = &xdcsc->reqs[rqno];
	iopb = iorq->iopb;


	/* init iorq/iopb */

	xdc_rqinit(iorq, xdcsc,
	    (unit == XDC_NOUNIT) ? NULL : xdcsc->sc_drives[unit],
	    fullmode, block, scnt, dptr, NULL);

	/* load IOPB from iorq */

	xdc_rqtopb(iorq, iopb, cmd, subfn);

	/* submit it for processing */

	xdc_submit_iorq(xdcsc, rqno, fullmode);	/* error code will be in iorq */

	/* caller owns the iorq and must free it; status is in iorq->errno */
	return (rqno);
}
1373: /*
1374: * xdc_startbuf
1375: * start a buffer running, assumes nfree > 0
1376: */
1377:
int
xdc_startbuf(xdcsc, xdsc, bp)
	struct xdc_softc *xdcsc;
	struct xd_softc *xdsc;
	struct buf *bp;

{
	int rqno, partno;
	struct xd_iorq *iorq;
	struct xd_iopb *iopb;
	struct buf *wq;
	u_long block;
	caddr_t dbuf;

	if (!xdcsc->nfree)
		panic("xdc_startbuf free");
	rqno = XDC_RQALLOC(xdcsc);
	iorq = &xdcsc->reqs[rqno];
	iopb = iorq->iopb;

	/*
	 * get buf: if the caller didn't supply one, unlink the head of
	 * the controller wait queue (b_actf/b_actb doubly linked list)
	 * and look up its drive from the device number.
	 */

	if (bp == NULL) {
		bp = xdcsc->sc_wq.b_actf;
		if (!bp)
			panic("xdc_startbuf bp");
		wq = bp->b_actf;
		if (wq)
			wq->b_actb = bp->b_actb;
		else
			xdcsc->sc_wq.b_actb = bp->b_actb;
		*bp->b_actb = wq;
		xdsc = xdcsc->sc_drives[DISKUNIT(bp->b_dev)];
	}
	partno = DISKPART(bp->b_dev);
#ifdef XDC_DEBUG
	printf("xdc_startbuf: %s%c: %s block %lld\n",
	    xdsc->sc_dev.dv_xname, 'a' + partno,
	    (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno);
	printf("xdc_startbuf: b_bcount %d, b_data 0x%x\n",
	    bp->b_bcount, bp->b_data);
#endif

	/*
	 * load request. we have to calculate the correct block number based
	 * on partition info.
	 *
	 * note that iorq points to the buffer as mapped into DVMA space,
	 * where as the bp->b_data points to its non-DVMA mapping.
	 */

	block = bp->b_blkno + ((partno == RAW_PART) ? 0 :
	    DL_GETPOFFSET(&xdsc->sc_dk.dk_label->d_partitions[partno]));

	/* map the data buffer into DVMA space for the controller */
	dbuf = kdvma_mapin(bp->b_data, bp->b_bcount, 0);
	if (dbuf == NULL) {	/* out of DVMA space */
		printf("%s: warning: out of DVMA space\n",
		    xdcsc->sc_dev.dv_xname);
		XDC_FREE(xdcsc, rqno);
		/* put the buf back at the end of the wait queue so it can
		 * be retried later */
		wq = &xdcsc->sc_wq;	/* put at end of queue */
		bp->b_actf = 0;
		bp->b_actb = wq->b_actb;
		*wq->b_actb = bp;
		wq->b_actb = &bp->b_actf;
		return (XD_ERR_FAIL);	/* XXX: need some sort of
					 * call-back scheme here? */
	}

	/* init iorq and load iopb from it */

	xdc_rqinit(iorq, xdcsc, xdsc, XD_SUB_NORM | XD_MODE_VERBO, block,
	    bp->b_bcount / XDFM_BPS, dbuf, bp);

	xdc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XDCMD_RD : XDCMD_WR, 0);

	/* Instrumentation. */
	disk_busy(&xdsc->sc_dk);

	/* now submit [note that xdc_submit_iorq can never fail on NORM reqs] */

	xdc_submit_iorq(xdcsc, rqno, XD_SUB_NORM);
	return (XD_ERR_AOK);
}
1461:
1462:
1463: /*
1464: * xdc_submit_iorq: submit an iorq for processing. returns XD_ERR_AOK
1465: * if ok. if it fail returns an error code. type is XD_SUB_*.
1466: *
1467: * note: caller frees iorq in all cases except NORM
1468: *
1469: * return value:
1470: * NORM: XD_AOK (req pending), XD_FAIL (couldn't submit request)
1471: * WAIT: XD_AOK (success), <error-code> (failed)
1472: * POLL: <same as WAIT>
1473: * NOQ : <same as NORM>
1474: *
1475: * there are three sources for i/o requests:
1476: * [1] xdstrategy: normal block I/O, using "struct buf" system.
1477: * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
1478: * [3] open/ioctl: these are I/O requests done in the context of a process,
1479: * and the process should block until they are done.
1480: *
1481: * software state is stored in the iorq structure. each iorq has an
1482: * iopb structure. the hardware understands the iopb structure.
1483: * every command must go through an iopb. a 7053 can only handle
1484: * XDC_MAXIOPB (31) active iopbs at one time. iopbs are allocated in
1485: * DVMA space at boot up time. what happens if we run out of iopb's?
1486: * for i/o type [1], the buffers are queued at the "buff" layer and
1487: * picked up later by the interrupt routine. for case [2] the
1488: * programmed i/o driver is called with a special flag that says
1489: * return when one iopb is free. for case [3] the process can sleep
1490: * on the iorq free list until some iopbs are available.
1491: */
1492:
1493:
int
xdc_submit_iorq(xdcsc, iorqno, type)
	struct xdc_softc *xdcsc;
	int iorqno;
	int type;

{
	u_long iopbaddr;
	struct xd_iorq *iorq = &xdcsc->reqs[iorqno];

#ifdef XDC_DEBUG
	printf("xdc_submit_iorq(%s, no=%d, type=%d)\n", xdcsc->sc_dev.dv_xname,
	    iorqno, type);
#endif

	/* first check and see if controller is busy */
	if (xdcsc->xdc->xdc_csr & XDC_ADDING) {
#ifdef XDC_DEBUG
		printf("xdc_submit_iorq: XDC not ready (ADDING)\n");
#endif
		if (type == XD_SUB_NOQ)
			return (XD_ERR_FAIL);	/* failed */
		/* busy: queue the request and wait per submission type */
		XDC_TWAIT(xdcsc, iorqno);	/* put at end of waitq */
		switch (type) {
		case XD_SUB_NORM:
			return XD_ERR_AOK;	/* success */
		case XD_SUB_WAIT:
			/* sleep until completion; woken via wakeup(iorq)
			 * in xdc_remove_iorq or xdc_reset */
			while (iorq->iopb->done == 0) {
				tsleep(iorq, PRIBIO, "xdiorq", 0);
			}
			return (iorq->errno);
		case XD_SUB_POLL:
			return (xdc_piodriver(xdcsc, iorqno, 0));
		default:
			panic("xdc_submit_iorq adding");
		}
	}
#ifdef XDC_DEBUG
	{
		/* dump the raw iopb bytes being handed to the controller */
		u_char *rio = (u_char *) iorq->iopb;
		int sz = sizeof(struct xd_iopb), lcv;
		printf("%s: aio #%d [",
			xdcsc->sc_dev.dv_xname, iorq - xdcsc->reqs);
		for (lcv = 0; lcv < sz; lcv++)
			printf(" %02x", rio[lcv]);
		printf("]\n");
	}
#endif	/* XDC_DEBUG */

	/* controller not busy, start command */
	iopbaddr = (u_long) iorq->iopb - (u_long) DVMA_BASE;
	XDC_GO(xdcsc->xdc, iopbaddr);	/* go! */
	xdcsc->nrun++;
	/* command now running, wrap it up */
	switch (type) {
	case XD_SUB_NORM:
	case XD_SUB_NOQ:
		return (XD_ERR_AOK);	/* success */
	case XD_SUB_WAIT:
		while (iorq->iopb->done == 0) {
			tsleep(iorq, PRIBIO, "xdiorq", 0);
		}
		return (iorq->errno);
	case XD_SUB_POLL:
		return (xdc_piodriver(xdcsc, iorqno, 0));
	default:
		panic("xdc_submit_iorq wrap up");
	}
	panic("xdc_submit_iorq");
	return 0;	/* not reached */
}
1565:
1566:
1567: /*
1568: * xdc_piodriver
1569: *
1570: * programmed i/o driver. this function takes over the computer
1571: * and drains off all i/o requests. it returns the status of the iorq
 * the caller is interested in.  if freeone is true, then it returns
1573: * when there is a free iorq.
1574: */
int
xdc_piodriver(xdcsc, iorqno, freeone)
	struct xdc_softc *xdcsc;
	int iorqno;
	int freeone;

{
	int nreset = 0;
	int retval = 0;
	u_long count;
	struct xdc *xdc = xdcsc->xdc;
#ifdef XDC_DEBUG
	printf("xdc_piodriver(%s, %d, freeone=%d)\n", xdcsc->sc_dev.dv_xname,
	    iorqno, freeone);
#endif

	/* loop while any request is waiting or running */
	while (xdcsc->nwait || xdcsc->nrun) {
#ifdef XDC_DEBUG
		printf("xdc_piodriver: wait=%d, run=%d\n",
			xdcsc->nwait, xdcsc->nrun);
#endif
		/* busy-wait for a completion or fatal-error bit */
		XDC_WAIT(xdc, count, XDC_MAXTIME, (XDC_REMIOPB | XDC_F_ERROR));
#ifdef XDC_DEBUG
		printf("xdc_piodriver: done wait with count = %d\n", count);
#endif
		/* we expect some progress soon: after two failed resets,
		 * a third timeout means the controller is dead */
		if (count == 0 && nreset >= 2) {
			xdc_reset(xdcsc, 0, XD_RSET_ALL, XD_ERR_FAIL, 0);
#ifdef XDC_DEBUG
			printf("xdc_piodriver: timeout\n");
#endif
			return (XD_ERR_FAIL);
		}
		if (count == 0) {
			/* first reset preserves all requests; later ones
			 * preserve only iorqno */
			if (xdc_reset(xdcsc, 0,
				      (nreset++ == 0) ? XD_RSET_NONE : iorqno,
				      XD_ERR_FAIL,
				      0) == XD_ERR_FAIL)
				return (XD_ERR_FAIL);	/* flushes all but POLL
							 * requests, resets */
			continue;
		}
		xdc_remove_iorq(xdcsc);	/* could resubmit request */
		if (freeone) {
			/* caller only wanted a free iorq, not full drain */
			if (xdcsc->nrun < XDC_MAXIOPB) {
#ifdef XDC_DEBUG
				printf("xdc_piodriver: done: one free\n");
#endif
				return (XD_ERR_AOK);
			}
			continue;	/* don't xdc_start */
		}
		xdc_start(xdcsc, XDC_MAXIOPB);
	}

	/* get return value */

	retval = xdcsc->reqs[iorqno].errno;

#ifdef XDC_DEBUG
	printf("xdc_piodriver: done, retval = 0x%x (%s)\n",
	    xdcsc->reqs[iorqno].errno, xdc_e2str(xdcsc->reqs[iorqno].errno));
#endif

	/* now that we've drained everything, start up any bufs that have
	 * queued */

	while (xdcsc->nfree > 0 && xdcsc->sc_wq.b_actf)
		if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK)
			break;

	return (retval);
}
1648:
1649: /*
 * xdc_xdreset: reset one drive.  NOTE: assumes xdc was just reset.
1651: * we steal iopb[0] for this, but we put it back when we are done.
1652: */
void
xdc_xdreset(xdcsc, xdsc)
	struct xdc_softc *xdcsc;
	struct xd_softc *xdsc;

{
	struct xd_iopb tmpiopb;
	u_long addr;
	int del;
	/* save iopb[0], then reuse it to issue a polled RST to the drive */
	bcopy(xdcsc->iopbase, &tmpiopb, sizeof(tmpiopb));
	bzero(xdcsc->iopbase, sizeof(tmpiopb));
	xdcsc->iopbase->comm = XDCMD_RST;
	xdcsc->iopbase->unit = xdsc->xd_drive;
	addr = (u_long) xdcsc->dvmaiopb;
	XDC_GO(xdcsc->xdc, addr);	/* go! */
	XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_REMIOPB);
	if (del <= 0 || xdcsc->iopbase->errs) {
		/* drive reset failed: report and hard-reset the controller */
		printf("%s: off-line: %s\n", xdcsc->sc_dev.dv_xname,
		    xdc_e2str(xdcsc->iopbase->errno));
		xdcsc->xdc->xdc_csr = XDC_RESET;
		XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
		if (del <= 0)
			panic("xdc_reset");
	} else {
		xdcsc->xdc->xdc_csr = XDC_CLRRIO;	/* clear RIO */
	}
	/* restore iopb[0] for its normal owner */
	bcopy(&tmpiopb, xdcsc->iopbase, sizeof(tmpiopb));
}
1681:
1682:
1683: /*
1684: * xdc_reset: reset everything: requests are marked as errors except
1685: * a polled request (which is resubmitted)
1686: */
int
xdc_reset(xdcsc, quiet, blastmode, error, xdsc)
	struct xdc_softc *xdcsc;
	int quiet, blastmode, error;
	struct xd_softc *xdsc;

{
	int del = 0, lcv, retval = XD_ERR_AOK;
	int oldfree = xdcsc->nfree;

	/* soft reset hardware */

	if (!quiet)
		printf("%s: soft reset\n", xdcsc->sc_dev.dv_xname);
	xdcsc->xdc->xdc_csr = XDC_RESET;
	XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
	if (del <= 0) {
		blastmode = XD_RSET_ALL;	/* dead, flush all requests */
		retval = XD_ERR_FAIL;
	}
	if (xdsc)
		xdc_xdreset(xdcsc, xdsc);

	/*
	 * fix queues based on "blast-mode": every active request is
	 * failed with "error", except that when blastmode names a
	 * specific iorq number, that one is resubmitted instead.
	 */

	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
		struct xd_iorq *iorq = &xdcsc->reqs[lcv];

		if (XD_STATE(iorq->mode) != XD_SUB_POLL &&
		    XD_STATE(iorq->mode) != XD_SUB_WAIT &&
		    XD_STATE(iorq->mode) != XD_SUB_NORM)
			/* is it active? */
			continue;

		xdcsc->nrun--;	/* it isn't running any more */
		if (blastmode == XD_RSET_ALL || blastmode != lcv) {
			/* failed */
			iorq->errno = error;
			xdcsc->iopbase[lcv].done = xdcsc->iopbase[lcv].errs = 1;
			switch (XD_STATE(xdcsc->reqs[lcv].mode)) {
			case XD_SUB_NORM:
				/* buffer I/O: error the buf, unmap its
				 * DVMA space, and complete it */
				iorq->buf->b_error = EIO;
				iorq->buf->b_flags |= B_ERROR;
				iorq->buf->b_resid =
					iorq->sectcnt * XDFM_BPS;
				dvma_mapout((vaddr_t)iorq->dbufbase,
					    (vaddr_t)iorq->buf->b_data,
					    iorq->buf->b_bcount);
				disk_unbusy(&xdcsc->reqs[lcv].xd->sc_dk,
				    (xdcsc->reqs[lcv].buf->b_bcount -
				    xdcsc->reqs[lcv].buf->b_resid),
				    (xdcsc->reqs[lcv].buf->b_flags & B_READ));
				biodone(iorq->buf);
				XDC_FREE(xdcsc, lcv);	/* add to free list */
				break;
			case XD_SUB_WAIT:
				wakeup(iorq);
				/* FALLTHROUGH: WAIT also moves to DONE */
			case XD_SUB_POLL:
				xdcsc->ndone++;
				iorq->mode =
					XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
				break;
			}

		} else {

			/* resubmit, put at front of wait queue */
			XDC_HWAIT(xdcsc, lcv);
		}
	}

	/*
	 * now, if stuff is waiting, start it.
	 * since we just reset it should go
	 */
	xdc_start(xdcsc, XDC_MAXIOPB);

	/* ok, we did it */
	if (oldfree == 0 && xdcsc->nfree)
		wakeup(&xdcsc->nfree);

#ifdef XDC_DIAG
	del = xdcsc->nwait + xdcsc->nrun + xdcsc->nfree + xdcsc->ndone;
	if (del != XDC_MAXIOPB)
		printf("%s: diag: xdc_reset miscount (%d should be %d)!\n",
			xdcsc->sc_dev.dv_xname, del, XDC_MAXIOPB);
	else
		if (xdcsc->ndone > XDC_MAXIOPB - XDC_SUBWAITLIM)
			printf("%s: diag: lots of done jobs (%d)\n",
				xdcsc->sc_dev.dv_xname, xdcsc->ndone);
#endif
	/* NOTE(review): printed even when "quiet" was requested —
	 * looks like a debug leftover; confirm before removing */
	printf("RESET DONE\n");
	return (retval);
}
1781: /*
1782: * xdc_start: start all waiting buffers
1783: */
1784:
1785: void
1786: xdc_start(xdcsc, maxio)
1787: struct xdc_softc *xdcsc;
1788: int maxio;
1789:
1790: {
1791: int rqno;
1792: while (maxio && xdcsc->nwait &&
1793: (xdcsc->xdc->xdc_csr & XDC_ADDING) == 0) {
1794: XDC_GET_WAITER(xdcsc, rqno); /* note: rqno is an "out"
1795: * param */
1796: if (xdc_submit_iorq(xdcsc, rqno, XD_SUB_NOQ) != XD_ERR_AOK)
1797: panic("xdc_start"); /* should never happen */
1798: maxio--;
1799: }
1800: }
1801: /*
1802: * xdc_remove_iorq: remove "done" IOPB's.
1803: */
1804:
int
xdc_remove_iorq(xdcsc)
	struct xdc_softc *xdcsc;

{
	int errno, rqno, comm, errs;
	struct xdc *xdc = xdcsc->xdc;
	struct xd_iopb *iopb;
	struct xd_iorq *iorq;
	struct buf *bp;

	if (xdc->xdc_csr & XDC_F_ERROR) {
		/*
		 * FATAL ERROR: should never happen under normal use. This
		 * error is so bad, you can't even tell which IOPB is bad, so
		 * we dump them all.
		 */
		errno = xdc->xdc_f_err;
		printf("%s: fatal error 0x%02x: %s\n", xdcsc->sc_dev.dv_xname,
		    errno, xdc_e2str(errno));
		if (xdc_reset(xdcsc, 0, XD_RSET_ALL, errno, 0) != XD_ERR_AOK) {
			printf("%s: soft reset failed!\n",
				xdcsc->sc_dev.dv_xname);
			panic("xdc_remove_iorq: controller DEAD");
		}
		return (XD_ERR_AOK);
	}

	/*
	 * get iopb that is done
	 *
	 * hmm... I used to read the address of the done IOPB off the VME
	 * registers and calculate the rqno directly from that. that worked
	 * until I started putting a load on the controller. when loaded, i
	 * would get interrupts but neither the REMIOPB or F_ERROR bits would
	 * be set, even after DELAY'ing a while! later on the timeout
	 * routine would detect IOPBs that were marked "running" but their
	 * "done" bit was set. rather than dealing directly with this
	 * problem, it is just easier to look at all running IOPB's for the
	 * done bit.
	 */
	if (xdc->xdc_csr & XDC_REMIOPB) {
		xdc->xdc_csr = XDC_CLRRIO;
	}

	/* scan every active iorq for a set "done" bit */
	for (rqno = 0; rqno < XDC_MAXIOPB; rqno++) {
		iorq = &xdcsc->reqs[rqno];
		if (iorq->mode == 0 || XD_STATE(iorq->mode) == XD_SUB_DONE)
			continue;	/* free, or done */
		iopb = &xdcsc->iopbase[rqno];
		if (iopb->done == 0)
			continue;	/* not done yet */

#ifdef XDC_DEBUG
		{
			u_char *rio = (u_char *) iopb;
			int sz = sizeof(struct xd_iopb), lcv;
			printf("%s: rio #%d [", xdcsc->sc_dev.dv_xname, rqno);
			for (lcv = 0; lcv < sz; lcv++)
				printf(" %02x", rio[lcv]);
			printf("]\n");
		}
#endif	/* XDC_DEBUG */

		xdcsc->nrun--;

		comm = iopb->comm;
		errs = iopb->errs;

		if (errs)
			iorq->errno = iopb->errno;
		else
			iorq->errno = 0;

		/* handle non-fatal errors */

		if (errs &&
		    xdc_error(xdcsc, iorq, iopb, rqno, comm) == XD_ERR_AOK)
			continue;	/* AOK: we resubmitted it */


		/* this iorq is now done (hasn't been restarted or anything) */

		if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror)
			xdc_perror(iorq, iopb, 0);

		/* now, if read/write check to make sure we got all the data
		 * we needed. (this may not be the case if we got an error in
		 * the middle of a multisector request). */

		if ((iorq->mode & XD_MODE_B144) != 0 && errs == 0 &&
		    (comm == XDCMD_RD || comm == XDCMD_WR)) {
			/* we just successfully processed a bad144 sector
			 * note: if we are in bad 144 mode, the pointers have
			 * been advanced already (see above) and are pointing
			 * at the bad144 sector. to exit bad144 mode, we
			 * must advance the pointers 1 sector and issue a new
			 * request if there are still sectors left to process
			 *
			 */
			XDC_ADVANCE(iorq, 1);	/* advance 1 sector */

			/* exit b144 mode */
			iorq->mode = iorq->mode & (~XD_MODE_B144);

			if (iorq->sectcnt) {	/* more to go! */
				/* rebuild the iopb for the remaining
				 * sectors and resubmit */
				iorq->lasterror = iorq->errno = iopb->errno = 0;
				iopb->errs = iopb->done = 0;
				iorq->tries = 0;
				iopb->sectcnt = iorq->sectcnt;
				iopb->cylno = iorq->blockno /
						iorq->xd->sectpercyl;
				/* NOTE(review): headno divides by nhead
				 * and sectno uses XDFM_BPS where nsect
				 * would be expected — present in the
				 * historic sources; confirm before
				 * changing */
				iopb->headno =
					(iorq->blockno / iorq->xd->nhead) %
						iorq->xd->nhead;
				iopb->sectno = iorq->blockno % XDFM_BPS;
				iopb->daddr = (u_long) iorq->dbuf - DVMA_BASE;
				XDC_HWAIT(xdcsc, rqno);
				xdc_start(xdcsc, 1);	/* resubmit */
				continue;
			}
		}
		/* final cleanup, totally done with this request */

		switch (XD_STATE(iorq->mode)) {
		case XD_SUB_NORM:
			/* buffer I/O: set resid, unmap DVMA, account,
			 * free the iorq and complete the buf */
			bp = iorq->buf;
			if (errs) {
				bp->b_error = EIO;
				bp->b_flags |= B_ERROR;
				bp->b_resid = iorq->sectcnt * XDFM_BPS;
			} else {
				bp->b_resid = 0;	/* done */
			}
			dvma_mapout((vaddr_t) iorq->dbufbase,
				    (vaddr_t) bp->b_data,
				    bp->b_bcount);
			disk_unbusy(&iorq->xd->sc_dk,
			    (bp->b_bcount - bp->b_resid),
			    (bp->b_flags & B_READ));
			XDC_FREE(xdcsc, rqno);
			biodone(bp);
			break;
		case XD_SUB_WAIT:
			/* sleeping submitter: mark done and wake it */
			iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
			xdcsc->ndone++;
			wakeup(iorq);
			break;
		case XD_SUB_POLL:
			iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE);
			xdcsc->ndone++;
			break;
		}
	}

	return (XD_ERR_AOK);
}
1962:
1963: /*
1964: * xdc_perror: print error.
1965: * - if still_trying is true: we got an error, retried and got a
1966: * different error. in that case lasterror is the old error,
1967: * and errno is the new one.
1968: * - if still_trying is not true, then if we ever had an error it
1969: * is in lasterror. also, if iorq->errno == 0, then we recovered
1970: * from that error (otherwise iorq->errno == iorq->lasterror).
1971: */
1972: void
1973: xdc_perror(iorq, iopb, still_trying)
1974: struct xd_iorq *iorq;
1975: struct xd_iopb *iopb;
1976: int still_trying;
1977:
1978: {
1979:
1980: int error = iorq->lasterror;
1981:
1982: printf("%s", (iorq->xd) ? iorq->xd->sc_dev.dv_xname
1983: : iorq->xdc->sc_dev.dv_xname);
1984: if (iorq->buf)
1985: printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev));
1986: if (iopb->comm == XDCMD_RD || iopb->comm == XDCMD_WR)
1987: printf("%s %d/%d/%d: ",
1988: (iopb->comm == XDCMD_RD) ? "read" : "write",
1989: iopb->cylno, iopb->headno, iopb->sectno);
1990: printf("%s", xdc_e2str(error));
1991:
1992: if (still_trying)
1993: printf(" [still trying, new error=%s]", xdc_e2str(iorq->errno));
1994: else
1995: if (iorq->errno == 0)
1996: printf(" [recovered in %d tries]", iorq->tries);
1997:
1998: printf("\n");
1999: }
2000:
2001: /*
2002: * xdc_error: non-fatal error encountered... recover.
2003: * return AOK if resubmitted, return FAIL if this iopb is done
2004: */
int
xdc_error(xdcsc, iorq, iopb, rqno, comm)
	struct xdc_softc *xdcsc;
	struct xd_iorq *iorq;
	struct xd_iopb *iopb;
	int rqno, comm;

{
	int errno = iorq->errno;
	int erract = errno & XD_ERA_MASK;
	int oldmode, advance, i;

	if (erract == XD_ERA_RSET) {	/* some errors require a reset */
		/* temporarily mark ourselves DONE so the reset's
		 * xdc_start pass skips this iorq, then restore */
		oldmode = iorq->mode;
		iorq->mode = XD_SUB_DONE | (~XD_SUB_MASK & oldmode);
		xdcsc->ndone++;
		/* make xdc_start ignore us */
		xdc_reset(xdcsc, 1, XD_RSET_NONE, errno, iorq->xd);
		iorq->mode = oldmode;
		xdcsc->ndone--;
	}
	/* check for read/write to a sector in bad144 table if bad: redirect
	 * request to bad144 area */

	if ((comm == XDCMD_RD || comm == XDCMD_WR) &&
	    (iorq->mode & XD_MODE_B144) == 0) {
		/* advance past the sectors that completed before the error */
		advance = iorq->sectcnt - iopb->sectcnt;
		XDC_ADVANCE(iorq, advance);
		if ((i = isbad(&iorq->xd->dkb, iorq->blockno / iorq->xd->sectpercyl,
			    (iorq->blockno / iorq->xd->nsect) % iorq->xd->nhead,
			    iorq->blockno % iorq->xd->nsect)) != -1) {
			iorq->mode |= XD_MODE_B144;	/* enter bad144 mode &
							 * redirect */
			iopb->errno = iopb->done = iopb->errs = 0;
			iopb->sectcnt = 1;
			iopb->cylno = (iorq->xd->ncyl + iorq->xd->acyl) - 2;
			/* second to last acyl */
			i = iorq->xd->sectpercyl - 1 - i;	/* follow bad144
								 * standard */
			/* NOTE(review): both head and sector are derived
			 * with nhead; sectno % nhead looks like it should
			 * be % nsect — present in the historic sources;
			 * confirm before changing */
			iopb->headno = i / iorq->xd->nhead;
			iopb->sectno = i % iorq->xd->nhead;
			XDC_HWAIT(xdcsc, rqno);
			xdc_start(xdcsc, 1);	/* resubmit */
			return (XD_ERR_AOK);	/* recovered! */
		}
	}

	/*
	 * it isn't a bad144 sector, must be real error! see if we can retry
	 * it?
	 */
	if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror)
		xdc_perror(iorq, iopb, 1);	/* inform of error state
						 * change */
	iorq->lasterror = errno;

	/* only reset-class and hard errors are retried, up to
	 * XDC_MAXTRIES times */
	if ((erract == XD_ERA_RSET || erract == XD_ERA_HARD)
	    && iorq->tries < XDC_MAXTRIES) {	/* retry? */
		iorq->tries++;
		iorq->errno = iopb->errno = iopb->done = iopb->errs = 0;
		XDC_HWAIT(xdcsc, rqno);
		xdc_start(xdcsc, 1);	/* restart */
		return (XD_ERR_AOK);	/* recovered! */
	}

	/* failed to recover from this error */
	return (XD_ERR_FAIL);
}
2073:
2074: /*
2075: * xdc_tick: make sure xd is still alive and ticking (err, kicking).
2076: */
void
xdc_tick(arg)
	void *arg;

{
	struct xdc_softc *xdcsc = arg;
	int lcv, s, reset = 0;
#ifdef XDC_DIAG
	int wait, run, free, done, whd = 0;
	u_char fqc[XDC_MAXIOPB], wqc[XDC_MAXIOPB], mark[XDC_MAXIOPB];
	/* snapshot the queue counters and queues at splbio */
	s = splbio();
	wait = xdcsc->nwait;
	run = xdcsc->nrun;
	free = xdcsc->nfree;
	done = xdcsc->ndone;
	bcopy(xdcsc->waitq, wqc, sizeof(wqc));
	bcopy(xdcsc->freereq, fqc, sizeof(fqc));
	splx(s);
	/* the four counters must account for every IOPB */
	if (wait + run + free + done != XDC_MAXIOPB) {
		printf("%s: diag: IOPB miscount (got w/f/r/d %d/%d/%d/%d, wanted %d)\n",
			xdcsc->sc_dev.dv_xname, wait, free, run, done, XDC_MAXIOPB);
		bzero(mark, sizeof(mark));
		printf("FREE: ");
		for (lcv = free; lcv > 0; lcv--) {
			printf("%d ", fqc[lcv - 1]);
			mark[fqc[lcv - 1]] = 1;
		}
		printf("\nWAIT: ");
		lcv = wait;
		while (lcv > 0) {
			printf("%d ", wqc[whd]);
			mark[wqc[whd]] = 1;
			whd = (whd + 1) % XDC_MAXIOPB;
			lcv--;
		}
		printf("\n");
		/* anything not on the free or wait queues must be running */
		for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
			if (mark[lcv] == 0)
				printf("MARK: running %d: mode %d done %d errs %d errno 0x%x ttl %d buf %p\n",
				lcv, xdcsc->reqs[lcv].mode,
				xdcsc->iopbase[lcv].done,
				xdcsc->iopbase[lcv].errs,
				xdcsc->iopbase[lcv].errno,
				xdcsc->reqs[lcv].ttl, xdcsc->reqs[lcv].buf);
		}
	} else
		if (done > XDC_MAXIOPB - XDC_SUBWAITLIM)
			printf("%s: diag: lots of done jobs (%d)\n",
				xdcsc->sc_dev.dv_xname, done);

#endif
#ifdef XDC_DEBUG
	printf("%s: tick: csr 0x%x, w/f/r/d %d/%d/%d/%d\n",
		xdcsc->sc_dev.dv_xname,
		xdcsc->xdc->xdc_csr, xdcsc->nwait, xdcsc->nfree, xdcsc->nrun,
		xdcsc->ndone);
	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
		if (xdcsc->reqs[lcv].mode)
			printf("running %d: mode %d done %d errs %d errno 0x%x\n",
			    lcv,
			    xdcsc->reqs[lcv].mode, xdcsc->iopbase[lcv].done,
			    xdcsc->iopbase[lcv].errs, xdcsc->iopbase[lcv].errno);
	}
#endif

	/* reduce ttl for each request if one goes to zero, reset xdc */
	s = splbio();
	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
		if (xdcsc->reqs[lcv].mode == 0 ||
		    XD_STATE(xdcsc->reqs[lcv].mode) == XD_SUB_DONE)
			continue;
		xdcsc->reqs[lcv].ttl--;
		if (xdcsc->reqs[lcv].ttl == 0)
			reset = 1;
	}
	if (reset) {
		printf("%s: watchdog timeout\n", xdcsc->sc_dev.dv_xname);
		xdc_reset(xdcsc, 0, XD_RSET_NONE, XD_ERR_FAIL, NULL);
	}
	splx(s);

	/* until next time: rearm the watchdog timeout */

	timeout_add(&xdcsc->xdc_tick_tmo, XDC_TICKCNT);
}
2162:
2163: /*
2164: * xdc_ioctlcmd: this function provides a user level interface to the
2165: * controller via ioctl. this allows "format" programs to be written
2166: * in user code, and is also useful for some debugging. we return
2167: * an error code. called at user priority.
2168: */
2169: int
2170: xdc_ioctlcmd(xd, dev, xio)
2171: struct xd_softc *xd;
2172: dev_t dev;
2173: struct xd_iocmd *xio;
2174:
2175: {
2176: int s, err, rqno, dummy;
2177: caddr_t dvmabuf = NULL, buf = NULL;
2178: struct xdc_softc *xdcsc;
2179:
2180: /* check sanity of requested command */
2181:
2182: switch (xio->cmd) {
2183:
2184: case XDCMD_NOP: /* no op: everything should be zero */
2185: if (xio->subfn || xio->dptr || xio->dlen ||
2186: xio->block || xio->sectcnt)
2187: return (EINVAL);
2188: break;
2189:
2190: case XDCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */
2191: case XDCMD_WR:
2192: if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS ||
2193: xio->sectcnt * XDFM_BPS != xio->dlen || xio->dptr == NULL)
2194: return (EINVAL);
2195: break;
2196:
2197: case XDCMD_SK: /* seek: doesn't seem useful to export this */
2198: return (EINVAL);
2199:
2200: case XDCMD_WRP: /* write parameters */
2201: return (EINVAL);/* not useful, except maybe drive
2202: * parameters... but drive parameters should
2203: * go via disklabel changes */
2204:
2205: case XDCMD_RDP: /* read parameters */
2206: if (xio->subfn != XDFUN_DRV ||
2207: xio->dlen || xio->block || xio->dptr)
2208: return (EINVAL); /* allow read drive params to
2209: * get hw_spt */
2210: xio->sectcnt = xd->hw_spt; /* we already know the answer */
2211: return (0);
2212: break;
2213:
2214: case XDCMD_XRD: /* extended read/write */
2215: case XDCMD_XWR:
2216:
2217: switch (xio->subfn) {
2218:
2219: case XDFUN_THD:/* track headers */
2220: if (xio->sectcnt != xd->hw_spt ||
2221: (xio->block % xd->nsect) != 0 ||
2222: xio->dlen != XD_IOCMD_HSZ * xd->hw_spt ||
2223: xio->dptr == NULL)
2224: return (EINVAL);
2225: xio->sectcnt = 0;
2226: break;
2227:
2228: case XDFUN_FMT:/* NOTE: also XDFUN_VFY */
2229: if (xio->cmd == XDCMD_XRD)
2230: return (EINVAL); /* no XDFUN_VFY */
2231: if (xio->sectcnt || xio->dlen ||
2232: (xio->block % xd->nsect) != 0 || xio->dptr)
2233: return (EINVAL);
2234: break;
2235:
2236: case XDFUN_HDR:/* header, header verify, data, data ECC */
2237: return (EINVAL); /* not yet */
2238:
2239: case XDFUN_DM: /* defect map */
2240: case XDFUN_DMX:/* defect map (alternate location) */
2241: if (xio->sectcnt || xio->dlen != XD_IOCMD_DMSZ ||
2242: (xio->block % xd->nsect) != 0 || xio->dptr == NULL)
2243: return (EINVAL);
2244: break;
2245:
2246: default:
2247: return (EINVAL);
2248: }
2249: break;
2250:
2251: case XDCMD_TST: /* diagnostics */
2252: return (EINVAL);
2253:
2254: default:
2255: return (EINVAL);/* ??? */
2256: }
2257:
2258: /* create DVMA buffer for request if needed */
2259:
2260: if (xio->dlen) {
2261: dvmabuf = dvma_malloc(xio->dlen, &buf, M_WAITOK);
2262: if (xio->cmd == XDCMD_WR || xio->cmd == XDCMD_XWR) {
2263: if ((err = copyin(xio->dptr, buf, xio->dlen)) != 0) {
2264: dvma_free(dvmabuf, xio->dlen, &buf);
2265: return (err);
2266: }
2267: }
2268: }
2269: /* do it! */
2270:
2271: err = 0;
2272: xdcsc = xd->parent;
2273: s = splbio();
2274: rqno = xdc_cmd(xdcsc, xio->cmd, xio->subfn, xd->xd_drive, xio->block,
2275: xio->sectcnt, dvmabuf, XD_SUB_WAIT);
2276: if (rqno == XD_ERR_FAIL) {
2277: err = EIO;
2278: goto done;
2279: }
2280: xio->errno = xdcsc->reqs[rqno].errno;
2281: xio->tries = xdcsc->reqs[rqno].tries;
2282: XDC_DONE(xdcsc, rqno, dummy);
2283:
2284: if (xio->cmd == XDCMD_RD || xio->cmd == XDCMD_XRD)
2285: err = copyout(buf, xio->dptr, xio->dlen);
2286:
2287: done:
2288: splx(s);
2289: if (dvmabuf)
2290: dvma_free(dvmabuf, xio->dlen, &buf);
2291: return (err);
2292: }
2293:
2294: /*
2295: * xdc_e2str: convert error code number into an error string
2296: */
2297: char *
2298: xdc_e2str(no)
2299: int no;
2300: {
2301: switch (no) {
2302: case XD_ERR_FAIL:
2303: return ("Software fatal error");
2304: case XD_ERR_AOK:
2305: return ("Successful completion");
2306: case XD_ERR_ICYL:
2307: return ("Illegal cylinder address");
2308: case XD_ERR_IHD:
2309: return ("Illegal head address");
2310: case XD_ERR_ISEC:
2311: return ("Illgal sector address");
2312: case XD_ERR_CZER:
2313: return ("Count zero");
2314: case XD_ERR_UIMP:
2315: return ("Unimplemented command");
2316: case XD_ERR_IF1:
2317: return ("Illegal field length 1");
2318: case XD_ERR_IF2:
2319: return ("Illegal field length 2");
2320: case XD_ERR_IF3:
2321: return ("Illegal field length 3");
2322: case XD_ERR_IF4:
2323: return ("Illegal field length 4");
2324: case XD_ERR_IF5:
2325: return ("Illegal field length 5");
2326: case XD_ERR_IF6:
2327: return ("Illegal field length 6");
2328: case XD_ERR_IF7:
2329: return ("Illegal field length 7");
2330: case XD_ERR_ISG:
2331: return ("Illegal scatter/gather length");
2332: case XD_ERR_ISPT:
2333: return ("Not enough sectors per track");
2334: case XD_ERR_ALGN:
2335: return ("Next IOPB address alignment error");
2336: case XD_ERR_SGAL:
2337: return ("Scatter/gather address alignment error");
2338: case XD_ERR_SGEC:
2339: return ("Scatter/gather with auto-ECC");
2340: case XD_ERR_SECC:
2341: return ("Soft ECC corrected");
2342: case XD_ERR_SIGN:
2343: return ("ECC ignored");
2344: case XD_ERR_ASEK:
2345: return ("Auto-seek retry recovered");
2346: case XD_ERR_RTRY:
2347: return ("Soft retry recovered");
2348: case XD_ERR_HECC:
2349: return ("Hard data ECC");
2350: case XD_ERR_NHDR:
2351: return ("Header not found");
2352: case XD_ERR_NRDY:
2353: return ("Drive not ready");
2354: case XD_ERR_TOUT:
2355: return ("Operation timeout");
2356: case XD_ERR_VTIM:
2357: return ("VMEDMA timeout");
2358: case XD_ERR_DSEQ:
2359: return ("Disk sequencer error");
2360: case XD_ERR_HDEC:
2361: return ("Header ECC error");
2362: case XD_ERR_RVFY:
2363: return ("Read verify");
2364: case XD_ERR_VFER:
2365: return ("Fatal VMEDMA error");
2366: case XD_ERR_VBUS:
2367: return ("VMEbus error");
2368: case XD_ERR_DFLT:
2369: return ("Drive faulted");
2370: case XD_ERR_HECY:
2371: return ("Header error/cylinder");
2372: case XD_ERR_HEHD:
2373: return ("Header error/head");
2374: case XD_ERR_NOCY:
2375: return ("Drive not on-cylinder");
2376: case XD_ERR_SEEK:
2377: return ("Seek error");
2378: case XD_ERR_ILSS:
2379: return ("Illegal sector size");
2380: case XD_ERR_SEC:
2381: return ("Soft ECC");
2382: case XD_ERR_WPER:
2383: return ("Write-protect error");
2384: case XD_ERR_IRAM:
2385: return ("IRAM self test failure");
2386: case XD_ERR_MT3:
2387: return ("Maintenance test 3 failure (DSKCEL RAM)");
2388: case XD_ERR_MT4:
2389: return ("Maintenance test 4 failure (header shift reg)");
2390: case XD_ERR_MT5:
2391: return ("Maintenance test 5 failure (VMEDMA regs)");
2392: case XD_ERR_MT6:
2393: return ("Maintenance test 6 failure (REGCEL chip)");
2394: case XD_ERR_MT7:
2395: return ("Maintenance test 7 failure (buffer parity)");
2396: case XD_ERR_MT8:
2397: return ("Maintenance test 8 failure (disk FIFO)");
2398: case XD_ERR_IOCK:
2399: return ("IOPB checksum miscompare");
2400: case XD_ERR_IODM:
2401: return ("IOPB DMA fatal");
2402: case XD_ERR_IOAL:
2403: return ("IOPB address alignment error");
2404: case XD_ERR_FIRM:
2405: return ("Firmware error");
2406: case XD_ERR_MMOD:
2407: return ("Illegal maintenance mode test number");
2408: case XD_ERR_ACFL:
2409: return ("ACFAIL asserted");
2410: default:
2411: return ("Unknown error");
2412: }
2413: }
/* CVSweb page footer (extraction artifact) -- not part of the source */