Annotation of sys/dev/ic/twe.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: twe.c,v 1.27 2006/12/29 13:04:37 pedro Exp $ */
2:
3: /*
4: * Copyright (c) 2000-2002 Michael Shalayeff. All rights reserved.
5: *
6: * The SCSI emulation layer is derived from gdt(4) driver,
7: * Copyright (c) 1999, 2000 Niklas Hallqvist. All rights reserved.
8: *
9: * Redistribution and use in source and binary forms, with or without
10: * modification, are permitted provided that the following conditions
11: * are met:
12: * 1. Redistributions of source code must retain the above copyright
13: * notice, this list of conditions and the following disclaimer.
14: * 2. Redistributions in binary form must reproduce the above copyright
15: * notice, this list of conditions and the following disclaimer in the
16: * documentation and/or other materials provided with the distribution.
17: *
18: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21: * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
22: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24: * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26: * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27: * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28: * THE POSSIBILITY OF SUCH DAMAGE.
29: */
30:
31: /* #define TWE_DEBUG */
32:
33: #include <sys/param.h>
34: #include <sys/systm.h>
35: #include <sys/buf.h>
36: #include <sys/device.h>
37: #include <sys/kernel.h>
38: #include <sys/malloc.h>
39: #include <sys/proc.h>
40: #include <sys/kthread.h>
41:
42: #include <machine/bus.h>
43:
44: #include <scsi/scsi_all.h>
45: #include <scsi/scsi_disk.h>
46: #include <scsi/scsiconf.h>
47:
48: #include <dev/ic/twereg.h>
49: #include <dev/ic/twevar.h>
50:
/*
 * Debug support: TWE_DPRINTF(mask, args) prints only when the
 * corresponding bit is set in the run-time twe_debug mask, and
 * compiles away entirely unless TWE_DEBUG is defined above.
 */
#ifdef TWE_DEBUG
#define	TWE_DPRINTF(m,a)	if (twe_debug & (m)) printf a
#define	TWE_D_CMD	0x0001	/* command submission/completion */
#define	TWE_D_INTR	0x0002	/* interrupt/thread processing */
#define	TWE_D_MISC	0x0004	/* attach and other glue */
#define	TWE_D_DMA	0x0008	/* dma map and segment handling */
#define	TWE_D_AEN	0x0010	/* asynchronous event notifications */
int twe_debug = 0;
#else
#define	TWE_DPRINTF(m,a)	/* m, a */
#endif
62:
/* autoconf(9) glue: device class for "twe" units. */
struct cfdriver twe_cd = {
	NULL, "twe", DV_DULL
};

int twe_scsi_cmd(struct scsi_xfer *);

/* SCSI midlayer entry points for the emulated SCSI bus. */
struct scsi_adapter twe_switch = {
	twe_scsi_cmd, tweminphys, 0, 0,
};

/* no per-device callbacks are needed */
struct scsi_device twe_dev = {
	NULL, NULL, NULL, NULL
};

/* internal helpers, defined below */
static __inline struct twe_ccb *twe_get_ccb(struct twe_softc *sc);
static __inline void twe_put_ccb(struct twe_ccb *ccb);
void twe_dispose(struct twe_softc *sc);
int twe_cmd(struct twe_ccb *ccb, int flags, int wait);
int twe_start(struct twe_ccb *ccb, int wait);
int twe_complete(struct twe_ccb *ccb);
int twe_done(struct twe_softc *sc, struct twe_ccb *ccb);
void twe_copy_internal_data(struct scsi_xfer *xs, void *v, size_t size);
void twe_thread_create(void *v);
void twe_thread(void *v);
87:
88:
89: static __inline struct twe_ccb *
90: twe_get_ccb(sc)
91: struct twe_softc *sc;
92: {
93: struct twe_ccb *ccb;
94:
95: ccb = TAILQ_LAST(&sc->sc_free_ccb, twe_queue_head);
96: if (ccb)
97: TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
98: return ccb;
99: }
100:
101: static __inline void
102: twe_put_ccb(ccb)
103: struct twe_ccb *ccb;
104: {
105: struct twe_softc *sc = ccb->ccb_sc;
106:
107: ccb->ccb_state = TWE_CCB_FREE;
108: TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
109: }
110:
111: void
112: twe_dispose(sc)
113: struct twe_softc *sc;
114: {
115: register struct twe_ccb *ccb;
116: if (sc->sc_cmdmap != NULL) {
117: bus_dmamap_destroy(sc->dmat, sc->sc_cmdmap);
118: /* traverse the ccbs and destroy the maps */
119: for (ccb = &sc->sc_ccbs[TWE_MAXCMDS - 1]; ccb >= sc->sc_ccbs; ccb--)
120: if (ccb->ccb_dmamap)
121: bus_dmamap_destroy(sc->dmat, ccb->ccb_dmamap);
122: }
123: bus_dmamem_unmap(sc->dmat, sc->sc_cmds,
124: sizeof(struct twe_cmd) * TWE_MAXCMDS);
125: bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
126: }
127:
128: int
129: twe_attach(sc)
130: struct twe_softc *sc;
131: {
132: struct scsibus_attach_args saa;
133: /* this includes a buffer for drive config req, and a capacity req */
134: u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
135: struct twe_param *pb = (void *)
136: (((u_long)param_buf + TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));
137: struct twe_param *cap = (void *)((u_int8_t *)pb + TWE_SECTOR_SIZE);
138: struct twe_ccb *ccb;
139: struct twe_cmd *cmd;
140: u_int32_t status;
141: int error, i, retry, nunits, nseg;
142: const char *errstr;
143: twe_lock_t lock;
144: paddr_t pa;
145:
146: error = bus_dmamem_alloc(sc->dmat, sizeof(struct twe_cmd) * TWE_MAXCMDS,
147: PAGE_SIZE, 0, sc->sc_cmdseg, 1, &nseg, BUS_DMA_NOWAIT);
148: if (error) {
149: printf(": cannot allocate commands (%d)\n", error);
150: return (1);
151: }
152:
153: error = bus_dmamem_map(sc->dmat, sc->sc_cmdseg, nseg,
154: sizeof(struct twe_cmd) * TWE_MAXCMDS,
155: (caddr_t *)&sc->sc_cmds, BUS_DMA_NOWAIT);
156: if (error) {
157: printf(": cannot map commands (%d)\n", error);
158: bus_dmamem_free(sc->dmat, sc->sc_cmdseg, 1);
159: return (1);
160: }
161:
162: error = bus_dmamap_create(sc->dmat,
163: sizeof(struct twe_cmd) * TWE_MAXCMDS, TWE_MAXCMDS,
164: sizeof(struct twe_cmd) * TWE_MAXCMDS, 0,
165: BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_cmdmap);
166: if (error) {
167: printf(": cannot create ccb cmd dmamap (%d)\n", error);
168: twe_dispose(sc);
169: return (1);
170: }
171: error = bus_dmamap_load(sc->dmat, sc->sc_cmdmap, sc->sc_cmds,
172: sizeof(struct twe_cmd) * TWE_MAXCMDS, NULL, BUS_DMA_NOWAIT);
173: if (error) {
174: printf(": cannot load command dma map (%d)\n", error);
175: twe_dispose(sc);
176: return (1);
177: }
178:
179: TAILQ_INIT(&sc->sc_ccb2q);
180: TAILQ_INIT(&sc->sc_ccbq);
181: TAILQ_INIT(&sc->sc_free_ccb);
182: TAILQ_INIT(&sc->sc_done_ccb);
183:
184: lockinit(&sc->sc_lock, PWAIT, "twelk", 0, 0);
185:
186: pa = sc->sc_cmdmap->dm_segs[0].ds_addr +
187: sizeof(struct twe_cmd) * (TWE_MAXCMDS - 1);
188: for (cmd = (struct twe_cmd *)sc->sc_cmds + TWE_MAXCMDS - 1;
189: cmd >= (struct twe_cmd *)sc->sc_cmds; cmd--, pa -= sizeof(*cmd)) {
190:
191: cmd->cmd_index = cmd - (struct twe_cmd *)sc->sc_cmds;
192: ccb = &sc->sc_ccbs[cmd->cmd_index];
193: error = bus_dmamap_create(sc->dmat,
194: TWE_MAXFER, TWE_MAXOFFSETS, TWE_MAXFER, 0,
195: BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
196: if (error) {
197: printf(": cannot create ccb dmamap (%d)\n", error);
198: twe_dispose(sc);
199: return (1);
200: }
201: ccb->ccb_sc = sc;
202: ccb->ccb_cmd = cmd;
203: ccb->ccb_cmdpa = pa;
204: ccb->ccb_state = TWE_CCB_FREE;
205: TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
206: }
207:
208: for (errstr = NULL, retry = 3; retry--; ) {
209: int veseen_srst;
210: u_int16_t aen;
211:
212: if (errstr)
213: TWE_DPRINTF(TWE_D_MISC, ("%s ", errstr));
214:
215: for (i = 350000; i--; DELAY(100)) {
216: status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
217: if (status & TWE_STAT_CPURDY)
218: break;
219: }
220:
221: if (!(status & TWE_STAT_CPURDY)) {
222: errstr = ": card CPU is not ready\n";
223: continue;
224: }
225:
226: /* soft reset, disable ints */
227: bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
228: TWE_CTRL_SRST |
229: TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR |
230: TWE_CTRL_MCMDI | TWE_CTRL_MRDYI |
231: TWE_CTRL_MINT);
232:
233: for (i = 350000; i--; DELAY(100)) {
234: status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
235: if (status & TWE_STAT_ATTNI)
236: break;
237: }
238:
239: if (!(status & TWE_STAT_ATTNI)) {
240: errstr = ": cannot get card's attention\n";
241: continue;
242: }
243:
244: /* drain aen queue */
245: for (veseen_srst = 0, aen = -1; aen != TWE_AEN_QEMPTY; ) {
246:
247: if ((ccb = twe_get_ccb(sc)) == NULL) {
248: errstr = ": out of ccbs\n";
249: continue;
250: }
251:
252: ccb->ccb_xs = NULL;
253: ccb->ccb_data = pb;
254: ccb->ccb_length = TWE_SECTOR_SIZE;
255: ccb->ccb_state = TWE_CCB_READY;
256: cmd = ccb->ccb_cmd;
257: cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
258: cmd->cmd_op = TWE_CMD_GPARAM;
259: cmd->cmd_param.count = 1;
260:
261: pb->table_id = TWE_PARAM_AEN;
262: pb->param_id = 2;
263: pb->param_size = 2;
264:
265: if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
266: errstr = ": error draining attention queue\n";
267: break;
268: }
269: aen = *(u_int16_t *)pb->data;
270: TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
271: if (aen == TWE_AEN_SRST)
272: veseen_srst++;
273: }
274:
275: if (!veseen_srst) {
276: errstr = ": we don't get it\n";
277: continue;
278: }
279:
280: if (status & TWE_STAT_CPUERR) {
281: errstr = ": card CPU error detected\n";
282: continue;
283: }
284:
285: if (status & TWE_STAT_PCIPAR) {
286: errstr = ": PCI parity error detected\n";
287: continue;
288: }
289:
290: if (status & TWE_STAT_QUEUEE ) {
291: errstr = ": queuing error detected\n";
292: continue;
293: }
294:
295: if (status & TWE_STAT_PCIABR) {
296: errstr = ": PCI abort\n";
297: continue;
298: }
299:
300: while (!(status & TWE_STAT_RQE)) {
301: bus_space_read_4(sc->iot, sc->ioh, TWE_READYQUEUE);
302: status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
303: }
304:
305: break;
306: }
307:
308: if (retry < 0) {
309: printf(errstr);
310: twe_dispose(sc);
311: return 1;
312: }
313:
314: if ((ccb = twe_get_ccb(sc)) == NULL) {
315: printf(": out of ccbs\n");
316: twe_dispose(sc);
317: return 1;
318: }
319:
320: ccb->ccb_xs = NULL;
321: ccb->ccb_data = pb;
322: ccb->ccb_length = TWE_SECTOR_SIZE;
323: ccb->ccb_state = TWE_CCB_READY;
324: cmd = ccb->ccb_cmd;
325: cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
326: cmd->cmd_op = TWE_CMD_GPARAM;
327: cmd->cmd_param.count = 1;
328:
329: pb->table_id = TWE_PARAM_UC;
330: pb->param_id = TWE_PARAM_UC;
331: pb->param_size = TWE_MAX_UNITS;
332: if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
333: printf(": failed to fetch unit parameters\n");
334: twe_dispose(sc);
335: return 1;
336: }
337:
338: /* we are assuming last read status was good */
339: printf(": Escalade V%d.%d\n", TWE_MAJV(status), TWE_MINV(status));
340:
341: for (nunits = i = 0; i < TWE_MAX_UNITS; i++) {
342: if (pb->data[i] == 0)
343: continue;
344:
345: if ((ccb = twe_get_ccb(sc)) == NULL) {
346: printf(": out of ccbs\n");
347: twe_dispose(sc);
348: return 1;
349: }
350:
351: ccb->ccb_xs = NULL;
352: ccb->ccb_data = cap;
353: ccb->ccb_length = TWE_SECTOR_SIZE;
354: ccb->ccb_state = TWE_CCB_READY;
355: cmd = ccb->ccb_cmd;
356: cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
357: cmd->cmd_op = TWE_CMD_GPARAM;
358: cmd->cmd_param.count = 1;
359:
360: cap->table_id = TWE_PARAM_UI + i;
361: cap->param_id = 4;
362: cap->param_size = 4; /* 4 bytes */
363: lock = TWE_LOCK(sc);
364: if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
365: TWE_UNLOCK(sc, lock);
366: printf("%s: error fetching capacity for unit %d\n",
367: sc->sc_dev.dv_xname, i);
368: continue;
369: }
370: TWE_UNLOCK(sc, lock);
371:
372: nunits++;
373: sc->sc_hdr[i].hd_present = 1;
374: sc->sc_hdr[i].hd_devtype = 0;
375: sc->sc_hdr[i].hd_size = letoh32(*(u_int32_t *)cap->data);
376: TWE_DPRINTF(TWE_D_MISC, ("twed%d: size=%d\n",
377: i, sc->sc_hdr[i].hd_size));
378: }
379:
380: if (!nunits)
381: nunits++;
382:
383: /* TODO: fetch & print cache params? */
384:
385: sc->sc_link.adapter_softc = sc;
386: sc->sc_link.adapter = &twe_switch;
387: sc->sc_link.adapter_target = TWE_MAX_UNITS;
388: sc->sc_link.device = &twe_dev;
389: sc->sc_link.openings = TWE_MAXCMDS / nunits;
390: sc->sc_link.adapter_buswidth = TWE_MAX_UNITS;
391:
392: bzero(&saa, sizeof(saa));
393: saa.saa_sc_link = &sc->sc_link;
394:
395: config_found(&sc->sc_dev, &saa, scsiprint);
396:
397: kthread_create_deferred(twe_thread_create, sc);
398:
399: return (0);
400: }
401:
/*
 * Deferred kthread bootstrap: create the per-controller worker
 * thread and, once it exists, acknowledge any stale interrupt
 * causes and enable interrupts on the card.
 */
void
twe_thread_create(void *v)
{
	struct twe_softc *sc = v;

	if (kthread_create(twe_thread, sc, &sc->sc_thread,
	    "%s", sc->sc_dev.dv_xname)) {
		/* TODO disable twe */
		printf("%s: failed to create kernel thread, disabled\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/*
	 * ack all before enable, cannot be done in one
	 * operation as it seems clear is not processed
	 * if enable is specified.
	 */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_CHOSTI | TWE_CTRL_CATTNI | TWE_CTRL_CERR);
	TWE_DPRINTF(TWE_D_CMD, ("stat=%b ",
	    bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS), TWE_STAT_BITS));
	/* enable interrupts */
	bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
	    TWE_CTRL_EINT | TWE_CTRL_ERDYI |
	    /*TWE_CTRL_HOSTI |*/ TWE_CTRL_MCMDI);
}
431:
/*
 * Controller worker thread: completes ccbs that the interrupt
 * handler placed on sc_done_ccb, and pushes pre-queued commands
 * (sc_ccb2q) into the card's command queue while there is room.
 * Sleeps on sc until woken by twe_intr() or twe_start().
 */
void
twe_thread(v)
	void *v;
{
	struct twe_softc *sc = v;
	struct twe_ccb *ccb;
	twe_lock_t lock;
	u_int32_t status;
	int err;

	splbio();
	for (;;) {
		lock = TWE_LOCK(sc);

		/* finish everything the interrupt handler collected */
		while (!TAILQ_EMPTY(&sc->sc_done_ccb)) {
			ccb = TAILQ_FIRST(&sc->sc_done_ccb);
			TAILQ_REMOVE(&sc->sc_done_ccb, ccb, ccb_link);
			if ((err = twe_done(sc, ccb)))
				printf("%s: done failed (%d)\n",
				    sc->sc_dev.dv_xname, err);
		}

		status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
		TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		/* submit pre-queued commands while the card's queue has room */
		while (!(status & TWE_STAT_CQF) &&
		    !TAILQ_EMPTY(&sc->sc_ccb2q)) {

			ccb = TAILQ_LAST(&sc->sc_ccb2q, twe_queue_head);
			TAILQ_REMOVE(&sc->sc_ccb2q, ccb, ccb_link);

			ccb->ccb_state = TWE_CCB_QUEUED;
			TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
			bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
			    ccb->ccb_cmdpa);

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_thread stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}

		/* queue full: ask for an interrupt once there is room again */
		if (!TAILQ_EMPTY(&sc->sc_ccb2q))
			bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
			    TWE_CTRL_ECMDI);

		TWE_UNLOCK(sc, lock);
		sc->sc_thread_on = 1;
		tsleep(sc, PWAIT, "twespank", 0);
	}
}
482:
/*
 * Set up DMA for a prepared ccb and submit it via twe_start().
 * The card needs TWE_ALIGN-aligned data buffers; unaligned ones
 * are bounced through a freshly allocated aligned buffer
 * (ccb_realdata remembers the original for the copy-back in
 * twe_done()).  With wait set, polls to completion via
 * twe_complete().  On any failure the ccb is returned to the
 * free pool and a non-zero error is returned.
 */
int
twe_cmd(ccb, flags, wait)
	struct twe_ccb *ccb;
	int flags, wait;
{
	struct twe_softc *sc = ccb->ccb_sc;
	bus_dmamap_t dmap;
	struct twe_cmd *cmd;
	struct twe_segs *sgp;
	int error, i;

	/* bounce unaligned buffers through an aligned allocation */
	if (ccb->ccb_data && ((u_long)ccb->ccb_data & (TWE_ALIGN - 1))) {
		TWE_DPRINTF(TWE_D_DMA, ("data=%p is unaligned ",ccb->ccb_data));
		ccb->ccb_realdata = ccb->ccb_data;

		error = bus_dmamem_alloc(sc->dmat, ccb->ccb_length, PAGE_SIZE,
		    0, ccb->ccb_2bseg, TWE_MAXOFFSETS, &ccb->ccb_2nseg,
		    BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf alloc failed(%d) ", error));
			twe_put_ccb(ccb);
			return (ENOMEM);
		}

		error = bus_dmamem_map(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg,
		    ccb->ccb_length, (caddr_t *)&ccb->ccb_data, BUS_DMA_NOWAIT);
		if (error) {
			TWE_DPRINTF(TWE_D_DMA, ("2buf map failed(%d) ", error));
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
			twe_put_ccb(ccb);
			return (ENOMEM);
		}
		/* stage outgoing data into the bounce buffer */
		bcopy(ccb->ccb_realdata, ccb->ccb_data, ccb->ccb_length);
	} else
		ccb->ccb_realdata = NULL;

	dmap = ccb->ccb_dmamap;
	cmd = ccb->ccb_cmd;
	cmd->cmd_status = 0;

	if (ccb->ccb_data) {
		error = bus_dmamap_load(sc->dmat, dmap, ccb->ccb_data,
		    ccb->ccb_length, NULL, flags);
		if (error) {
			if (error == EFBIG)
				printf("more than %d dma segs\n", TWE_MAXOFFSETS);
			else
				printf("error %d loading dma map\n", error);

			if (ccb->ccb_realdata) {
				bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
				    ccb->ccb_length);
				bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
				    ccb->ccb_2nseg);
			}
			twe_put_ccb(ccb);
			return error;
		}
		/* load addresses into command */
		switch (cmd->cmd_op) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_SPARAM:
			sgp = cmd->cmd_param.segs;
			break;
		case TWE_CMD_READ:
		case TWE_CMD_WRITE:
			sgp = cmd->cmd_io.segs;
			break;
		default:
			/* no data transfer */
			TWE_DPRINTF(TWE_D_DMA, ("twe_cmd: unknown sgp op=%x\n",
			    cmd->cmd_op));
			sgp = NULL;
			break;
		}
		TWE_DPRINTF(TWE_D_DMA, ("data=%p<", ccb->ccb_data));
		if (sgp) {
			/*
			 * we know that size is in the upper byte,
			 * and we do not worry about overflow
			 */
			cmd->cmd_op += (2 * dmap->dm_nsegs) << 8;
			bzero (sgp, TWE_MAXOFFSETS * sizeof(*sgp));
			for (i = 0; i < dmap->dm_nsegs; i++, sgp++) {
				sgp->twes_addr = htole32(dmap->dm_segs[i].ds_addr);
				sgp->twes_len = htole32(dmap->dm_segs[i].ds_len);
				TWE_DPRINTF(TWE_D_DMA, ("%x[%x] ",
				    dmap->dm_segs[i].ds_addr,
				    dmap->dm_segs[i].ds_len));
			}
		}
		TWE_DPRINTF(TWE_D_DMA, ("> "));
		bus_dmamap_sync(sc->dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
	/* make the command packet itself visible to the card */
	bus_dmamap_sync(sc->dmat, sc->sc_cmdmap, 0, sc->sc_cmdmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if ((error = twe_start(ccb, wait))) {
		bus_dmamap_unload(sc->dmat, dmap);
		if (ccb->ccb_realdata) {
			bus_dmamem_unmap(sc->dmat, ccb->ccb_data,
			    ccb->ccb_length);
			bus_dmamem_free(sc->dmat, ccb->ccb_2bseg,
			    ccb->ccb_2nseg);
		}
		twe_put_ccb(ccb);
		return (error);
	}

	return wait? twe_complete(ccb) : 0;
}
595:
596: int
597: twe_start(ccb, wait)
598: struct twe_ccb *ccb;
599: int wait;
600: {
601: struct twe_softc*sc = ccb->ccb_sc;
602: struct twe_cmd *cmd = ccb->ccb_cmd;
603: u_int32_t status;
604: int i;
605:
606: cmd->cmd_op = htole16(cmd->cmd_op);
607:
608: if (!wait) {
609:
610: TWE_DPRINTF(TWE_D_CMD, ("prequeue(%d) ", cmd->cmd_index));
611: ccb->ccb_state = TWE_CCB_PREQUEUED;
612: TAILQ_INSERT_TAIL(&sc->sc_ccb2q, ccb, ccb_link);
613: wakeup(sc);
614: return 0;
615: }
616:
617: for (i = 1000; i--; DELAY(10)) {
618:
619: status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
620: if (!(status & TWE_STAT_CQF))
621: break;
622: TWE_DPRINTF(TWE_D_CMD, ("twe_start stat=%b ",
623: status & TWE_STAT_FLAGS, TWE_STAT_BITS));
624: }
625:
626: if (!(status & TWE_STAT_CQF)) {
627: bus_space_write_4(sc->iot, sc->ioh, TWE_COMMANDQUEUE,
628: ccb->ccb_cmdpa);
629:
630: TWE_DPRINTF(TWE_D_CMD, ("queue(%d) ", cmd->cmd_index));
631: ccb->ccb_state = TWE_CCB_QUEUED;
632: TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
633: return 0;
634:
635: } else {
636:
637: printf("%s: twe_start(%d) timed out\n",
638: sc->sc_dev.dv_xname, cmd->cmd_index);
639:
640: return 1;
641: }
642: }
643:
/*
 * Poll for command completion.  Repeatedly drains the card's
 * ready queue, finishing every ccb found there via twe_done(),
 * until the given ccb is seen or the polling budget (derived from
 * xs->timeout, default 35s) is exhausted.  Returns 0 when the ccb
 * completed, 1 on timeout.
 */
int
twe_complete(ccb)
	struct twe_ccb *ccb;
{
	struct twe_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_xs;
	int i;

	for (i = 100 * (xs? xs->timeout : 35000); i--; DELAY(10)) {
		u_int32_t status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);

		/* TWE_DPRINTF(TWE_D_CMD, ("twe_intr stat=%b ",
		    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */

		/* pull entries while the ready queue is non-empty */
		while (!(status & TWE_STAT_RQE)) {
			struct twe_ccb *ccb1;
			u_int32_t ready;

			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			TWE_DPRINTF(TWE_D_CMD, ("ready=%x ", ready));

			ccb1 = &sc->sc_ccbs[TWE_READYID(ready)];
			TAILQ_REMOVE(&sc->sc_ccbq, ccb1, ccb_link);
			ccb1->ccb_state = TWE_CCB_DONE;
			if (!twe_done(sc, ccb1) && ccb1 == ccb) {
				TWE_DPRINTF(TWE_D_CMD, ("complete\n"));
				return 0;
			}

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			/* TWE_DPRINTF(TWE_D_CMD, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS)); */
		}
	}

	return 1;
}
683:
/*
 * Finish a completed ccb: sync and unload its DMA map, copy any
 * bounce-buffer data back to the caller's buffer, return the ccb
 * to the free pool and, for SCSI-originated commands, complete
 * the xfer with scsi_done().  Returns 0 on success, 1 if the ccb
 * was not in the DONE state.
 */
int
twe_done(sc, ccb)
	struct twe_softc *sc;
	struct twe_ccb *ccb;
{
	struct twe_cmd *cmd = ccb->ccb_cmd;
	struct scsi_xfer *xs = ccb->ccb_xs;
	bus_dmamap_t dmap;
	twe_lock_t lock;

	TWE_DPRINTF(TWE_D_CMD, ("done(%d) ", cmd->cmd_index));

	if (ccb->ccb_state != TWE_CCB_DONE) {
		printf("%s: undone ccb %d ready\n",
		    sc->sc_dev.dv_xname, cmd->cmd_index);
		return 1;
	}

	dmap = ccb->ccb_dmamap;
	if (xs) {
		/* these two opcodes carry no data, so nothing was mapped */
		if (xs->cmd->opcode != PREVENT_ALLOW &&
		    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, (xs->flags & SCSI_DATA_IN) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
		}
	} else {
		/* internal command: sync direction follows the opcode */
		switch (letoh16(cmd->cmd_op)) {
		case TWE_CMD_GPARAM:
		case TWE_CMD_READ:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		case TWE_CMD_SPARAM:
		case TWE_CMD_WRITE:
			bus_dmamap_sync(sc->dmat, dmap, 0,
			    dmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dmat, dmap);
			break;
		default:
			/* no data */
			break;
		}
	}

	/* unaligned transfer: copy results back out of the bounce buffer */
	if (ccb->ccb_realdata) {
		bcopy(ccb->ccb_data, ccb->ccb_realdata, ccb->ccb_length);
		bus_dmamem_unmap(sc->dmat, ccb->ccb_data, ccb->ccb_length);
		bus_dmamem_free(sc->dmat, ccb->ccb_2bseg, ccb->ccb_2nseg);
	}

	lock = TWE_LOCK(sc);
	twe_put_ccb(ccb);

	if (xs) {
		xs->resid = 0;
		xs->flags |= ITSDONE;
		scsi_done(xs);
	}
	TWE_UNLOCK(sc, lock);

	return 0;
}
749:
750: void
751: tweminphys(bp)
752: struct buf *bp;
753: {
754: if (bp->b_bcount > TWE_MAXFER)
755: bp->b_bcount = TWE_MAXFER;
756: minphys(bp);
757: }
758:
759: void
760: twe_copy_internal_data(xs, v, size)
761: struct scsi_xfer *xs;
762: void *v;
763: size_t size;
764: {
765: size_t copy_cnt;
766:
767: TWE_DPRINTF(TWE_D_MISC, ("twe_copy_internal_data "));
768:
769: if (!xs->datalen)
770: printf("uio move is not yet supported\n");
771: else {
772: copy_cnt = MIN(size, xs->datalen);
773: bcopy(v, xs->data, copy_cnt);
774: }
775: }
776:
/*
 * SCSI midlayer entry point.  INQUIRY, REQUEST SENSE, READ
 * CAPACITY and a few no-op commands are emulated locally from
 * sc_hdr[]; reads, writes and cache syncs are translated into
 * controller commands and queued for the worker thread, or
 * polled to completion under SCSI_POLL (or before the worker
 * thread is running).
 */
int
twe_scsi_cmd(xs)
	struct scsi_xfer *xs;
{
	struct scsi_link *link = xs->sc_link;
	struct twe_softc *sc = link->adapter_softc;
	struct twe_ccb *ccb;
	struct twe_cmd *cmd;
	struct scsi_inquiry_data inq;
	struct scsi_sense_data sd;
	struct scsi_read_cap_data rcd;
	u_int8_t target = link->target;
	u_int32_t blockno, blockcnt;
	struct scsi_rw *rw;
	struct scsi_rw_big *rwb;
	int error, op, flags, wait;
	twe_lock_t lock;

	/* reject addresses that do not map to a present unit */
	if (target >= TWE_MAX_UNITS || !sc->sc_hdr[target].hd_present ||
	    link->lun != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		return (COMPLETE);
	}

	TWE_DPRINTF(TWE_D_CMD, ("twe_scsi_cmd "));

	xs->error = XS_NOERROR;

	switch (xs->cmd->opcode) {
	case TEST_UNIT_READY:
	case START_STOP:
#if 0
	case VERIFY:
#endif
		/* nothing to do; report success */
		TWE_DPRINTF(TWE_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
		    target));
		break;

	case REQUEST_SENSE:
		/* fabricate a "no sense" reply */
		TWE_DPRINTF(TWE_D_CMD, ("REQUEST SENSE tgt %d ", target));
		bzero(&sd, sizeof sd);
		sd.error_code = 0x70;
		sd.segment = 0;
		sd.flags = SKEY_NO_SENSE;
		*(u_int32_t*)sd.info = htole32(0);
		sd.extra_len = 0;
		twe_copy_internal_data(xs, &sd, sizeof sd);
		break;

	case INQUIRY:
		/* fabricate inquiry data from the unit table */
		TWE_DPRINTF(TWE_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
		    sc->sc_hdr[target].hd_devtype));
		bzero(&inq, sizeof inq);
		inq.device =
		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
		inq.dev_qual2 =
		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
		inq.version = 2;
		inq.response_format = 2;
		inq.additional_length = 32;
		strlcpy(inq.vendor, "3WARE ", sizeof inq.vendor);
		snprintf(inq.product, sizeof inq.product, "Host drive #%02d",
		    target);
		strlcpy(inq.revision, " ", sizeof inq.revision);
		twe_copy_internal_data(xs, &inq, sizeof inq);
		break;

	case READ_CAPACITY:
		TWE_DPRINTF(TWE_D_CMD, ("READ CAPACITY tgt %d ", target));
		bzero(&rcd, sizeof rcd);
		/* reply carries the last valid block address, hence - 1 */
		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
		_lto4b(TWE_SECTOR_SIZE, rcd.length);
		twe_copy_internal_data(xs, &rcd, sizeof rcd);
		break;

	case PREVENT_ALLOW:
		TWE_DPRINTF(TWE_D_CMD, ("PREVENT/ALLOW "));
		return (COMPLETE);

	case READ_COMMAND:
	case READ_BIG:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case SYNCHRONIZE_CACHE:
		lock = TWE_LOCK(sc);

		flags = 0;
		if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
			/* A read or write operation. */
			if (xs->cmdlen == 6) {
				rw = (struct scsi_rw *)xs->cmd;
				blockno = _3btol(rw->addr) &
				    (SRW_TOPADDR << 16 | 0xffff);
				blockcnt = rw->length ? rw->length : 0x100;
			} else {
				rwb = (struct scsi_rw_big *)xs->cmd;
				blockno = _4btol(rwb->addr);
				blockcnt = _2btol(rwb->length);
				/* reflect DPO & FUA flags */
				if (xs->cmd->opcode == WRITE_BIG &&
				    rwb->byte2 & 0x18)
					flags = TWE_FLAGS_CACHEDISABLE;
			}
			/* reject transfers past the end of the unit */
			if (blockno >= sc->sc_hdr[target].hd_size ||
			    blockno + blockcnt > sc->sc_hdr[target].hd_size) {
				printf("%s: out of bounds %u-%u >= %u\n",
				    sc->sc_dev.dv_xname, blockno, blockcnt,
				    sc->sc_hdr[target].hd_size);
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				TWE_UNLOCK(sc, lock);
				return (COMPLETE);
			}
		}

		switch (xs->cmd->opcode) {
		case READ_COMMAND:	op = TWE_CMD_READ;	break;
		case READ_BIG:		op = TWE_CMD_READ;	break;
		case WRITE_COMMAND:	op = TWE_CMD_WRITE;	break;
		case WRITE_BIG:		op = TWE_CMD_WRITE;	break;
		default:		op = TWE_CMD_NOP;	break;
		}

		if ((ccb = twe_get_ccb(sc)) == NULL) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			TWE_UNLOCK(sc, lock);
			return (COMPLETE);
		}

		ccb->ccb_xs = xs;
		ccb->ccb_data = xs->data;
		ccb->ccb_length = xs->datalen;
		ccb->ccb_state = TWE_CCB_READY;
		cmd = ccb->ccb_cmd;
		cmd->cmd_unit_host = TWE_UNITHOST(target, 0); /* XXX why 0? */
		cmd->cmd_op = op;
		cmd->cmd_flags = flags;
		cmd->cmd_io.count = htole16(blockcnt);
		cmd->cmd_io.lba = htole32(blockno);
		wait = xs->flags & SCSI_POLL;
		/* no worker thread yet: must poll to completion */
		if (!sc->sc_thread_on)
			wait |= SCSI_POLL;

		if ((error = twe_cmd(ccb, ((xs->flags & SCSI_NOSLEEP)?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK), wait))) {

			TWE_UNLOCK(sc, lock);
			TWE_DPRINTF(TWE_D_CMD, ("failed %p ", xs));
			if (xs->flags & SCSI_POLL) {
				return (TRY_AGAIN_LATER);
			} else {
				xs->error = XS_DRIVER_STUFFUP;
				scsi_done(xs);
				return (COMPLETE);
			}
		}

		TWE_UNLOCK(sc, lock);

		if (wait & SCSI_POLL)
			return (COMPLETE);
		else
			return (SUCCESSFULLY_QUEUED);

	default:
		TWE_DPRINTF(TWE_D_CMD, ("unsupported scsi command %#x tgt %d ",
		    xs->cmd->opcode, target));
		xs->error = XS_DRIVER_STUFFUP;
	}

	return (COMPLETE);
}
951:
/*
 * Interrupt handler.  Moves completed ccbs from the card's ready
 * queue onto sc_done_ccb for the worker thread, acknowledges
 * command interrupts, and drains the AEN queue when an attention
 * interrupt is raised.  Returns non-zero if the interrupt was
 * handled.
 */
int
twe_intr(v)
	void *v;
{
	struct twe_softc *sc = v;
	struct twe_ccb *ccb;
	struct twe_cmd *cmd;
	u_int32_t status;
	twe_lock_t lock;
	int rv = 0;

	status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
	TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
	    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
#if 0
	if (status & TWE_STAT_HOSTI) {

		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CHOSTI);
	}
#endif

	if (status & TWE_STAT_RDYI) {

		/* collect completed commands for the worker thread */
		while (!(status & TWE_STAT_RQE)) {

			u_int32_t ready;

			/*
			 * it seems that reading ready queue
			 * we get all the status bits in each ready word.
			 * i wonder if it's legal to use those for
			 * status and avoid extra read below
			 */
			ready = bus_space_read_4(sc->iot, sc->ioh,
			    TWE_READYQUEUE);

			ccb = &sc->sc_ccbs[TWE_READYID(ready)];
			TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);
			ccb->ccb_state = TWE_CCB_DONE;
			TAILQ_INSERT_TAIL(&sc->sc_done_ccb, ccb, ccb_link);
			rv++;

			status = bus_space_read_4(sc->iot, sc->ioh, TWE_STATUS);
			TWE_DPRINTF(TWE_D_INTR, ("twe_intr stat=%b ",
			    status & TWE_STAT_FLAGS, TWE_STAT_BITS));
		}
	}

	if (status & TWE_STAT_CMDI) {
		rv++;
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_MCMDI);
	}

	/* let the worker thread process what we queued */
	if (rv)
		wakeup(sc);

	if (status & TWE_STAT_ATTNI) {
		u_int16_t aen;

		/*
		 * we know no attentions of interest right now.
		 * one of those would be mirror degradation i think.
		 * or, what else exists in there?
		 * maybe 3ware can answer that?
		 */
		bus_space_write_4(sc->iot, sc->ioh, TWE_CONTROL,
		    TWE_CTRL_CATTNI);

		lock = TWE_LOCK(sc);
		/* read AEN codes until the card reports an empty queue */
		for (aen = -1; aen != TWE_AEN_QEMPTY; ) {
			u_int8_t param_buf[2 * TWE_SECTOR_SIZE + TWE_ALIGN - 1];
			struct twe_param *pb = (void *) (((u_long)param_buf +
			    TWE_ALIGN - 1) & ~(TWE_ALIGN - 1));

			if ((ccb = twe_get_ccb(sc)) == NULL)
				break;

			ccb->ccb_xs = NULL;
			ccb->ccb_data = pb;
			ccb->ccb_length = TWE_SECTOR_SIZE;
			ccb->ccb_state = TWE_CCB_READY;
			cmd = ccb->ccb_cmd;
			cmd->cmd_unit_host = TWE_UNITHOST(0, 0);
			cmd->cmd_op = TWE_CMD_GPARAM;
			cmd->cmd_flags = 0;
			cmd->cmd_param.count = 1;

			pb->table_id = TWE_PARAM_AEN;
			pb->param_id = 2;
			pb->param_size = 2;
			if (twe_cmd(ccb, BUS_DMA_NOWAIT, 1)) {
				printf(": error draining attention queue\n");
				break;
			}
			aen = *(u_int16_t *)pb->data;
			TWE_DPRINTF(TWE_D_AEN, ("aen=%x ", aen));
		}
		TWE_UNLOCK(sc, lock);
	}

	return rv;
}
CVSweb