Annotation of sys/dev/ata/atascsi.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: atascsi.c,v 1.41 2007/04/22 05:11:45 dlg Exp $ */
2:
3: /*
4: * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
5: *
6: * Permission to use, copy, modify, and distribute this software for any
7: * purpose with or without fee is hereby granted, provided that the above
8: * copyright notice and this permission notice appear in all copies.
9: *
10: * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11: * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12: * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13: * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14: * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15: * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16: * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17: */
18:
19: #include <sys/param.h>
20: #include <sys/systm.h>
21: #include <sys/buf.h>
22: #include <sys/kernel.h>
23: #include <sys/malloc.h>
24: #include <sys/device.h>
25: #include <sys/proc.h>
26: #include <sys/queue.h>
27:
28: #include <scsi/scsi_all.h>
29: #include <scsi/scsi_disk.h>
30: #include <scsi/scsiconf.h>
31:
32: #include <dev/ata/atascsi.h>
33:
/*
 * Per-controller glue state between an ATA host controller and the
 * SCSI midlayer.  Allocated in atascsi_attach().
 */
struct atascsi {
	struct device		*as_dev;	/* controller device */
	void			*as_cookie;	/* controller private data */

	struct ata_port		**as_ports;	/* one slot per SCSI target */

	struct atascsi_methods	*as_methods;	/* controller callbacks */
	struct scsi_adapter	as_switch;	/* per-instance adapter copy */
	struct scsi_link	as_link;	/* root link for our scsibus */
	struct scsibus_softc	*as_scsibus;	/* child bus, kept for hotplug */

	int			as_capability;	/* ASAA_CAP_* flags */
};
47:
/* adapter entry point handed to the SCSI midlayer */
int		atascsi_cmd(struct scsi_xfer *);

/*
 * template: atascsi_attach() copies this into the softc and overrides
 * scsi_minphys with the controller-supplied routine.
 */
struct scsi_adapter atascsi_switch = {
	atascsi_cmd,		/* scsi_cmd */
	minphys,		/* scsi_minphys */
	NULL,
	NULL,
	NULL			/* ioctl */
};

/* no device-specific start/done/ioctl hooks are needed */
struct scsi_device atascsi_device = {
	NULL, NULL, NULL, NULL
};

int		atascsi_probe(struct atascsi *, int);

/* IDENTIFY DEVICE helpers */
struct ata_xfer	*ata_setup_identify(struct ata_port *, int);
void		ata_free_identify(struct ata_xfer *);
void		ata_complete_identify(struct ata_xfer *,
		    struct ata_identify *);

/* SCSI-on-ATA-disk command emulation */
int		atascsi_disk_cmd(struct scsi_xfer *);
void		atascsi_disk_cmd_done(struct ata_xfer *);
int		atascsi_disk_inq(struct scsi_xfer *);
void		atascsi_disk_inq_done(struct ata_xfer *);
int		atascsi_disk_capacity(struct scsi_xfer *);
void		atascsi_disk_capacity_done(struct ata_xfer *);
int		atascsi_disk_sync(struct scsi_xfer *);
void		atascsi_disk_sync_done(struct ata_xfer *);
int		atascsi_disk_sense(struct scsi_xfer *);

/* completion handler for commands whose result is ignored */
void		atascsi_empty_done(struct ata_xfer *);

/* ATAPI (PACKET) passthrough */
int		atascsi_atapi_cmd(struct scsi_xfer *);
void		atascsi_atapi_cmd_done(struct ata_xfer *);

int		atascsi_stuffup(struct scsi_xfer *);

/* NOTE(review): not referenced anywhere in this file -- confirm users */
int		ata_running = 0;

int		ata_exec(struct atascsi *, struct ata_xfer *);

struct ata_xfer	*ata_get_xfer(struct ata_port *, int);
void		ata_put_xfer(struct ata_xfer *);
94:
/*
 * atascsi_attach: glue an ATA host controller to the SCSI midlayer.
 * Allocates the softc, copies the adapter template, probes every port
 * for attached devices, and attaches a scsibus on top.  Returns the
 * softc, which the controller passes back into later atascsi calls.
 */
struct atascsi *
atascsi_attach(struct device *self, struct atascsi_attach_args *aaa)
{
	struct scsibus_attach_args	saa;
	struct atascsi			*as;
	int				i;

	as = malloc(sizeof(struct atascsi), M_DEVBUF, M_WAITOK);
	bzero(as, sizeof(struct atascsi));

	as->as_dev = self;
	as->as_cookie = aaa->aaa_cookie;
	as->as_methods = aaa->aaa_methods;
	as->as_capability = aaa->aaa_capability;

	/* copy from template and modify for ourselves */
	as->as_switch = atascsi_switch;
	as->as_switch.scsi_minphys = aaa->aaa_minphys;

	/* fill in our scsi_link */
	as->as_link.device = &atascsi_device;
	as->as_link.adapter = &as->as_switch;
	as->as_link.adapter_softc = as;
	as->as_link.adapter_buswidth = aaa->aaa_nports;
	as->as_link.luns = 1; /* XXX port multiplier as luns */
	as->as_link.adapter_target = aaa->aaa_nports;
	as->as_link.openings = aaa->aaa_ncmds;
	/* keep one command slot back for controller error recovery */
	if (as->as_capability & ASAA_CAP_NEEDS_RESERVED)
		as->as_link.openings--;

	as->as_ports = malloc(sizeof(struct ata_port *) * aaa->aaa_nports,
	    M_DEVBUF, M_WAITOK);
	bzero(as->as_ports, sizeof(struct ata_port *) * aaa->aaa_nports);

	/* fill in the port array with the type of devices there */
	for (i = 0; i < as->as_link.adapter_buswidth; i++)
		atascsi_probe(as, i);

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &as->as_link;

	/* stash the scsibus so we can do hotplug on it */
	as->as_scsibus = (struct scsibus_softc *)config_found(self, &saa,
	    scsiprint);

	return (as);
}
142:
/*
 * atascsi_detach: nothing to tear down yet.
 * XXX the softc, the port array and the ata_port structures allocated
 * in atascsi_attach()/atascsi_probe() are not freed here.
 */
int
atascsi_detach(struct atascsi *as)
{
	return (0);
}
148:
149: int
150: atascsi_probe(struct atascsi *as, int port)
151: {
152: struct ata_port *ap;
153: struct ata_xfer *xa;
154: int type, s;
155:
156: if (port > as->as_link.adapter_buswidth)
157: return (ENXIO);
158:
159: type = as->as_methods->probe(as->as_cookie, port);
160: switch (type) {
161: case ATA_PORT_T_DISK:
162: break;
163: case ATA_PORT_T_ATAPI:
164: as->as_link.flags |= SDEV_ATAPI;
165: as->as_link.quirks |= SDEV_ONLYBIG;
166: break;
167: default:
168: return (ENODEV);
169: }
170:
171: ap = malloc(sizeof(struct ata_port), M_DEVBUF, M_WAITOK);
172: bzero(ap, sizeof(struct ata_port));
173: ap->ap_as = as;
174: ap->ap_port = port;
175: ap->ap_type = type;
176:
177: as->as_ports[port] = ap;
178:
179: s = splbio();
180: xa = ata_get_xfer(ap, 1);
181: splx(s);
182: if (xa == NULL)
183: return (EBUSY);
184:
185: /*
186: * FREEZE LOCK the device so malicous users can't lock it on us.
187: * As there is no harm in issuing this to devices that don't
188: * support the security feature set we just send it, and don't bother
189: * checking if the device sends a command abort to tell us it doesn't
190: * support it
191: */
192: xa->fis->command = ATA_C_SEC_FREEZE_LOCK;
193: xa->fis->flags = ATA_H2D_FLAGS_CMD;
194: xa->complete = atascsi_empty_done;
195: xa->flags = ATA_F_POLL | ATA_F_PIO;
196: xa->timeout = 1000;
197: ata_exec(as, xa);
198:
199: return (0);
200: }
201:
202: struct ata_xfer *
203: ata_setup_identify(struct ata_port *ap, int nosleep)
204: {
205: struct ata_xfer *xa;
206: int s;
207:
208: s = splbio();
209: xa = ata_get_xfer(ap, nosleep);
210: splx(s);
211: if (xa == NULL)
212: return (NULL);
213:
214: xa->data = malloc(512, M_TEMP, nosleep ? M_NOWAIT : M_WAITOK);
215: if (xa->data == NULL) {
216: s = splbio();
217: xa->state = ATA_S_ERROR;
218: ata_put_xfer(xa);
219: splx(s);
220: return (NULL);
221: }
222: bzero(xa->data, 512);
223: xa->datalen = 512;
224:
225: xa->fis->flags = ATA_H2D_FLAGS_CMD;
226: xa->fis->command = ATA_C_IDENTIFY;
227: xa->fis->device = 0;
228:
229: xa->flags = ATA_F_READ | ATA_F_PIO;
230:
231: return (xa);
232: }
233:
/*
 * Release an IDENTIFY xfer: free the 512-byte data buffer allocated
 * by ata_setup_identify(), then recycle the xfer itself.
 */
void
ata_free_identify(struct ata_xfer *xa)
{
	free(xa->data, M_TEMP);
	ata_put_xfer(xa);
}
240:
241: void
242: ata_complete_identify(struct ata_xfer *xa, struct ata_identify *id)
243: {
244: u_int16_t *swap;
245: int i;
246:
247: bcopy(xa->data, id, sizeof(struct ata_identify));
248: ata_free_identify(xa);
249:
250: swap = (u_int16_t *)id->serial;
251: for (i = 0; i < sizeof(id->serial) / sizeof(u_int16_t); i++)
252: swap[i] = swap16(swap[i]);
253:
254: swap = (u_int16_t *)id->firmware;
255: for (i = 0; i < sizeof(id->firmware) / sizeof(u_int16_t); i++)
256: swap[i] = swap16(swap[i]);
257:
258: swap = (u_int16_t *)id->model;
259: for (i = 0; i < sizeof(id->model) / sizeof(u_int16_t); i++)
260: swap[i] = swap16(swap[i]);
261: }
262:
263: int
264: atascsi_cmd(struct scsi_xfer *xs)
265: {
266: struct scsi_link *link = xs->sc_link;
267: struct atascsi *as = link->adapter_softc;
268: struct ata_port *ap = as->as_ports[link->target];
269:
270: if (ap == NULL)
271: return (atascsi_stuffup(xs));
272:
273: switch (ap->ap_type) {
274: case ATA_PORT_T_DISK:
275: return (atascsi_disk_cmd(xs));
276: case ATA_PORT_T_ATAPI:
277: return (atascsi_atapi_cmd(xs));
278:
279: case ATA_PORT_T_NONE:
280: default:
281: return (atascsi_stuffup(xs));
282: }
283: }
284:
/*
 * Translate a SCSI command for a disk into an ATA command.  Reads and
 * writes are translated to NCQ, LBA48 or LBA28 DMA transfers as the
 * device and request allow; cache/sense/inquiry/capacity opcodes are
 * emulated by helpers, and a few no-op opcodes complete immediately.
 */
int
atascsi_disk_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi		*as = link->adapter_softc;
	struct ata_port		*ap = as->as_ports[link->target];
	int			s, flags = 0;
	struct scsi_rw		*rw;
	struct scsi_rw_big	*rwb;
	struct ata_xfer		*xa;
	struct ata_fis_h2d	*fis;
	u_int64_t		lba;
	u_int32_t		sector_count;

	switch (xs->cmd->opcode) {
	case READ_BIG:
	case READ_COMMAND:
		flags = ATA_F_READ;
		break;
	case WRITE_BIG:
	case WRITE_COMMAND:
		flags = ATA_F_WRITE;
		/* deal with io outside the switch */
		break;

	case SYNCHRONIZE_CACHE:
		return (atascsi_disk_sync(xs));
	case REQUEST_SENSE:
		return (atascsi_disk_sense(xs));
	case INQUIRY:
		return (atascsi_disk_inq(xs));
	case READ_CAPACITY:
		return (atascsi_disk_capacity(xs));

	/* these need no device interaction; report success immediately */
	case TEST_UNIT_READY:
	case START_STOP:
	case PREVENT_ALLOW:
		return (COMPLETE);

	default:
		return (atascsi_stuffup(xs));
	}

	s = splbio();
	xa = ata_get_xfer(ap, xs->flags & SCSI_NOSLEEP);
	splx(s);
	if (xa == NULL)
		return (NO_CCB);

	xa->flags = flags;
	/* decode LBA and length from the 6- or 10-byte CDB */
	if (xs->cmdlen == 6) {
		rw = (struct scsi_rw *)xs->cmd;
		lba = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		/* in a 6-byte CDB a length of 0 means 256 sectors */
		sector_count = rw->length ? rw->length : 0x100;
	} else {
		rwb = (struct scsi_rw_big *)xs->cmd;
		lba = _4btol(rwb->addr);
		sector_count = _2btol(rwb->length);
	}

	fis = xa->fis;

	fis->flags = ATA_H2D_FLAGS_CMD;
	fis->lba_low = lba & 0xff;
	fis->lba_mid = (lba >> 8) & 0xff;
	fis->lba_high = (lba >> 16) & 0xff;

	if (ap->ap_ncqdepth && !(xs->flags & SCSI_POLL)) {
		/* Use NCQ */
		xa->flags |= ATA_F_NCQ;
		fis->command = (xa->flags & ATA_F_WRITE) ?
		    ATA_C_WRITE_FPDMA : ATA_C_READ_FPDMA;
		fis->device = ATA_H2D_DEVICE_LBA;
		fis->lba_low_exp = (lba >> 24) & 0xff;
		fis->lba_mid_exp = (lba >> 32) & 0xff;
		fis->lba_high_exp = (lba >> 40) & 0xff;
		/* for FPDMA the tag rides in sector_count, shifted by 3 */
		fis->sector_count = xa->tag << 3;
		/* ...and the transfer length moves to the features field */
		fis->features = sector_count & 0xff;
		fis->features_exp = (sector_count >> 8) & 0xff;
	} else if (sector_count > 0x100 || lba > 0xfffffff) {
		/* Use LBA48: request exceeds 28-bit LBA or 256 sectors */
		fis->command = (xa->flags & ATA_F_WRITE) ?
		    ATA_C_WRITEDMA_EXT : ATA_C_READDMA_EXT;
		fis->device = ATA_H2D_DEVICE_LBA;
		fis->lba_low_exp = (lba >> 24) & 0xff;
		fis->lba_mid_exp = (lba >> 32) & 0xff;
		fis->lba_high_exp = (lba >> 40) & 0xff;
		fis->sector_count = sector_count & 0xff;
		fis->sector_count_exp = (sector_count >> 8) & 0xff;
	} else {
		/* Use LBA: bits 27:24 of the LBA live in the device field */
		fis->command = (xa->flags & ATA_F_WRITE) ?
		    ATA_C_WRITEDMA : ATA_C_READDMA;
		fis->device = ATA_H2D_DEVICE_LBA | ((lba >> 24) & 0x0f);
		fis->sector_count = sector_count & 0xff;
	}

	xa->data = xs->data;
	xa->datalen = xs->datalen;
	xa->complete = atascsi_disk_cmd_done;
	xa->timeout = xs->timeout;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	return (ata_exec(as, xa));
}
392:
/*
 * Completion handler for commands whose result is ignored (e.g. the
 * SECURITY FREEZE LOCK issued at probe time): just recycle the xfer.
 */
void
atascsi_empty_done(struct ata_xfer *xa)
{
	ata_put_xfer(xa);
}
398:
/*
 * Completion handler for translated disk reads/writes: map the ATA
 * xfer state to a SCSI error code, propagate the residual, recycle
 * the xfer and finish the SCSI command.
 */
void
atascsi_disk_cmd_done(struct ata_xfer *xa)
{
	struct scsi_xfer	*xs = xa->atascsi_private;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		/* fake sense? */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	case ATA_S_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;
	default:
		panic("atascsi_disk_cmd_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	xs->resid = xa->resid;
	ata_put_xfer(xa);

	xs->flags |= ITSDONE;
	scsi_done(xs);
}
426:
427: int
428: atascsi_disk_inq(struct scsi_xfer *xs)
429: {
430: struct scsi_link *link = xs->sc_link;
431: struct atascsi *as = link->adapter_softc;
432: struct ata_port *ap = as->as_ports[link->target];
433: struct ata_xfer *xa;
434:
435: xa = ata_setup_identify(ap, xs->flags & SCSI_NOSLEEP);
436: if (xa == NULL)
437: return (NO_CCB);
438:
439: xa->complete = atascsi_disk_inq_done;
440: xa->timeout = xs->timeout;
441: xa->atascsi_private = xs;
442: if (xs->flags & SCSI_POLL)
443: xa->flags |= ATA_F_POLL;
444:
445: return (ata_exec(as, xa));
446: }
447:
448: void
449: atascsi_disk_inq_done(struct ata_xfer *xa)
450: {
451: struct scsi_xfer *xs = xa->atascsi_private;
452: struct scsi_link *link = xs->sc_link;
453: struct atascsi *as = link->adapter_softc;
454: struct ata_port *ap = as->as_ports[link->target];
455: struct ata_identify id;
456: struct scsi_inquiry_data inq;
457: int host_ncqdepth, complete = 0;
458:
459: switch (xa->state) {
460: case ATA_S_COMPLETE:
461: ata_complete_identify(xa, &id);
462:
463: bzero(&inq, sizeof(inq));
464:
465: inq.device = T_DIRECT;
466: inq.version = 2;
467: inq.response_format = 2;
468: inq.additional_length = 32;
469: bcopy("ATA ", inq.vendor, sizeof(inq.vendor));
470: bcopy(id.model, inq.product, sizeof(inq.product));
471: bcopy(id.firmware, inq.revision, sizeof(inq.revision));
472:
473: bcopy(&inq, xs->data, MIN(sizeof(inq), xs->datalen));
474: xs->error = XS_NOERROR;
475: complete = 1;
476: break;
477:
478: case ATA_S_ERROR:
479: case ATA_S_TIMEOUT:
480: ata_free_identify(xa);
481: xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
482: XS_DRIVER_STUFFUP);
483: break;
484:
485: default:
486: panic("atascsi_disk_inq_done: unexpected ata_xfer state (%d)",
487: xa->state);
488: }
489:
490: xs->flags |= ITSDONE;
491: scsi_done(xs);
492:
493: if (!complete || (ap->ap_features & ATA_PORT_F_PROBED))
494: return;
495:
496: ap->ap_features = ATA_PORT_F_PROBED;
497:
498: if (as->as_capability & ASAA_CAP_NCQ && (letoh16(id.satacap) &
499: (1 << 8))) {
500: /*
501: * At this point, openings should be the number of commands the
502: * host controller supports, less the one that is outstanding
503: * as a result of this inquiry, less any reserved slot the
504: * host controller needs for recovery.
505: */
506: host_ncqdepth = link->openings + 1 + ((as->as_capability &
507: ASAA_CAP_NEEDS_RESERVED) ? 1 : 0);
508:
509: ap->ap_ncqdepth = (letoh16(id.qdepth) & 0x1f) + 1;
510:
511: /* Limit the number of openings to what the device supports. */
512: if (host_ncqdepth > ap->ap_ncqdepth)
513: link->openings -= (host_ncqdepth - ap->ap_ncqdepth);
514:
515: /*
516: * XXX throw away any xfers that have tag numbers higher than
517: * what the device supports.
518: */
519: while (host_ncqdepth--) {
520: struct ata_xfer *xa;
521:
522: xa = ata_get_xfer(ap, 1);
523: if (xa->tag < ap->ap_ncqdepth) {
524: xa->state = ATA_S_COMPLETE;
525: ata_put_xfer(xa);
526: }
527: }
528: }
529: }
530:
/*
 * SYNCHRONIZE CACHE emulation: issue an ATA FLUSH CACHE with a
 * stretched timeout, since the spec allows flushes to take >30s.
 */
int
atascsi_disk_sync(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi		*as = link->adapter_softc;
	struct ata_port		*ap = as->as_ports[link->target];
	struct ata_xfer		*xa;
	int			s;

	s = splbio();
	xa = ata_get_xfer(ap, xs->flags & SCSI_NOSLEEP);
	splx(s);
	if (xa == NULL)
		return (NO_CCB);

	xa->datalen = 0;
	xa->flags = ATA_F_READ;
	xa->complete = atascsi_disk_sync_done;
	/* Spec says flush cache can take >30 sec, so give it at least 45. */
	xa->timeout = (xs->timeout < 45000) ? 45000 : xs->timeout;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	xa->fis->flags = ATA_H2D_FLAGS_CMD;
	xa->fis->command = ATA_C_FLUSH_CACHE;
	xa->fis->device = 0;

	return (ata_exec(as, xa));
}
561:
562: void
563: atascsi_disk_sync_done(struct ata_xfer *xa)
564: {
565: struct scsi_xfer *xs = xa->atascsi_private;
566:
567: switch (xa->state) {
568: case ATA_S_COMPLETE:
569: xs->error = XS_NOERROR;
570: break;
571:
572: case ATA_S_ERROR:
573: case ATA_S_TIMEOUT:
574: printf("atascsi_disk_sync_done: %s\n",
575: xa->state == ATA_S_TIMEOUT ? "timeout" : "error");
576: xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
577: XS_DRIVER_STUFFUP);
578: break;
579:
580: default:
581: panic("atascsi_disk_sync_done: unexpected ata_xfer state (%d)",
582: xa->state);
583: }
584:
585: ata_put_xfer(xa);
586:
587: xs->flags |= ITSDONE;
588: scsi_done(xs);
589: }
590:
591: int
592: atascsi_disk_capacity(struct scsi_xfer *xs)
593: {
594: struct scsi_link *link = xs->sc_link;
595: struct atascsi *as = link->adapter_softc;
596: struct ata_port *ap = as->as_ports[link->target];
597: struct ata_xfer *xa;
598:
599: xa = ata_setup_identify(ap, xs->flags & SCSI_NOSLEEP);
600: if (xa == NULL)
601: return (NO_CCB);
602:
603: xa->complete = atascsi_disk_capacity_done;
604: xa->timeout = xs->timeout;
605: xa->atascsi_private = xs;
606: if (xs->flags & SCSI_POLL)
607: xa->flags |= ATA_F_POLL;
608:
609: return (ata_exec(as, xa));
610: }
611:
612: void
613: atascsi_disk_capacity_done(struct ata_xfer *xa)
614: {
615: struct scsi_xfer *xs = xa->atascsi_private;
616: struct ata_identify id;
617: struct scsi_read_cap_data rcd;
618: u_int64_t capacity;
619: int i;
620:
621: switch (xa->state) {
622: case ATA_S_COMPLETE:
623: ata_complete_identify(xa, &id);
624:
625: bzero(&rcd, sizeof(rcd));
626: if (letoh16(id.cmdset83) & 0x0400) {
627: /* LBA48 feature set supported */
628: for (i = 3; i >= 0; --i) {
629: capacity <<= 16;
630: capacity += letoh16(id.addrsecxt[i]);
631: }
632: } else {
633: capacity = letoh16(id.addrsec[1]);
634: capacity <<= 16;
635: capacity += letoh16(id.addrsec[0]);
636: }
637:
638: /* XXX SCSI layer can't handle a device this big yet */
639: if (capacity > 0xffffffff)
640: capacity = 0xffffffff;
641:
642: _lto4b(capacity - 1, rcd.addr);
643: _lto4b(512, rcd.length);
644:
645: bcopy(&rcd, xs->data, MIN(sizeof(rcd), xs->datalen));
646: xs->error = XS_NOERROR;
647: break;
648:
649: case ATA_S_ERROR:
650: case ATA_S_TIMEOUT:
651: ata_free_identify(xa);
652: xs->error = (xa->state == ATA_S_TIMEOUT ? XS_TIMEOUT :
653: XS_DRIVER_STUFFUP);
654: break;
655:
656: default:
657: panic("atascsi_disk_capacity_done: "
658: "unexpected ata_xfer state (%d)", xa->state);
659: }
660:
661: xs->flags |= ITSDONE;
662: scsi_done(xs);
663: }
664:
/*
 * REQUEST SENSE emulation: no sense state is kept for disks, so
 * always return a fixed-format NO SENSE response.
 */
int
atascsi_disk_sense(struct scsi_xfer *xs)
{
	struct scsi_sense_data	*sd = (struct scsi_sense_data *)xs->data;
	int			s;

	bzero(xs->data, xs->datalen);
	/* check datalen > sizeof(struct scsi_sense_data)? */
	sd->error_code = 0x70; /* XXX magic: fixed format, current error */
	sd->flags = SKEY_NO_SENSE;

	xs->error = XS_NOERROR;
	xs->flags |= ITSDONE;

	s = splbio();
	scsi_done(xs);
	splx(s);
	return (COMPLETE);
}
684:
/*
 * Wrap a SCSI command in an ATA PACKET command and send it to an
 * ATAPI device.  The SCSI CDB is copied verbatim into the packet
 * command area of the xfer.
 */
int
atascsi_atapi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct atascsi		*as = link->adapter_softc;
	struct ata_port		*ap = as->as_ports[link->target];
	int			s;
	struct ata_xfer		*xa;
	struct ata_fis_h2d	*fis;

	s = splbio();
	xa = ata_get_xfer(ap, xs->flags & SCSI_NOSLEEP);
	splx(s);
	if (xa == NULL)
		return (NO_CCB);

	/* translate the SCSI data direction into xfer flags */
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		xa->flags = ATA_F_PACKET | ATA_F_READ;
		break;
	case SCSI_DATA_OUT:
		xa->flags = ATA_F_PACKET | ATA_F_WRITE;
		break;
	default:
		xa->flags = ATA_F_PACKET;
	}

	xa->data = xs->data;
	xa->datalen = xs->datalen;
	xa->complete = atascsi_atapi_cmd_done;
	xa->timeout = xs->timeout;
	xa->atascsi_private = xs;
	if (xs->flags & SCSI_POLL)
		xa->flags |= ATA_F_POLL;

	fis = xa->fis;
	fis->flags = ATA_H2D_FLAGS_CMD;
	fis->command = ATA_C_PACKET;
	fis->device = 0;
	fis->sector_count = xa->tag << 3;
	fis->features = ATA_H2D_FEATURES_DMA | ((xa->flags & ATA_F_WRITE) ?
	    ATA_H2D_FEATURES_DIR_WRITE : ATA_H2D_FEATURES_DIR_READ);
	/* NOTE(review): presumably the ATAPI byte-count limit (0x2000)
	 * carried in the mid/high LBA bytes -- confirm against spec */
	fis->lba_mid = 0x00;
	fis->lba_high = 0x20;

	/* Copy SCSI command into ATAPI packet. */
	memcpy(xa->packetcmd, xs->cmd, xs->cmdlen);

	return (ata_exec(as, xa));
}
735:
/*
 * Completion handler for ATAPI PACKET commands: on device error,
 * synthesize SCSI sense data from the ATAPI error register bits.
 */
void
atascsi_atapi_cmd_done(struct ata_xfer *xa)
{
	struct scsi_xfer	*xs = xa->atascsi_private;
	struct scsi_sense_data	*sd = &xs->sense;

	switch (xa->state) {
	case ATA_S_COMPLETE:
		xs->error = XS_NOERROR;
		break;
	case ATA_S_ERROR:
		/* Return PACKET sense data */
		sd->error_code = SSD_ERRCODE_CURRENT;
		/* sense key lives in the top nibble of the error register */
		sd->flags = (xa->rfis.error & 0xf0) >> 4;
		/* note: deliberately overwrites (not ORs) the sense key */
		if (xa->rfis.error & 0x04)
			sd->flags = SKEY_ILLEGAL_REQUEST;
		if (xa->rfis.error & 0x02)
			sd->flags |= SSD_EOM;
		if (xa->rfis.error & 0x01)
			sd->flags |= SSD_ILI;
		xs->error = XS_SENSE;
		break;
	case ATA_S_TIMEOUT:
		printf("atascsi_atapi_cmd_done, timeout\n");
		xs->error = XS_TIMEOUT;
		break;
	default:
		panic("atascsi_atapi_cmd_done: unexpected ata_xfer state (%d)",
		    xa->state);
	}

	xs->resid = xa->resid;
	ata_put_xfer(xa);

	xs->flags |= ITSDONE;
	scsi_done(xs);
}
773:
/*
 * Fail a SCSI command without ever touching the hardware: mark it
 * XS_DRIVER_STUFFUP, complete it at splbio, and report COMPLETE to
 * the midlayer.
 */
int
atascsi_stuffup(struct scsi_xfer *xs)
{
	int			s;

	xs->error = XS_DRIVER_STUFFUP;
	xs->flags |= ITSDONE;

	s = splbio();
	scsi_done(xs);
	splx(s);
	return (COMPLETE);
}
787:
/*
 * Hand a fully constructed xfer to the controller.  Returns COMPLETE
 * when the command finished (or failed) immediately, and
 * SUCCESSFULLY_QUEUED when it is still in flight.  A polled command
 * that the controller merely queues is a driver bug, so it panics.
 */
int
ata_exec(struct atascsi *as, struct ata_xfer *xa)
{
	int			polled = xa->flags & ATA_F_POLL;

	switch (as->as_methods->ata_cmd(xa)) {
	case ATA_COMPLETE:
	case ATA_ERROR:
		return (COMPLETE);
	case ATA_QUEUED:
		if (!polled)
			return (SUCCESSFULLY_QUEUED);
		/* FALLTHROUGH: polled commands must not stay queued */
	default:
		panic("unexpected return from ata_exec");
	}
}
804:
805: struct ata_xfer *
806: ata_get_xfer(struct ata_port *ap, int nosleep /* XXX unused */)
807: {
808: struct atascsi *as = ap->ap_as;
809: struct ata_xfer *xa;
810:
811: xa = as->as_methods->ata_get_xfer(as->as_cookie, ap->ap_port);
812: if (xa != NULL)
813: xa->fis->type = ATA_FIS_TYPE_H2D;
814:
815: return (xa);
816: }
817:
/*
 * Return an xfer to its owner via the release hook the controller
 * stored in the xfer itself.
 */
void
ata_put_xfer(struct ata_xfer *xa)
{
	xa->ata_put_xfer(xa);
}
CVSweb