Annotation of sys/dev/i2o/iop.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: iop.c,v 1.29 2006/11/29 12:24:17 miod Exp $ */
2: /* $NetBSD: iop.c,v 1.12 2001/03/21 14:27:05 ad Exp $ */
3:
4: /*-
5: * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
9: * by Andrew Doran.
10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
39:
40: /*
41: * Support for I2O IOPs (intelligent I/O processors).
42: */
43:
44: #include <sys/param.h>
45: #include <sys/systm.h>
46: #include <sys/kernel.h>
47: #include <sys/device.h>
48: #include <sys/queue.h>
49: #include <sys/proc.h>
50: #include <sys/malloc.h>
51: #include <sys/ioctl.h>
52: #include <sys/endian.h>
53: #include <sys/conf.h>
54: #include <sys/kthread.h>
55:
56: #include <uvm/uvm_extern.h>
57:
58: #include <machine/bus.h>
59:
60: #include <dev/i2o/i2o.h>
61: #include <dev/i2o/iopio.h>
62: #include <dev/i2o/iopreg.h>
63: #include <dev/i2o/iopvar.h>
64:
/*
 * Busy-wait up to `ms' milliseconds for `cond' to become true, checking
 * the condition every 100us.  Note: the trailing semicolon has been
 * removed from the expansion so that `POLL(...);' is a single statement
 * and the macro is safe inside unbraced if/else bodies (the standard
 * do { } while (0) idiom).
 */
#define POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0)
74:
/* Debug printf: compiled out unless the kernel is built with I2ODEBUG. */
#ifdef I2ODEBUG
#define DPRINTF(x) printf x
#else
#define DPRINTF(x)
#endif

/*
 * Verbose-string helpers for the iop_class table below: with I2OVERBOSE,
 * IFVERBOSE() keeps its caption string and COMMENT() yields NULL; without
 * it, both expand to nothing (the ic_caption field does not exist).
 */
#ifdef I2OVERBOSE
#define IFVERBOSE(x) x
#define COMMENT(x) NULL
#else
#define IFVERBOSE(x)
#define COMMENT(x)
#endif

/* Initiator-context hash table: bucket count and bucket lookup. */
#define IOP_ICTXHASH_NBUCKETS 16
#define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash])

/* Worst-case scatter/gather segments for an IOP_MAX_XFER transfer. */
#define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

/* Transaction-context layout: low IOP_TCTX_SHIFT bits index the message. */
#define IOP_TCTX_SHIFT 12
#define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1)
96:
/* Hash of registered initiators, keyed by initiator context (ictx). */
LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
u_long iop_ictxhash;		/* hash mask returned by hashinit() */
void *iop_sdh;			/* shared shutdown hook (one for all IOPs) */
struct i2o_systab *iop_systab;	/* system table shared by all IOPs */
int iop_systab_size;		/* size in bytes of iop_systab */

/* Autoconf driver glue. */
struct cfdriver iop_cd = {
	NULL, "iop", DV_DULL
};
106:
#define IC_CONFIGURE 0x01	/* class should be autoconfigured */
#define IC_PRIORITY 0x02	/* configure before non-priority classes */

/*
 * Table of I2O device classes we know about.  Entries flagged
 * IC_CONFIGURE are attached as child devices; IC_PRIORITY entries are
 * attached in the first configuration pass (see iop_reconfigure()).
 * Captions exist only when built with I2OVERBOSE; COMMENT() entries
 * then carry a NULL caption.
 */
struct iop_class {
	u_short ic_class;	/* I2O class code */
	u_short ic_flags;	/* IC_CONFIGURE / IC_PRIORITY */
#ifdef I2OVERBOSE
	const char *ic_caption;	/* human-readable class name, or NULL */
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port" )
	},
};
188:
#if defined(I2ODEBUG) && defined(I2OVERBOSE)
/* Textual forms of I2O reply status codes, indexed by the status value. */
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif
205:
/* Barriered MMIO register accessors. */
static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

/* Autoconfiguration entry points. */
void iop_config_interrupts(struct device *);
void iop_configure_devices(struct iop_softc *, int, int);
void iop_devinfo(int, char *, size_t);
int iop_print(void *, const char *);
int iop_reconfigure(struct iop_softc *, u_int);
void iop_shutdown(void *);
int iop_submatch(struct device *, void *, void *);
#ifdef notyet
int iop_vendor_print(void *, const char *);
#endif

/* Internal helpers. */
void iop_adjqparam(struct iop_softc *, int);
void iop_create_reconf_thread(void *);
int iop_handle_reply(struct iop_softc *, u_int32_t);
int iop_hrt_get(struct iop_softc *);
int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, size_t);
void iop_intr_event(struct device *, struct iop_msg *, void *);
int iop_lct_get0(struct iop_softc *, struct i2o_lct *, size_t, u_int32_t);
void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
int iop_ofifo_init(struct iop_softc *);
int iop_passthrough(struct iop_softc *, struct ioppt *);
int iop_post(struct iop_softc *, u_int32_t *);
void iop_reconf_thread(void *);
void iop_release_mfa(struct iop_softc *, u_int32_t);
int iop_reset(struct iop_softc *);
int iop_status_get(struct iop_softc *, int);
int iop_systab_set(struct iop_softc *);
void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
void iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

/* Character-device entry point declarations (iopopen/iopclose/iopioctl). */
cdev_decl(iop);
244:
/*
 * Read the 32-bit IOP register at byte offset `off', issuing a full
 * read/write bus-space barrier first so the read is not reordered
 * against earlier register accesses.
 */
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}
253:
/*
 * Write `val' to the 32-bit IOP register at byte offset `off', with a
 * trailing write barrier so the store is flushed before any subsequent
 * register access.
 */
static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
262:
/*
 * Initialise the IOP and our interface: allocate the scratch DMA area,
 * reset the controller, fetch its status block, allocate the message
 * wrappers and their DMA maps, initialise the outbound FIFO, and enable
 * interrupts.  On failure, unwinds whatever was set up (tracked by
 * `state') and returns without attaching.  `intrstr' is a description
 * of the established interrupt, printed if non-NULL.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	u_int32_t mask;
	char ident[64];
	int rv, i, nsegs;
	int state = 0;		/* unwind depth for bail_out */

	/* The ictx hash table is global; create it on first attach only. */
	if (iop_ictxhashtbl == NULL) {
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, M_DEVBUF,
		    M_NOWAIT, &iop_ictxhash);
		if (iop_ictxhashtbl == NULL) {
			printf("%s: cannot allocate hashtable\n",
			    sc->sc_dv.dv_xname);
			return;
		}
	}

	/* Reset the IOP and request status. */
	printf("I2O adapter");

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	state++;

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid,
	    sizeof(sc->sc_status.productid), ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    letoh16(sc->sc_status.orgid),
	    (letoh32(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    letoh32(sc->sc_status.desiredprivmemsize),
	    letoh32(sc->sc_status.currentprivmemsize),
	    letoh32(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    letoh32(sc->sc_status.desiredpriviosize),
	    letoh32(sc->sc_status.currentpriviosize),
	    letoh32(sc->sc_status.currentpriviobase));
#endif

	/* Clamp queue depths to what we are willing to handle. */
	sc->sc_maxob = letoh32(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = letoh32(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT);
	if (!im) {
		printf("%s: couldn't allocate message", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	bzero(im, sizeof(*im) * sc->sc_maxib);
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	/*
	 * Give each wrapper a transfer DMA map and a transaction context
	 * (its index), and put it on the free list.
	 * NOTE(review): if bus_dmamap_create() fails partway through,
	 * maps created for earlier wrappers are not destroyed here —
	 * verify whether that leak matters on this (fatal) attach path.
	 */
	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    letoh32(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, letoh32(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", 0, 0);

	/* Defer the rest of configuration until interrupts work. */
	startuphook_establish((void (*)(void *))iop_config_interrupts, sc);
	return;

	/* Unwind in reverse order of the `state' increments above. */
bail_out:
	if (state > 4)
		free(im, M_DEVBUF);
	if (state > 3)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 2)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 1)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	if (state > 0)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}
425:
/*
 * Perform autoconfiguration tasks.  Runs from a startup hook, once
 * interrupts are live: fetches the HRT, builds and posts the (global)
 * I2O system table, brings the IOP to the OPERATIONAL state, registers
 * for executive events, performs the initial device scan, and schedules
 * creation of the reconfiguration thread.
 */
void
iop_config_interrupts(struct device *self)
{
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.  It is shared by every IOP in the
	 * machine, so only the first IOP to get here constructs it,
	 * counting all IOPs that have a valid status block.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			iop = (struct iop_softc *)device_lookup(&iop_cd, i);
			if (iop == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		/* struct i2o_systab already contains one entry. */
		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (!iop_systab)
			return;

		bzero(iop_systab, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		/* One entry per IOP with a valid status block. */
		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++)
		{
			iop = (struct iop_softc *)device_lookup(&iop_cd, i);
			if (iop == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			/* IOP IDs 0 and 1 are reserved; offset by 2. */
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(letoh32(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_DISCARD | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events",
		    sc->sc_dv.dv_xname);
		return;
	}

#ifdef notyet
	/* Attempt to match and attach a product-specific extension. */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
#endif

	/*
	 * Initial device scan, under the configuration lock.
	 * NOTE(review): iop_reconfigure() visibly returns 0 or a positive
	 * errno, never -1, so this branch appears unreachable; if it did
	 * fire, sc_conflock would be returned to the caller still held.
	 * Verify the intended failure check (and release the lock on it).
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	kthread_create_deferred(iop_create_reconf_thread, sc);
}
556:
557: /*
558: * Create the reconfiguration thread. Called after the standard kernel
559: * threads have been created.
560: */
561: void
562: iop_create_reconf_thread(void *cookie)
563: {
564: struct iop_softc *sc;
565: int rv;
566:
567: sc = cookie;
568: sc->sc_flags |= IOP_ONLINE;
569:
570: rv = kthread_create(iop_reconf_thread, sc, &sc->sc_reconf_proc,
571: "%s", sc->sc_dv.dv_xname);
572: if (rv != 0) {
573: printf("%s: unable to create reconfiguration thread (%d)",
574: sc->sc_dv.dv_xname, rv);
575: return;
576: }
577: }
578:
/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.  Never returns: on each pass
 * it posts an LCT-notify for a change indicator newer than the last one
 * seen, reconfigures under sc_conflock when it fires, then sleeps five
 * seconds before re-arming.
 */
void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc = cookie;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	/* Ask to be notified of any change after the current one. */
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		/* Blocks until the IOP reports a change (or fails). */
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, letoh32(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, letoh32(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}
612:
613: /*
614: * Reconfigure: find new and removed devices.
615: */
616: int
617: iop_reconfigure(struct iop_softc *sc, u_int chgind)
618: {
619: struct iop_msg *im;
620: struct i2o_hba_bus_scan mf;
621: struct i2o_lct_entry *le;
622: struct iop_initiator *ii, *nextii;
623: int rv, tid, i;
624:
625: /*
626: * If the reconfiguration request isn't the result of LCT change
627: * notification, then be more thorough: ask all bus ports to scan
628: * their busses. Wait up to 5 minutes for each bus port to complete
629: * the request.
630: */
631: if (chgind == 0) {
632: if ((rv = iop_lct_get(sc)) != 0) {
633: DPRINTF(("iop_reconfigure: unable to read LCT\n"));
634: return (rv);
635: }
636:
637: le = sc->sc_lct->entry;
638: for (i = 0; i < sc->sc_nlctent; i++, le++) {
639: if ((letoh16(le->classid) & I2O_CLASS_MASK) !=
640: I2O_CLASS_BUS_ADAPTER_PORT)
641: continue;
642: tid = letoh16(le->localtid) & I2O_CLASS_MASK;
643:
644: im = iop_msg_alloc(sc, NULL, IM_WAIT);
645:
646: mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
647: mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
648: mf.msgictx = IOP_ICTX;
649: mf.msgtctx = im->im_tctx;
650:
651: DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
652: tid));
653:
654: rv = iop_msg_post(sc, im, &mf, 5*60*1000);
655: iop_msg_free(sc, im);
656: #ifdef I2ODEBUG
657: if (rv != 0)
658: printf("%s: bus scan failed, status =%d\n",
659: sc->sc_dv.dv_xname, rv);
660: #endif
661: }
662: } else if (chgind <= sc->sc_chgind) {
663: DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
664: return (0);
665: }
666:
667: /* Re-read the LCT and determine if it has changed. */
668: if ((rv = iop_lct_get(sc)) != 0) {
669: DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
670: return (rv);
671: }
672: DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));
673:
674: chgind = letoh32(sc->sc_lct->changeindicator);
675: if (chgind == sc->sc_chgind) {
676: DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
677: return (0);
678: }
679: DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
680: sc->sc_chgind = chgind;
681:
682: if (sc->sc_tidmap != NULL)
683: free(sc->sc_tidmap, M_DEVBUF);
684: sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
685: M_DEVBUF, M_NOWAIT);
686: if (!sc->sc_tidmap) {
687: DPRINTF(("iop_reconfigure: out of memory\n"));
688: return (ENOMEM);
689: }
690: bzero(sc->sc_tidmap, sc->sc_nlctent * sizeof(struct iop_tidmap));
691:
692: /* Allow 1 queued command per device while we're configuring. */
693: iop_adjqparam(sc, 1);
694:
695: /*
696: * Match and attach child devices. We configure high-level devices
697: * first so that any claims will propagate throughout the LCT,
698: * hopefully masking off aliased devices as a result.
699: *
700: * Re-reading the LCT at this point is a little dangerous, but we'll
701: * trust the IOP (and the operator) to behave itself...
702: */
703: iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
704: IC_CONFIGURE | IC_PRIORITY);
705: if ((rv = iop_lct_get(sc)) != 0)
706: DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
707: iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
708: IC_CONFIGURE);
709:
710: for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
711: nextii = LIST_NEXT(ii, ii_list);
712:
713: /* Detach devices that were configured, but are now gone. */
714: for (i = 0; i < sc->sc_nlctent; i++)
715: if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
716: break;
717: if (i == sc->sc_nlctent ||
718: (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
719: config_detach(ii->ii_dv, DETACH_FORCE);
720:
721: /*
722: * Tell initiators that existed before the re-configuration
723: * to re-configure.
724: */
725: if (ii->ii_reconfig == NULL)
726: continue;
727: if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
728: printf("%s: %s failed reconfigure (%d)\n",
729: sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
730: }
731:
732: /* Re-adjust queue parameters and return. */
733: if (sc->sc_nii != 0)
734: iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
735: / sc->sc_nii);
736:
737: return (0);
738: }
739:
/*
 * Configure I2O devices into the system.  Walks the current LCT and,
 * for each entry whose class flags satisfy (ic_flags & mask) == maskval
 * and which is not claimed by another user, attaches a child device via
 * config_found_sm().  Records each entry's TID and configuration state
 * in sc_tidmap for later comparison by iop_reconfigure().
 */
void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid =
		    letoh16(le->localtid) & I2O_LCT_ENTRY_TID_MASK;

		/* Ignore the device if it's in use. */
		usertid = letoh32(le->usertid) & I2O_LCT_ENTRY_TID_MASK;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = letoh16(le->classid) & I2O_CLASS_MASK;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strlcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname,
				    sizeof sc->sc_tidmap[i].it_dvname);
				break;
			}
		}
		/* ii != NULL means the loop above matched an initiator. */
		if (ii != NULL)
			continue;
		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strlcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname,
			    sizeof sc->sc_tidmap[i].it_dvname);
		}
	}
}
797:
798: /*
799: * Adjust queue parameters for all child devices.
800: */
801: void
802: iop_adjqparam(struct iop_softc *sc, int mpi)
803: {
804: struct iop_initiator *ii;
805:
806: LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
807: if (ii->ii_adjqparam != NULL)
808: (*ii->ii_adjqparam)(ii->ii_dv, mpi);
809: }
810:
/*
 * Format a human-readable description of device class `class' into
 * `devinfo' (of size `di_len').  With I2OVERBOSE, known classes get
 * their caption from the iop_class table; unknown classes — and known
 * classes whose COMMENT() caption is compiled to NULL — fall back to a
 * generic "device (class 0x..)" string.  (The NULL-caption guard fixes
 * a strlcpy(.., NULL, ..) dereference for COMMENT() classes.)
 */
void
iop_devinfo(int class, char *devinfo, size_t di_len)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]) ||
	    iop_class[i].ic_caption == NULL)
		snprintf(devinfo, di_len, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, di_len);
#else

	snprintf(devinfo, di_len, "device (class 0x%x)", class);
#endif
}
830:
831: int
832: iop_print(void *aux, const char *pnp)
833: {
834: struct iop_attach_args *ia;
835: char devinfo[256];
836:
837: ia = aux;
838:
839: if (pnp != NULL) {
840: iop_devinfo(ia->ia_class, devinfo, sizeof devinfo);
841: printf("%s at %s", devinfo, pnp);
842: }
843: printf(" tid %d", ia->ia_tid);
844: return (UNCONF);
845: }
846:
#ifdef notyet
/*
 * Autoconf print routine for vendor-specific extensions (unused until
 * the matching config_found_sm() call is enabled).
 */
int
iop_vendor_print(void *aux, const char *pnp)
{
	if (pnp != NULL)
		printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}
#endif
857:
858: int
859: iop_submatch(struct device *parent, void *vcf, void *aux)
860: {
861: struct cfdata *cf = vcf;
862: struct iop_attach_args *ia;
863:
864: ia = aux;
865:
866: if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
867: return (0);
868:
869: return ((*cf->cf_attach->ca_match)(parent, cf, aux));
870: }
871:
/*
 * Shut down all configured IOPs.  Runs from the shutdown hook: each
 * on-line IOP is quiesced, and (except for AMI firmware) cleared.
 */
void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if (!(sc = (struct iop_softc *)device_lookup(&iop_cd, i)))
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		/* Quiesce first; allow up to 5 seconds. */
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (letoh16(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done.\n");
}
906:
/*
 * Retrieve IOP status: post an EXEC_STATUS_GET whose reply is DMAed
 * into the scratch page, busy-wait for the sync byte to flip to 0xff,
 * then copy the status block into sc->sc_status.  Returns 0 on success
 * or EIO on timeout.
 * NOTE(review): the `nosleep' argument is not used here — the poll
 * always busy-waits; verify whether a sleeping variant was intended.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	paddr_t pa = sc->sc_scr_seg->ds_addr;
	struct i2o_status *st = (struct i2o_status *)sc->sc_scr;
	int rv;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	/* Low 32 bits of the scratch page's bus address... */
	mf.addrlow = pa & ~(u_int32_t)0;
	/* ...and the high 32 bits only when paddr_t is wider than 32. */
	mf.addrhigh = sizeof pa > sizeof mf.addrlow ? pa >> 32 : 0;
	mf.length = sizeof(*st);

	/* Clear the sync byte so we can detect the reply arriving. */
	bzero(st, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	/* XXX */
	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
	    sizeof(*st), BUS_DMASYNC_POSTREAD), st->syncbyte == 0xff));

	if (st->syncbyte != 0xff)
		return (EIO);

	bcopy(st, &sc->sc_status, sizeof(sc->sc_status));
	return (0);
}
946:
/*
 * Initialize and populate the IOP's outbound FIFO: post an
 * EXEC_OUTBOUND_INIT (status word DMAed to the scratch page), wait for
 * completion, allocate the reply-frame DMA area on first call, and feed
 * every reply frame address into the outbound FIFO register.
 * Returns 0 on success or an errno.
 * NOTE(review): on the error returns below, DMA memory/maps allocated
 * earlier in this call are not released — verify whether that matters
 * given that the caller treats failure as fatal for the attach.
 */
int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	u_int32_t *sw = (u_int32_t *)sc->sc_scr;
	int i, rseg, rv;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	/* Reply frame size in 32-bit words, in the upper half of flags. */
	mf->flags = IOP_INIT_CODE | ((IOP_MAX_MSG_SIZE >> 2) << 16);
	/* Single simple SGL entry pointing at the status word. */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] = sc->sc_scr_seg->ds_addr;
	/* Account for the two SGL words in the message length. */
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	if ((rv = iop_post(sc, mb)))
		return (rv);

	/* XXX */
	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));
	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, letoh32(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * IOP_MAX_MSG_SIZE;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += IOP_MAX_MSG_SIZE;
	}

	return (0);
}
1041:
1042: /*
1043: * Read the specified number of bytes from the IOP's hardware resource table.
1044: */
1045: int
1046: iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, size_t size)
1047: {
1048: struct iop_msg *im;
1049: struct i2o_exec_hrt_get *mf;
1050: u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1051: int rv;
1052:
1053: im = iop_msg_alloc(sc, NULL, IM_WAIT);
1054: mf = (struct i2o_exec_hrt_get *)mb;
1055: mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
1056: mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
1057: mf->msgictx = IOP_ICTX;
1058: mf->msgtctx = im->im_tctx;
1059:
1060: iop_msg_map(sc, im, mb, hrt, size, 0);
1061: rv = iop_msg_post(sc, im, mb, 30000);
1062: iop_msg_unmap(sc, im);
1063: iop_msg_free(sc, im);
1064: return (rv);
1065: }
1066:
/*
 * Read the IOP's hardware resource table.  The header is fetched first
 * to learn the entry count, then the full table is read into a malloc'd
 * buffer which replaces any previously cached copy in sc->sc_hrt.
 */
int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	size_t size;
	int rv;

	/* First pass: header only, to obtain numentries. */
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    letoh16(hrthdr.numentries)));

	/* numentries - 1: struct i2o_hrt presumably embeds the first entry. */
	size = sizeof(struct i2o_hrt) +
	    (letoh16(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (!hrt)
		return (ENOMEM);

	/* Second pass: the whole table. */
	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	/* Swap in the new table, freeing the old one if present. */
	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}
1100:
/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table. If a change indicator is specified, this
 * is a verbatim notification request, so the caller is prepared
 * to wait indefinitely (timeout of 0 is passed to iop_msg_post).
 */
int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, size_t size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, NULL, IM_WAIT);
	/* Clear the result buffer so stale data can't be mistaken for an LCT. */
	memset(lct, 0, size);

	/* Build an EXEC_LCT_NOTIFY frame requesting all device classes. */
	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	/* 120s timeout for a plain read; wait forever for a change notify. */
	iop_msg_map(sc, im, mb, lct, size, 0);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}
1140:
/*
 * Read the IOP's logical configuration table.  The buffer is first sized
 * from the IOP's advertised estimate; if the table turns out to be a
 * different size, it is re-read into a correctly sized buffer.  The new
 * table replaces sc->sc_lct and sc->sc_nlctent is recomputed.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	size_t esize, size;
	int rv;
	struct i2o_lct *lct;

	/* Allocate using the IOP's estimate of the table size. */
	esize = letoh32(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	/* tablesize is in 32-bit words; re-read if the estimate was wrong. */
	size = letoh16(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	/* Header size counts one embedded entry, hence the +entry adjustment. */
	sc->sc_nlctent = ((letoh16(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}
1183:
/*
 * Request the specified parameter group from the target. If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 *
 * In the asynchronous case, ownership of `pgop' and the reply buffer
 * passes with the wrapper — presumably the initiator's completion path
 * frees them (verify against callers); they are only freed here on the
 * synchronous or error paths.
 */
int
iop_param_op(struct iop_softc *sc, int tid, struct iop_initiator *ii,
	     int write, int group, void *buf, size_t size)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv, func, op;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, ii, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	if (write) {
		func = I2O_UTIL_PARAMS_SET;
		op = I2O_PARAMS_OP_FIELD_SET;
	} else {
		func = I2O_UTIL_PARAMS_GET;
		op = I2O_PARAMS_OP_FIELD_GET;
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, func);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	/* One operation targeting every field of the requested group. */
	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(op);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	memset(buf, 0, size);
	/* Map the op list outbound and the data buffer per direction. */
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1);
	iop_msg_map(sc, im, mb, buf, size, write);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    rf->detail == htole16(I2O_DSC_UNKNOWN_ERROR))
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);
	}

	/* Synchronous or failed: tear everything down here. */
	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}
1258:
/*
 * Execute a simple command (no parameters).  If `async' is non-zero the
 * caller sleeps for the reply; otherwise the reply is polled for.
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, NULL, fl);

	/* A bare message header is all such commands require. */
	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}
1282:
/*
 * Post the system table to the IOP, along with the ranges of private
 * memory and I/O space allocated for it (if it requested any).
 */
int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, NULL, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	/* IOP IDs 0 and 1 appear to be reserved, hence unit + 2. */
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	/* [0] = base address, [1] = length; sizes come from IOP status. */
	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		/*
		 * XXX This will waste virtual memory. We need a flag to tell
		 * bus_space_alloc to just reserve, not actually map the area.
		 */
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    letoh32(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			/* Non-fatal: report no private memory window. */
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		/*
		 * XXX This will potentially waste virtual memory. We
		 * need a flag to tell bus_space_alloc to just
		 * reserve, not actually map the area.
		 */
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    letoh32(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			/* Non-fatal: report no private I/O window. */
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	/* Table plus the two address/length pairs, all outbound. */
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}
1351:
/*
 * Reset the IOP. Must be called with interrupts disabled.  The IOP
 * writes its reset status into the scratch DMA area (sc_scr); we poll
 * that word, then wait for the inbound FIFO to come alive.
 */
int
iop_reset(struct iop_softc *sc)
{
	struct i2o_exec_iop_reset mf;
	paddr_t pa = sc->sc_scr_seg->ds_addr;
	u_int32_t *sw = (u_int32_t *)sc->sc_scr;
	u_int32_t mfa;
	int rv = 0;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	/* Split the 32/64-bit status buffer address across low/high words. */
	mf.statuslow = pa & ~(u_int32_t)0;
	mf.statushigh = sizeof pa > sizeof mf.statuslow ? pa >> 32 : 0;

	/* Clear the status word and hand it to the device for writing. */
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	/* XXX */
	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != htole32(0)));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, letoh32(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state. Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/* We only needed proof of life; return the frame unused. */
	iop_release_mfa(sc, mfa);

	return (0);
}
1404:
/*
 * Register a new initiator. Must be called with the configuration lock
 * held.  Assigns a unique non-zero initiator context (ictx) and links
 * the initiator onto the softc lists and the global ictx hash.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	/* Hash insertion races with interrupt-time lookup; block bio intrs. */
	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}
1433:
1434: /*
1435: * Unregister an initiator. Must be called with the configuration lock
1436: * held.
1437: */
1438: void
1439: iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1440: {
1441: int s;
1442:
1443: if ((ii->ii_flags & II_UTILITY) == 0) {
1444: LIST_REMOVE(ii, ii_list);
1445: sc->sc_nii--;
1446: } else
1447: sc->sc_nuii--;
1448:
1449: s = splbio();
1450: LIST_REMOVE(ii, ii_hash);
1451: splx(s);
1452: }
1453:
/*
 * Handle a reply frame from the IOP.  `rmfa' is the bus address of the
 * reply within the outbound reply buffer.  Returns the request status
 * byte on success, or -1 if the reply cannot be matched to a registered
 * initiator or a valid message wrapper.
 */
int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	/* Convert the bus address into a pointer into the reply buffer. */
	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. XXX This is rubbish. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_rep_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((letoh32(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = letoh32(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;	/* system message; handled via wrapper below */
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);	/* XXX */
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = letoh32(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_DISCARD) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		/* Validate: in range, allocated, and generation bits match. */
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			/* Bits 16..31 are size in words; >>14 & ~3 = bytes. */
			size = (letoh32(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > IOP_MAX_MSG_SIZE)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}
1583:
1584: /*
1585: * Handle an interrupt from the IOP.
1586: */
1587: int
1588: iop_intr(void *arg)
1589: {
1590: struct iop_softc *sc;
1591: u_int32_t rmfa;
1592:
1593: sc = arg;
1594:
1595: if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
1596: return (0);
1597:
1598: for (;;) {
1599: /* Double read to account for IOP bug. */
1600: if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1601: rmfa = iop_inl(sc, IOP_REG_OFIFO);
1602: if (rmfa == IOP_MFA_EMPTY)
1603: break;
1604: }
1605: iop_handle_reply(sc, rmfa);
1606: iop_outl(sc, IOP_REG_OFIFO, rmfa);
1607: }
1608:
1609: return (1);
1610: }
1611:
1612: /*
1613: * Handle an event signalled by the executive.
1614: */
1615: void
1616: iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1617: {
1618: struct i2o_util_event_register_reply *rb;
1619: struct iop_softc *sc;
1620: u_int event;
1621:
1622: sc = (struct iop_softc *)dv;
1623: rb = reply;
1624:
1625: if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1626: return;
1627:
1628: event = letoh32(rb->event);
1629: printf("%s: event 0x%08x received\n", dv->dv_xname, event);
1630: }
1631:
/*
 * Allocate a message wrapper.  The transaction context (tctx) combines
 * the wrapper's fixed slot index with a rolling generation counter so
 * that stale replies can be detected in iop_handle_reply().
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, struct iop_initiator *ii, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();	/* XXX */
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	/* Inherit the discard behaviour of the owning initiator. */
	if (ii != NULL && (ii->ii_flags & II_DISCARD) != 0)
		flags |= IM_DISCARD;

	/* Keep the slot bits, advance the generation bits. */
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	/* Mark all transfer slots unused. */
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}
1670:
/*
 * Free a message wrapper, returning it to the softc free list.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	/* Clearing IM_ALLOCED makes double-frees detectable above. */
	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}
1689:
/*
 * Map a data transfer. Write a scatter-gather list into the message frame.
 *
 * `mb' is the frame under construction; its header word encodes the
 * current frame size in words (bits 16..31), which doubles as the offset
 * at which the SG list is appended.  `out' selects the DMA direction.
 * Returns 0 or a bus_dma error.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, size_t xfersize, int out)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	/* Find the first unused transfer slot in the wrapper. */
	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL, 0);
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 * (Each element is two words: flags/length and address.)
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((IOP_MAX_MSG_SIZE / sizeof *mb) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	/* Last element carries the END_BUFFER marker. */
	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	/* Grow the frame size by nsegs SG elements (2 words each). */
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}
1789:
/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used). Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
		void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	/* Always uses the static first transfer slot/map. */
	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL, 0);
	if (rv != 0)
		return (rv);

	/* Room left in the frame, in two-word SIMPLE elements. */
	off = mb[0] >> 16;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead. SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			/* Emit one page address per page the segment spans. */
			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				/*
				 * NOTE(review): letoh32 here looks like it
				 * should be htole32; the two are identical on
				 * little-endian hosts -- confirm vs upstream.
				 */
				*p++ = letoh32(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		/* Last element terminates both the buffer and the list. */
		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		/* SIMPLE elements occupy two words each. */
		nsegs <<= 1;
	}

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0,
	    ix->ix_map->dm_mapsize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * Adjust the SGL offset and total message size fields. We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}
1886:
/*
 * Unmap all data transfers associated with a message wrapper: sync and
 * unload each map, destroying the dynamically created ones.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		/* Stop at the first unused slot (ix_size == 0) or the end. */
		if ((++ix)->ix_size == 0)
			break;
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
	}
}
1916:
/*
 * Post a message frame to the IOP's inbound queue: obtain an MFA from
 * the inbound FIFO, copy the frame into it, and post it back.  Returns
 * 0 or EAGAIN if the IOP would not supply an MFA.
 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;
	int s;
	/* Frame size in bytes: bits 16..31 are words, so >>14 then mask. */
	size_t size = mb[0] >> 14 & ~3;

	/* ZZZ */
	if (size > IOP_MAX_MSG_SIZE)
		panic("iop_post: frame too large");

#ifdef I2ODEBUG
	{
		int i;

		printf("\niop_post\n");
		for (i = 0; i < size / sizeof *mb; i++)
			printf("%4d %08x\n", i, mb[i]);
	}
#endif

	s = splbio();	/* XXX */

	/* Allocate a slot with the IOP; double read works around IOP bug. */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			splx(s);
			printf("%s: mfa not forthcoming\n",
			    sc->sc_dv.dv_xname);
			return (EAGAIN);
		}

#ifdef I2ODEBUG
	printf("mfa = %u\n", mfa);
#endif

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb,
	    size / sizeof *mb);
	/* Ensure the frame is fully written before posting the MFA. */
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, size,
	    BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	splx(s);
	return (0);
}
1968:
1969: /*
1970: * Post a message to the IOP and deal with completion.
1971: */
1972: int
1973: iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
1974: {
1975: u_int32_t *mb = xmb;
1976: int rv, s;
1977: size_t size = mb[0] >> 14 & 3;
1978:
1979: /* Terminate the scatter/gather list chain. */
1980: if ((im->im_flags & IM_SGLOFFADJ) != 0)
1981: mb[size - 2] |= I2O_SGL_END;
1982:
1983: /* Perform reply buffer DMA synchronisation. */
1984: if (sc->sc_curib++ == 0)
1985: bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
1986: sc->sc_rep_size, BUS_DMASYNC_PREREAD);
1987:
1988: if ((rv = iop_post(sc, mb)) != 0)
1989: return (rv);
1990:
1991: if ((im->im_flags & IM_DISCARD) != 0)
1992: iop_msg_free(sc, im);
1993: else if ((im->im_flags & IM_POLL) != 0 && timo == 0) {
1994: /* XXX For ofifo_init(). */
1995: rv = 0;
1996: } else if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
1997: if ((im->im_flags & IM_POLL) != 0)
1998: iop_msg_poll(sc, im, timo);
1999: else
2000: iop_msg_wait(sc, im, timo);
2001:
2002: s = splbio();
2003: if ((im->im_flags & IM_REPLIED) != 0) {
2004: if ((im->im_flags & IM_NOSTATUS) != 0)
2005: rv = 0;
2006: else if ((im->im_flags & IM_FAIL) != 0)
2007: rv = ENXIO;
2008: else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2009: rv = EIO;
2010: else
2011: rv = 0;
2012: } else
2013: rv = EBUSY;
2014: splx(s);
2015: } else
2016: rv = 0;
2017:
2018: return (rv);
2019: }
2020:
/*
 * Spin until the specified message is replied to.  `timo' is in
 * milliseconds; the loop polls every 100us and drains the outbound FIFO
 * as it goes, so other replies may be handled while waiting.
 */
void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;
	int s, status;

	s = splbio();	/* XXX */

	/* Wait for completion. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				/* Return value intentionally unused here. */
				status = iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (letoh32(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	splx(s);
}
2067:
/*
 * Sleep until the specified message is replied to.  `timo' is in
 * milliseconds; the wakeup comes from iop_handle_reply().
 */
void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int s, rv;

	/* Re-check under splbio: the reply may already have arrived. */
	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return;
	}
	rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
	splx(s);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (letoh32(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}
2095:
/*
 * Release an unused message frame back to the IOP's inbound fifo.
 */
void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op: version + 4-word size in word 0. */
	iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl(sc, mfa + 8, 0);
	iop_outl(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}
2111:
#ifdef I2ODEBUG
/*
 * Dump a reply frame header: function, status (symbolic if I2OVERBOSE),
 * detail code, contexts and TIDs.  Debug-only.
 */
void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	function = (letoh32(rb->msgfunc) >> 24) & 0xff;
	detail = letoh16(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	/* Guard the table lookup against out-of-range status codes. */
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, letoh32(rb->msgictx),
	    letoh32(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (letoh32(rb->msgfunc) >> 12) & 4095, letoh32(rb->msgfunc) & 4095,
	    (letoh32(rb->msgflags) >> 8) & 0xff);
}
#endif
2149:
/*
 * Dump a transport failure reply (contexts, failure code, severity and
 * supported I2O version range).
 */
void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);

	printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
	    letoh32(fn->msgictx), letoh32(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
}
2166:
2167: /*
2168: * Translate an I2O ASCII field into a C string.
2169: */
2170: void
2171: iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2172: {
2173: int hc, lc, i, nit;
2174:
2175: dlen--;
2176: lc = 0;
2177: hc = 0;
2178: i = 0;
2179:
2180: /*
2181: * DPT use NUL as a space, whereas AMI use it as a terminator. The
2182: * spec has nothing to say about it. Since AMI fields are usually
2183: * filled with junk after the terminator, ...
2184: */
2185: nit = (letoh16(sc->sc_status.orgid) != I2O_ORG_DPT);
2186:
2187: while (slen-- != 0 && dlen-- != 0) {
2188: if (nit && *src == '\0')
2189: break;
2190: else if (*src <= 0x20 || *src >= 0x7f) {
2191: if (hc)
2192: dst[i++] = ' ';
2193: } else {
2194: hc = 1;
2195: dst[i++] = *src;
2196: lc = i;
2197: }
2198: src++;
2199: }
2200:
2201: dst[lc] = '\0';
2202: }
2203:
2204: /*
2205: * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2206: */
2207: int
2208: iop_print_ident(struct iop_softc *sc, int tid)
2209: {
2210: struct {
2211: struct i2o_param_op_results pr;
2212: struct i2o_param_read_results prr;
2213: struct i2o_param_device_identity di;
2214: } __attribute__ ((__packed__)) p;
2215: char buf[32];
2216: int rv;
2217:
2218: rv = iop_param_op(sc, tid, NULL, 0, I2O_PARAM_DEVICE_IDENTITY, &p,
2219: sizeof(p));
2220: if (rv != 0)
2221: return (rv);
2222:
2223: iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2224: sizeof(buf));
2225: printf(" <%s, ", buf);
2226: iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2227: sizeof(buf));
2228: printf("%s, ", buf);
2229: iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2230: printf("%s>", buf);
2231:
2232: return (0);
2233: }
2234:
2235: /*
2236: * Claim or unclaim the specified TID.
2237: */
2238: int
2239: iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2240: int flags)
2241: {
2242: struct iop_msg *im;
2243: struct i2o_util_claim mf;
2244: int rv, func;
2245:
2246: func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2247: im = iop_msg_alloc(sc, ii, IM_WAIT);
2248:
2249: /* We can use the same structure, as they're identical. */
2250: mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2251: mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2252: mf.msgictx = ii->ii_ictx;
2253: mf.msgtctx = im->im_tctx;
2254: mf.flags = flags;
2255:
2256: rv = iop_msg_post(sc, im, &mf, 5000);
2257: iop_msg_free(sc, im);
2258: return (rv);
2259: }
2260:
2261: /*
2262: * Perform an abort.
2263: */
2264: int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2265: int tctxabort, int flags)
2266: {
2267: struct iop_msg *im;
2268: struct i2o_util_abort mf;
2269: int rv;
2270:
2271: im = iop_msg_alloc(sc, ii, IM_WAIT);
2272:
2273: mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2274: mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2275: mf.msgictx = ii->ii_ictx;
2276: mf.msgtctx = im->im_tctx;
2277: mf.flags = (func << 24) | flags;
2278: mf.tctxabort = tctxabort;
2279:
2280: rv = iop_msg_post(sc, im, &mf, 5000);
2281: iop_msg_free(sc, im);
2282: return (rv);
2283: }
2284:
2285: /*
2286: * Enable or disable reception of events for the specified device.
2287: */
2288: int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2289: {
2290: struct iop_msg *im;
2291: struct i2o_util_event_register mf;
2292:
2293: im = iop_msg_alloc(sc, ii, 0);
2294:
2295: mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2296: mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2297: mf.msgictx = ii->ii_ictx;
2298: mf.msgtctx = im->im_tctx;
2299: mf.eventmask = mask;
2300:
2301: /* This message is replied to only when events are signalled. */
2302: return (iop_msg_post(sc, im, &mf, 0));
2303: }
2304:
2305: int
2306: iopopen(dev_t dev, int flag, int mode, struct proc *p)
2307: {
2308: struct iop_softc *sc;
2309:
2310: if (!(sc = (struct iop_softc *)device_lookup(&iop_cd, minor(dev))))
2311: return (ENXIO);
2312: if ((sc->sc_flags & IOP_ONLINE) == 0)
2313: return (ENXIO);
2314: if ((sc->sc_flags & IOP_OPEN) != 0)
2315: return (EBUSY);
2316: sc->sc_flags |= IOP_OPEN;
2317:
2318: sc->sc_ptb = malloc(IOP_MAX_XFER * IOP_MAX_MSG_XFERS, M_DEVBUF,
2319: M_WAITOK);
2320: if (sc->sc_ptb == NULL) {
2321: sc->sc_flags ^= IOP_OPEN;
2322: return (ENOMEM);
2323: }
2324:
2325: return (0);
2326: }
2327:
2328: int
2329: iopclose(dev_t dev, int flag, int mode, struct proc *p)
2330: {
2331: struct iop_softc *sc;
2332:
2333: sc = (struct iop_softc *)device_lookup(&iop_cd, minor(dev)); /* XXX */
2334: free(sc->sc_ptb, M_DEVBUF);
2335: sc->sc_flags &= ~IOP_OPEN;
2336: return (0);
2337: }
2338:
2339: int
2340: iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2341: {
2342: struct iop_softc *sc;
2343: struct iovec *iov;
2344: int rv, i;
2345:
2346: if (securelevel >= 2)
2347: return (EPERM);
2348:
2349: sc = (struct iop_softc *)device_lookup(&iop_cd, minor(dev)); /* XXX */
2350:
2351: switch (cmd) {
2352: case IOPIOCPT:
2353: return (iop_passthrough(sc, (struct ioppt *)data));
2354:
2355: case IOPIOCGSTATUS:
2356: iov = (struct iovec *)data;
2357: i = sizeof(struct i2o_status);
2358: if (i > iov->iov_len)
2359: i = iov->iov_len;
2360: else
2361: iov->iov_len = i;
2362: if ((rv = iop_status_get(sc, 0)) == 0)
2363: rv = copyout(&sc->sc_status, iov->iov_base, i);
2364: return (rv);
2365:
2366: case IOPIOCGLCT:
2367: case IOPIOCGTIDMAP:
2368: case IOPIOCRECONFIG:
2369: break;
2370:
2371: default:
2372: #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2373: printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2374: #endif
2375: return (ENOTTY);
2376: }
2377:
2378: if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2379: return (rv);
2380:
2381: switch (cmd) {
2382: case IOPIOCGLCT:
2383: iov = (struct iovec *)data;
2384: i = letoh16(sc->sc_lct->tablesize) << 2;
2385: if (i > iov->iov_len)
2386: i = iov->iov_len;
2387: else
2388: iov->iov_len = i;
2389: rv = copyout(sc->sc_lct, iov->iov_base, i);
2390: break;
2391:
2392: case IOPIOCRECONFIG:
2393: rv = iop_reconfigure(sc, 0);
2394: break;
2395:
2396: case IOPIOCGTIDMAP:
2397: iov = (struct iovec *)data;
2398: i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2399: if (i > iov->iov_len)
2400: i = iov->iov_len;
2401: else
2402: iov->iov_len = i;
2403: rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2404: break;
2405: }
2406:
2407: lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2408: return (rv);
2409: }
2410:
2411: int
2412: iop_passthrough(struct iop_softc *sc, struct ioppt *pt)
2413: {
2414: struct iop_msg *im;
2415: struct i2o_msg *mf;
2416: struct ioppt_buf *ptb;
2417: int rv, i, mapped;
2418: void *buf;
2419:
2420: mf = NULL;
2421: im = NULL;
2422: mapped = 1;
2423:
2424: if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
2425: pt->pt_msglen > (letoh16(sc->sc_status.inboundmframesize) << 2) ||
2426: pt->pt_msglen < sizeof(struct i2o_msg) ||
2427: pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2428: pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2429: pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2430: return (EINVAL);
2431:
2432: for (i = 0; i < pt->pt_nbufs; i++)
2433: if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2434: rv = ENOMEM;
2435: goto bad;
2436: }
2437:
2438: mf = malloc(IOP_MAX_MSG_SIZE, M_DEVBUF, M_WAITOK);
2439: if (mf == NULL)
2440: return (ENOMEM);
2441:
2442: if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2443: goto bad;
2444:
2445: im = iop_msg_alloc(sc, NULL, IM_WAIT | IM_NOSTATUS);
2446: im->im_rb = (struct i2o_reply *)mf;
2447: mf->msgictx = IOP_ICTX;
2448: mf->msgtctx = im->im_tctx;
2449:
2450: for (i = 0; i < pt->pt_nbufs; i++) {
2451: ptb = &pt->pt_bufs[i];
2452: buf = sc->sc_ptb + i * IOP_MAX_XFER;
2453:
2454: if ((u_int)ptb->ptb_datalen > IOP_MAX_XFER) {
2455: rv = EINVAL;
2456: goto bad;
2457: }
2458:
2459: if (ptb->ptb_out != 0) {
2460: rv = copyin(ptb->ptb_data, buf, ptb->ptb_datalen);
2461: if (rv != 0)
2462: goto bad;
2463: }
2464:
2465: rv = iop_msg_map(sc, im, (u_int32_t *)mf, buf,
2466: ptb->ptb_datalen, ptb->ptb_out != 0);
2467: if (rv != 0)
2468: goto bad;
2469: mapped = 1;
2470: }
2471:
2472: if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2473: goto bad;
2474:
2475: i = (letoh32(im->im_rb->msgflags) >> 14) & ~3;
2476: if (i > IOP_MAX_MSG_SIZE)
2477: i = IOP_MAX_MSG_SIZE;
2478: if (i > pt->pt_replylen)
2479: i = pt->pt_replylen;
2480: if ((rv = copyout(im->im_rb, pt->pt_reply, i)) != 0)
2481: goto bad;
2482:
2483: iop_msg_unmap(sc, im);
2484: mapped = 0;
2485:
2486: for (i = 0; i < pt->pt_nbufs; i++) {
2487: ptb = &pt->pt_bufs[i];
2488: if (ptb->ptb_out != 0)
2489: continue;
2490: buf = sc->sc_ptb + i * IOP_MAX_XFER;
2491: rv = copyout(buf, ptb->ptb_data, ptb->ptb_datalen);
2492: if (rv != 0)
2493: break;
2494: }
2495:
2496: bad:
2497: if (mapped != 0)
2498: iop_msg_unmap(sc, im);
2499: if (im != NULL)
2500: iop_msg_free(sc, im);
2501: if (mf != NULL)
2502: free(mf, M_DEVBUF);
2503: return (rv);
2504: }
CVSweb