/*	$OpenBSD: isa_machdep.c,v 1.60 2007/04/28 03:55:40 jsg Exp $	*/
/*	$NetBSD: isa_machdep.c,v 1.22 1997/06/12 23:57:32 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1993, 1994, 1996, 1997
 *	Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)isa.c	7.2 (Berkeley) 5/13/91
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include "ioapic.h"

#if NIOAPIC > 0
#include <machine/i82093var.h>
#include <machine/mpbiosvar.h>
#endif

#define _I386_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/intr.h>
#include <machine/pio.h>
#include <machine/cpufunc.h>
#include <machine/i8259.h>

#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>
#include <dev/isa/isadmavar.h>
#include <i386/isa/isa_machdep.h>

#include "isadma.h"

extern	paddr_t avail_end;

#define	IDTVEC(name)	__CONCAT(X,name)
/* default interrupt vector table entries */
typedef int (*vector)(void);
extern vector IDTVEC(intr)[];
void isa_strayintr(int);
void intr_calculatemasks(void);
int fakeintr(void *);

#if NISADMA > 0
int	_isa_bus_dmamap_create(bus_dma_tag_t, bus_size_t, int,
	    bus_size_t, bus_size_t, int, bus_dmamap_t *);
void	_isa_bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	_isa_bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int);
int	_isa_bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
	    struct mbuf *, int);
int	_isa_bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
	    struct uio *, int);
int	_isa_bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
	    bus_dma_segment_t *, int, bus_size_t, int);
void	_isa_bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	_isa_bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
	    bus_addr_t, bus_size_t, int);

int	_isa_bus_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);
void	_isa_bus_dmamem_free(bus_dma_tag_t,
	    bus_dma_segment_t *, int);
int	_isa_bus_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
	    int, size_t, caddr_t *, int);
void	_isa_bus_dmamem_unmap(bus_dma_tag_t, caddr_t, size_t);
paddr_t	_isa_bus_dmamem_mmap(bus_dma_tag_t, bus_dma_segment_t *,
	    int, off_t, int, int);

int	_isa_dma_check_buffer(void *, bus_size_t, int, bus_size_t,
	    struct proc *);
int	_isa_dma_alloc_bouncebuf(bus_dma_tag_t, bus_dmamap_t,
	    bus_size_t, int);
void	_isa_dma_free_bouncebuf(bus_dma_tag_t, bus_dmamap_t);

/*
 * Entry points for ISA DMA.  These are mostly wrappers around
 * the generic functions that understand how to deal with bounce
 * buffers, if necessary.
 */
struct i386_bus_dma_tag isa_bus_dma_tag = {
	NULL,			/* _cookie */
	_isa_bus_dmamap_create,
	_isa_bus_dmamap_destroy,
	_isa_bus_dmamap_load,
	_isa_bus_dmamap_load_mbuf,
	_isa_bus_dmamap_load_uio,
	_isa_bus_dmamap_load_raw,
	_isa_bus_dmamap_unload,
	_isa_bus_dmamap_sync,
	_isa_bus_dmamem_alloc,
	_isa_bus_dmamem_free,
	_isa_bus_dmamem_map,
	_isa_bus_dmamem_unmap,
	_isa_bus_dmamem_mmap,
};
#endif /* NISADMA > 0 */

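/*
 * Illustrative sketch (not compiled in): how a driver would drive the
 * tag above through the generic bus_dma interface.  The "buf" and "len"
 * names are hypothetical placeholders, not part of this file.
 */
#if 0
	bus_dmamap_t map;

	/* One contiguous segment, up to MAXPHYS, no boundary constraint. */
	if (bus_dmamap_create(&isa_bus_dma_tag, MAXPHYS, 1, MAXPHYS, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &map))
		return (ENOMEM);
	/* Loading may silently substitute a bounce buffer below 16M. */
	if (bus_dmamap_load(&isa_bus_dma_tag, map, buf, len, NULL,
	    BUS_DMA_NOWAIT))
		return (EAGAIN);
	/* Fill the bounce pages before a memory-to-device transfer... */
	bus_dmamap_sync(&isa_bus_dma_tag, map, 0, len, BUS_DMASYNC_PREWRITE);
	/* ...start the DMA, wait for completion, then finish up. */
	bus_dmamap_sync(&isa_bus_dma_tag, map, 0, len, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(&isa_bus_dma_tag, map);
#endif
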
/*
 * Fill in the default interrupt table (in case of a spurious interrupt
 * during kernel configuration) and set up the interrupt control unit.
 */
void
isa_defaultirq(void)
{
	int i;

	/* icu vectors */
	for (i = 0; i < ICU_LEN; i++)
		setgate(&idt[ICU_OFFSET + i], IDTVEC(intr)[i], 0,
		    SDT_SYS386IGT, SEL_KPL, GICODE_SEL);

	/* initialize 8259's */
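	/*
	 * The writes below follow the i8259A initialization sequence:
	 * 0x11 to the command port is ICW1 (edge-triggered, cascaded,
	 * ICW4 needed); the next three data-port writes are ICW2 (vector
	 * base), ICW3 (slave cascade wiring) and ICW4 (8086 mode, and
	 * optionally auto-EOI), after which OCW1 masks every pin.
	 */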
	outb(IO_ICU1, 0x11);		/* reset; program device, four bytes */
	outb(IO_ICU1+1, ICU_OFFSET);	/* starting at this vector index */
	outb(IO_ICU1+1, 1 << IRQ_SLAVE); /* slave on line 2 */
#ifdef AUTO_EOI_1
	outb(IO_ICU1+1, 2 | 1);		/* auto EOI, 8086 mode */
#else
	outb(IO_ICU1+1, 1);		/* 8086 mode */
#endif
	outb(IO_ICU1+1, 0xff);		/* leave interrupts masked */
	outb(IO_ICU1, 0x68);		/* special mask mode (if available) */
	outb(IO_ICU1, 0x0a);		/* Read IRR by default. */
#ifdef REORDER_IRQ
	outb(IO_ICU1, 0xc0 | (3 - 1));	/* pri order 3-7, 0-2 (com2 first) */
#endif

	outb(IO_ICU2, 0x11);		/* reset; program device, four bytes */
	outb(IO_ICU2+1, ICU_OFFSET+8);	/* starting at this vector index */
	outb(IO_ICU2+1, IRQ_SLAVE);
#ifdef AUTO_EOI_2
	outb(IO_ICU2+1, 2 | 1);		/* auto EOI, 8086 mode */
#else
	outb(IO_ICU2+1, 1);		/* 8086 mode */
#endif
	outb(IO_ICU2+1, 0xff);		/* leave interrupts masked */
	outb(IO_ICU2, 0x68);		/* special mask mode (if available) */
	outb(IO_ICU2, 0x0a);		/* Read IRR by default. */
}

void
isa_nodefaultirq(void)
{
	int i;

	/* icu vectors */
	for (i = 0; i < ICU_LEN; i++)
		unsetgate(&idt[ICU_OFFSET + i]);
}

/*
 * Handle an NMI, possibly a machine check.
 * Return true to panic the system, false to ignore.
 */
int
isa_nmi(void)
{
	/* This is historic garbage; these ports are not readable */
	log(LOG_CRIT, "Non-maskable interrupt, may be parity error\n");
	return (0);
}

u_long	intrstray[ICU_LEN];

/*
 * Caught a stray interrupt, notify
 */
void
isa_strayintr(int irq)
{
	/*
	 * Stray interrupts on irq 7 occur when an interrupt line is raised
	 * and then lowered before the CPU acknowledges it.  This generally
	 * means either the device is screwed or something is cli'ing too
	 * long and it's timing out.
	 */
	if (++intrstray[irq] <= 5)
		log(LOG_ERR, "stray interrupt %d%s\n", irq,
		    intrstray[irq] >= 5 ? "; stopped logging" : "");
}
256:
257: int intrtype[ICU_LEN], intrmask[ICU_LEN], intrlevel[ICU_LEN];
258: int iminlevel[ICU_LEN], imaxlevel[ICU_LEN];
259: struct intrhand *intrhand[ICU_LEN];
260:
261: int imask[NIPL]; /* Bitmask telling what interrupts are blocked. */
262: int iunmask[NIPL]; /* Bitmask telling what interrupts are accepted. */
263:
264: /*
265: * Recalculate the interrupt masks from scratch.
266: * We could code special registry and deregistry versions of this function that
267: * would be faster, but the code would be nastier, and we don't expect this to
268: * happen very much anyway.
269: */
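/*
 * Worked example (hypothetical): if IRQ 14's only handler runs at
 * IPL_BIO, the first two loops below put bit 14 into imask[IPL_BIO],
 * the hierarchy loop propagates it into every higher level, and the
 * final pass sets intrmask[14] = IMASK(IPL_BIO), so spl calls at or
 * above IPL_BIO block that line.
 */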
void
intr_calculatemasks(void)
{
	int irq, level, unusedirqs;
	struct intrhand *q;

	/* First, figure out which levels each IRQ uses. */
	unusedirqs = 0xffff;
	for (irq = 0; irq < ICU_LEN; irq++) {
		int levels = 0;
		for (q = intrhand[irq]; q; q = q->ih_next)
			levels |= 1 << IPL(q->ih_level);
		intrlevel[irq] = levels;
		if (levels)
			unusedirqs &= ~(1 << irq);
	}

	/* Then figure out which IRQs use each level. */
	for (level = 0; level < NIPL; level++) {
		int irqs = 0;
		for (irq = 0; irq < ICU_LEN; irq++)
			if (intrlevel[irq] & (1 << level))
				irqs |= 1 << irq;
		imask[level] = irqs | unusedirqs;
	}

	/*
	 * Initialize soft interrupt masks to block themselves.
	 */
	IMASK(IPL_SOFTAST) |= 1 << SIR_AST;
	IMASK(IPL_SOFTCLOCK) |= 1 << SIR_CLOCK;
	IMASK(IPL_SOFTNET) |= 1 << SIR_NET;
	IMASK(IPL_SOFTTTY) |= 1 << SIR_TTY;

	/*
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data.
	 */
	for (level = 0; level < NIPL - 1; level++)
		imask[level + 1] |= imask[level];

	/* And eventually calculate the complete masks. */
	for (irq = 0; irq < ICU_LEN; irq++) {
		int irqs = 1 << irq;
		int minlevel = IPL_NONE;
		int maxlevel = IPL_NONE;

		if (intrhand[irq] == NULL) {
			maxlevel = IPL_HIGH;
			irqs = IMASK(IPL_HIGH);
		} else {
			for (q = intrhand[irq]; q; q = q->ih_next) {
				irqs |= IMASK(q->ih_level);
				if (minlevel == IPL_NONE ||
				    q->ih_level < minlevel)
					minlevel = q->ih_level;
				if (q->ih_level > maxlevel)
					maxlevel = q->ih_level;
			}
		}
		if (irqs != IMASK(maxlevel))
			panic("irq %d level %x mask mismatch: %x vs %x", irq,
			    maxlevel, irqs, IMASK(maxlevel));

		intrmask[irq] = irqs;
		iminlevel[irq] = minlevel;
		imaxlevel[irq] = maxlevel;

#if 0
		printf("irq %d: level %x, mask 0x%x (%x)\n", irq,
		    imaxlevel[irq], intrmask[irq], IMASK(imaxlevel[irq]));
#endif
	}

	/* Lastly, determine which IRQs are actually in use. */
	{
		int irqs = 0;
		for (irq = 0; irq < ICU_LEN; irq++)
			if (intrhand[irq])
				irqs |= 1 << irq;
		if (irqs >= 0x100) /* any IRQs >= 8 in use */
			irqs |= 1 << IRQ_SLAVE;
		imen = ~irqs;
		SET_ICUS();
	}

	/* For speed of splx, provide the inverse of the interrupt masks. */
	for (irq = 0; irq < ICU_LEN; irq++)
		iunmask[irq] = ~imask[irq];
}

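/*
 * Placeholder handler: isa_intr_establish() points a static intrhand
 * at this while the masks are recalculated, so an interrupt arriving
 * in that window is dismissed without touching the real handler.
 */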
int
fakeintr(void *arg)
{
	return (0);
}

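/* Any IRQ is fair game except 2, the slave 8259's cascade input. */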
#define	LEGAL_IRQ(x)	((x) >= 0 && (x) < ICU_LEN && (x) != 2)

int
isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq)
{
	int i, bestirq, count;
	int tmp;
	struct intrhand **p, *q;

	if (type == IST_NONE)
		panic("intr_alloc: bogus type");

	bestirq = -1;
	count = -1;

	/* some interrupts should never be dynamically allocated */
	mask &= 0xdef8;

	/*
	 * XXX some interrupts will be used later (6 for fdc, 12 for pms).
	 * the right answer is to do "breadth-first" searching of devices.
	 */
	mask &= 0xefbf;

	for (i = 0; i < ICU_LEN; i++) {
		if (LEGAL_IRQ(i) == 0 || (mask & (1 << i)) == 0)
			continue;

		switch (intrtype[i]) {
		case IST_NONE:
			/*
			 * if nothing's using the irq, just return it
			 */
			*irq = i;
			return (0);

		case IST_EDGE:
		case IST_LEVEL:
			if (type != intrtype[i])
				continue;
			/*
			 * if the irq is shareable, count the number of other
			 * handlers, and if it's smaller than the last irq like
			 * this, remember it
			 *
			 * XXX We should probably also consider the
			 * interrupt level and stick IPL_TTY with other
			 * IPL_TTY, etc.
			 */
			for (p = &intrhand[i], tmp = 0; (q = *p) != NULL;
			     p = &q->ih_next, tmp++)
				;
			if ((bestirq == -1) || (count > tmp)) {
				bestirq = i;
				count = tmp;
			}
			break;

		case IST_PULSE:
			/* this just isn't shareable */
			continue;
		}
	}

	if (bestirq == -1)
		return (1);

	*irq = bestirq;

	return (0);
}

/*
 * Just check to see if an IRQ is available/can be shared.
 * 0 = interrupt not available
 * 1 = interrupt shareable
 * 2 = interrupt all to ourself
 */
int
isa_intr_check(isa_chipset_tag_t ic, int irq, int type)
{
	if (!LEGAL_IRQ(irq) || type == IST_NONE)
		return (0);

	switch (intrtype[irq]) {
	case IST_NONE:
		return (2);
		break;
	case IST_LEVEL:
		if (type != intrtype[irq])
			return (0);
		return (1);
		break;
	case IST_EDGE:
	case IST_PULSE:
		if (type != IST_NONE)
			return (0);
	}
	return (1);
}

/*
 * Set up an interrupt handler to start being called.
 * XXX PRONE TO RACE CONDITIONS, UGLY, 'INTERESTING' INSERTION ALGORITHM.
 */
void *
isa_intr_establish(isa_chipset_tag_t ic, int irq, int type, int level,
    int (*ih_fun)(void *), void *ih_arg, char *ih_what)
{
	struct intrhand **p, *q, *ih;
	static struct intrhand fakehand = {fakeintr};

#if NIOAPIC > 0
	struct mp_intr_map *mip;

	if (mp_busses != NULL) {
		int mpspec_pin = irq;
		int airq;

		if (mp_isa_bus == NULL)
			panic("no isa bus");

		for (mip = mp_isa_bus->mb_intrs; mip != NULL;
		    mip = mip->next) {
			if (mip->bus_pin == mpspec_pin) {
				airq = mip->ioapic_ih | irq;
				break;
			}
		}
		if (mip == NULL && mp_eisa_bus) {
			for (mip = mp_eisa_bus->mb_intrs; mip != NULL;
			    mip = mip->next) {
				if (mip->bus_pin == mpspec_pin) {
					airq = mip->ioapic_ih | irq;
					break;
				}
			}
		}

		/* no MP mapping found -- invent! */
		if (mip == NULL)
			airq = mpbios_invent(irq, type, mp_isa_bus->mb_idx);

		return (apic_intr_establish(airq, type, level, ih_fun,
		    ih_arg, ih_what));
	}
#endif
	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL) {
		printf("%s: isa_intr_establish: can't malloc handler info\n",
		    ih_what);
		return (NULL);
	}

	if (!LEGAL_IRQ(irq) || type == IST_NONE) {
		printf("%s: isa_intr_establish: bogus irq or type\n", ih_what);
		free(ih, M_DEVBUF);
		return (NULL);
	}
	switch (intrtype[irq]) {
	case IST_NONE:
		intrtype[irq] = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (type == intrtype[irq])
			break;
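		/* FALLTHROUGH */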
	case IST_PULSE:
		if (type != IST_NONE) {
			/*printf("%s: intr_establish: can't share %s with %s, irq %d\n",
			    ih_what, isa_intr_typename(intrtype[irq]),
			    isa_intr_typename(type), irq);*/
			free(ih, M_DEVBUF);
			return (NULL);
		}
		break;
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
		;

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_level = level;
	*p = &fakehand;

	intr_calculatemasks();

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_level = level;
	ih->ih_irq = irq;
	evcount_attach(&ih->ih_count, ih_what, (void *)&ih->ih_irq,
	    &evcount_intr);
	*p = ih;

	return (ih);
}

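/*
 * Illustrative sketch (not compiled in): typical use from a driver's
 * attach routine.  "sc", "ia" and "foointr" are hypothetical
 * placeholders, not part of this file.
 */
#if 0
	sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq, IST_EDGE,
	    IPL_TTY, foointr, sc, sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt\n");
		return;
	}
	/* ...and on detach: */
	isa_intr_disestablish(ia->ia_ic, sc->sc_ih);
#endif
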
/*
 * Deregister an interrupt handler.
 */
void
isa_intr_disestablish(isa_chipset_tag_t ic, void *arg)
{
	struct intrhand *ih = arg;
	int irq = ih->ih_irq;
	struct intrhand **p, *q;

#if NIOAPIC > 0
	if (irq & APIC_INT_VIA_APIC) {
		apic_intr_disestablish(arg);
		return;
	}
#endif

	if (!LEGAL_IRQ(irq))
		panic("intr_disestablish: bogus irq %d", irq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &intrhand[irq]; (q = *p) != NULL && q != ih; p = &q->ih_next)
		;
	if (q)
		*p = q->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	evcount_detach(&ih->ih_count);
	free(ih, M_DEVBUF);

	intr_calculatemasks();

	if (intrhand[irq] == NULL)
		intrtype[irq] = IST_NONE;
}

void
isa_attach_hook(struct device *parent, struct device *self,
    struct isabus_attach_args *iba)
{
	extern int isa_has_been_seen;

	/*
	 * Notify others that might need to know that the ISA bus
	 * has now been attached.
	 */
	if (isa_has_been_seen)
		panic("isaattach: ISA bus already seen!");
	isa_has_been_seen = 1;
}

#if NISADMA > 0
/**********************************************************************
 * bus.h dma interface entry points
 **********************************************************************/

#ifdef ISA_DMA_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	isa_dma_stats_loads;
u_long	isa_dma_stats_bounces;
u_long	isa_dma_stats_nbouncebufs;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * Create an ISA DMA map.
 */
int
_isa_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct i386_isa_dma_cookie *cookie;
	bus_dmamap_t map;
	int error, cookieflags;
	void *cookiestore;
	size_t cookiesize;

	/* Call common function to create the basic map. */
	error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
	    flags, dmamp);
	if (error)
		return (error);

	map = *dmamp;
	map->_dm_cookie = NULL;

	cookiesize = sizeof(struct i386_isa_dma_cookie);

	/*
	 * ISA only has 24-bits of address space.  This means
	 * we can't DMA to pages over 16M.  In order to DMA to
	 * arbitrary buffers, we use "bounce buffers" - pages
	 * in memory below the 16M boundary.  On DMA reads,
	 * DMA happens to the bounce buffers, and is copied into
	 * the caller's buffer.  On writes, data is copied into
	 * the bounce buffer, and the DMA happens from those
	 * pages.  To software using the DMA mapping interface,
	 * this looks simply like a data cache.
	 *
	 * If we have more than 16M of RAM in the system, we may
	 * need bounce buffers.  We check and remember that here.
	 *
	 * There are exceptions, however.  VLB devices can do
	 * 32-bit DMA, and indicate that here.
	 *
	 * ...or, there is an opposite case.  The most segments
	 * a transfer will require is (maxxfer / NBPG) + 1.  If
	 * the caller can't handle that many segments (e.g. the
	 * ISA DMA controller), we may have to bounce it as well.
	 */
	cookieflags = 0;
	if ((avail_end > ISA_DMA_BOUNCE_THRESHOLD &&
	    (flags & ISABUS_DMA_32BIT) == 0) ||
	    ((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
		cookieflags |= ID_MIGHT_NEED_BOUNCE;
		cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = malloc(cookiesize, M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	bzero(cookiestore, cookiesize);
	cookie = (struct i386_isa_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;

	if (cookieflags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Allocate the bounce pages now if the caller
		 * wishes us to do so.
		 */
		if ((flags & BUS_DMA_ALLOCNOW) == 0)
			goto out;

		error = _isa_dma_alloc_bouncebuf(t, map, size, flags);
	}

 out:
	if (error) {
		if (map->_dm_cookie != NULL)
			free(map->_dm_cookie, M_DEVBUF);
		_bus_dmamap_destroy(t, map);
	}
	return (error);
}

/*
 * Destroy an ISA DMA map.
 */
void
_isa_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie->id_flags & ID_HAS_BOUNCE)
		_isa_dma_free_bouncebuf(t, map);

	free(cookie, M_DEVBUF);
	_bus_dmamap_destroy(t, map);
}

/*
 * Load an ISA DMA map with a linear buffer.
 */
int
_isa_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
	int error;

	STAT_INCR(isa_dma_stats_loads);

	/*
	 * Check to see if we might need to bounce the transfer.
	 */
	if (cookie->id_flags & ID_MIGHT_NEED_BOUNCE) {
		/*
		 * Check if all pages are below the bounce
		 * threshold.  If they are, don't bother bouncing.
		 */
		if (_isa_dma_check_buffer(buf, buflen,
		    map->_dm_segcnt, map->_dm_boundary, p) == 0)
			return (_bus_dmamap_load(t, map, buf, buflen,
			    p, flags));

		STAT_INCR(isa_dma_stats_bounces);

		/*
		 * Allocate bounce pages, if necessary.
		 */
		if ((cookie->id_flags & ID_HAS_BOUNCE) == 0) {
			error = _isa_dma_alloc_bouncebuf(t, map, buflen,
			    flags);
			if (error)
				return (error);
		}

		/*
		 * Cache a pointer to the caller's buffer and
		 * load the DMA map with the bounce buffer.
		 */
		cookie->id_origbuf = buf;
		cookie->id_origbuflen = buflen;
		error = _bus_dmamap_load(t, map, cookie->id_bouncebuf,
		    buflen, p, flags);

		if (error) {
			/*
			 * Free the bounce pages, unless our resources
			 * are reserved for our exclusive use.
			 */
			if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
				_isa_dma_free_bouncebuf(t, map);
		}

		/* ...so _isa_bus_dmamap_sync() knows we're bouncing */
		cookie->id_flags |= ID_IS_BOUNCING;
	} else {
		/*
		 * Just use the generic load function.
		 */
		error = _bus_dmamap_load(t, map, buf, buflen, p, flags);
	}

	return (error);
}

/*
 * Like _isa_bus_dmamap_load(), but for mbufs.
 */
int
_isa_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m,
    int flags)
{

	panic("_isa_bus_dmamap_load_mbuf: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for uios.
 */
int
_isa_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	panic("_isa_bus_dmamap_load_uio: not implemented");
}

/*
 * Like _isa_bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_isa_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_isa_bus_dmamap_load_raw: not implemented");
}

/*
 * Unload an ISA DMA map.
 */
void
_isa_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * If we have bounce pages, free them, unless they're
	 * reserved for our exclusive use.
	 */
	if ((cookie->id_flags & ID_HAS_BOUNCE) &&
	    (map->_dm_flags & BUS_DMA_ALLOCNOW) == 0)
		_isa_dma_free_bouncebuf(t, map);

	cookie->id_flags &= ~ID_IS_BOUNCING;

	/*
	 * Do the generic bits of the unload.
	 */
	_bus_dmamap_unload(t, map);
}

/*
 * Synchronize an ISA DMA map.
 */
void
_isa_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int op)
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;

#ifdef DEBUG
	if ((op & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad offset");
		if (len == 0 || (offset + len) > map->dm_mapsize)
			panic("_isa_bus_dmamap_sync: bad length");
	}
#endif

	switch (op) {
	case BUS_DMASYNC_PREREAD:
		/*
		 * Nothing to do for pre-read.
		 */
		break;

	case BUS_DMASYNC_PREWRITE:
		/*
		 * If we're bouncing this transfer, copy the
		 * caller's buffer to the bounce buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			bcopy((char *)cookie->id_origbuf + offset,
			    cookie->id_bouncebuf + offset,
			    len);
		break;

	case BUS_DMASYNC_POSTREAD:
		/*
		 * If we're bouncing this transfer, copy the
		 * bounce buffer to the caller's buffer.
		 */
		if (cookie->id_flags & ID_IS_BOUNCING)
			bcopy((char *)cookie->id_bouncebuf + offset,
			    cookie->id_origbuf + offset,
			    len);
		break;

	case BUS_DMASYNC_POSTWRITE:
		/*
		 * Nothing to do for post-write.
		 */
		break;
	}

#if 0
	/* This is a noop anyhow, so why bother calling it? */
	_bus_dmamap_sync(t, map, op);
#endif
}

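/*
 * Note on use: bouncing only works when drivers bracket each transfer
 * with matching sync operations, e.g. BUS_DMASYNC_PREWRITE before
 * starting a memory-to-device DMA (filling the bounce pages above) and
 * BUS_DMASYNC_POSTREAD after a device-to-memory DMA completes (copying
 * the data back out).
 */
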
/*
 * Allocate memory safe for ISA DMA.
 */
int
_isa_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	paddr_t high;

	if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
		high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD);
	else
		high = trunc_page(avail_end);

	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, 0, high));
}

/*
 * Free memory safe for ISA DMA.
 */
void
_isa_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	_bus_dmamem_free(t, segs, nsegs);
}

/*
 * Map ISA DMA-safe memory into kernel virtual address space.
 */
int
_isa_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{

	return (_bus_dmamem_map(t, segs, nsegs, size, kvap, flags));
}

/*
 * Unmap ISA DMA-safe memory from kernel virtual address space.
 */
void
_isa_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

	_bus_dmamem_unmap(t, kva, size);
}

/*
 * mmap(2) ISA DMA-safe memory.
 */
paddr_t
_isa_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{

	return (_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags));
}

/**********************************************************************
 * ISA DMA utility functions
 **********************************************************************/

/*
 * Return 0 if all pages in the passed buffer lie within the DMA'able
 * range of RAM.
 */
int
_isa_dma_check_buffer(void *buf, bus_size_t buflen, int segcnt,
    bus_size_t boundary, struct proc *p)
{
	vaddr_t vaddr = (vaddr_t)buf;
	vaddr_t endva;
	paddr_t pa, lastpa;
	u_long pagemask = ~(boundary - 1);
	pmap_t pmap;
	int nsegs;

	endva = round_page(vaddr + buflen);

	nsegs = 1;
	lastpa = 0;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; vaddr < endva; vaddr += NBPG) {
		/*
		 * Get physical address for this segment.
		 */
		pmap_extract(pmap, (vaddr_t)vaddr, &pa);
		pa = trunc_page(pa);

		/*
		 * Is it below the DMA'able threshold?
		 */
		if (pa > ISA_DMA_BOUNCE_THRESHOLD)
			return (EINVAL);

		if (lastpa) {
			/*
			 * Check excessive segment count.
			 */
			if (lastpa + NBPG != pa) {
				if (++nsegs > segcnt)
					return (EFBIG);
			}

			/*
			 * Check boundary restriction.
			 */
			if (boundary) {
				if ((lastpa ^ pa) & pagemask)
					return (EINVAL);
			}
		}
		lastpa = pa;
	}

	return (0);
}

int
_isa_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, bus_size_t size,
    int flags)
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

	cookie->id_bouncebuflen = round_page(size);
	error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    NBPG, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _isa_bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (caddr_t *)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_isa_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= ID_HAS_BOUNCE;
		STAT_INCR(isa_dma_stats_nbouncebufs);
	}

	return (error);
}

void
_isa_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct i386_isa_dma_cookie *cookie = map->_dm_cookie;

	STAT_DECR(isa_dma_stats_nbouncebufs);

	_isa_bus_dmamem_unmap(t, cookie->id_bouncebuf,
	    cookie->id_bouncebuflen);
	_isa_bus_dmamem_free(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~ID_HAS_BOUNCE;
}
#endif /* NISADMA > 0 */