/* Annotation of sys/arch/amd64/pci/iommu.c, Revision 1.1.1.1 */
1.1 nbrk 1: /* $OpenBSD: iommu.c,v 1.20 2007/05/27 21:44:23 jason Exp $ */
2:
3: /*
4: * Copyright (c) 2005 Jason L. Wright (jason@thought.net)
5: * All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: *
16: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18: * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19: * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22: * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24: * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25: * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26: * POSSIBILITY OF SUCH DAMAGE.
27: */
28:
29: #include <sys/types.h>
30: #include <sys/param.h>
31: #include <sys/time.h>
32: #include <sys/systm.h>
33: #include <sys/errno.h>
34: #include <sys/device.h>
35: #include <sys/lock.h>
36: #include <sys/extent.h>
37: #include <sys/malloc.h>
38:
39: #include <uvm/uvm_extern.h>
40:
41: #define _X86_BUS_DMA_PRIVATE
42: #include <machine/bus.h>
43:
44: #include <machine/pio.h>
45: #include <machine/intr.h>
46:
47: #include <dev/isa/isareg.h>
48: #include <dev/isa/isavar.h>
49: #include <dev/pci/pcivar.h>
50: #include <dev/pci/pcireg.h>
51: #include <dev/pci/pcidevs.h>
52:
53: #define MCANB_CTRL 0x40 /* Machine Check, NorthBridge */
54: #define SCRUB_CTRL 0x58 /* dram/l2/dcache */
55: #define GART_APCTRL 0x90 /* aperture control */
56: #define GART_APBASE 0x94 /* aperture base */
57: #define GART_TBLBASE 0x98 /* aperture table base */
58: #define GART_CACHECTRL 0x9c /* aperture cache control */
59:
60: #define MCANB_CORRECCEN 0x00000001 /* correctable ecc error */
61: #define MCANB_UNCORRECCEN 0x00000002 /* uncorrectable ecc error */
62: #define MCANB_CRCERR0EN 0x00000004 /* hypertrans link 0 crc */
63: #define MCANB_CRCERR1EN 0x00000008 /* hypertrans link 1 crc */
64: #define MCANB_CRCERR2EN 0x00000010 /* hypertrans link 2 crc */
65: #define MCANB_SYNCPKT0EN 0x00000020 /* hypertrans link 0 sync */
66: #define MCANB_SYNCPKT1EN 0x00000040 /* hypertrans link 1 sync */
67: #define MCANB_SYNCPKT2EN 0x00000080 /* hypertrans link 2 sync */
68: #define MCANB_MSTRABRTEN 0x00000100 /* master abort error */
69: #define MCANB_TGTABRTEN 0x00000200 /* target abort error */
70: #define MCANB_GARTTBLWKEN 0x00000400 /* gart table walk error */
71: #define MCANB_ATOMICRMWEN 0x00000800 /* atomic r/m/w error */
72: #define MCANB_WCHDOGTMREN 0x00001000 /* watchdog timer error */
73:
74: #define GART_APCTRL_ENABLE 0x00000001 /* enable */
75: #define GART_APCTRL_SIZE 0x0000000e /* size mask */
76: #define GART_APCTRL_SIZE_32M 0x00000000 /* 32M */
77: #define GART_APCTRL_SIZE_64M 0x00000002 /* 64M */
78: #define GART_APCTRL_SIZE_128M 0x00000004 /* 128M */
79: #define GART_APCTRL_SIZE_256M 0x00000006 /* 256M */
80: #define GART_APCTRL_SIZE_512M 0x00000008 /* 512M */
81: #define GART_APCTRL_SIZE_1G 0x0000000a /* 1G */
82: #define GART_APCTRL_SIZE_2G 0x0000000c /* 2G */
83: #define GART_APCTRL_DISCPU 0x00000010 /* disable CPU access */
84: #define GART_APCTRL_DISIO 0x00000020 /* disable IO access */
85: #define GART_APCTRL_DISTBL 0x00000040 /* disable table walk probe */
86:
87: #define GART_APBASE_MASK 0x00007fff /* base [39:25] */
88:
89: #define GART_TBLBASE_MASK 0xfffffff0 /* table base [39:12] */
90:
91: #define GART_PTE_VALID 0x00000001 /* valid */
92: #define GART_PTE_COHERENT 0x00000002 /* coherent */
93: #define GART_PTE_PHYSHI 0x00000ff0 /* phys addr[39:32] */
94: #define GART_PTE_PHYSLO 0xfffff000 /* phys addr[31:12] */
95:
96: #define GART_CACHE_INVALIDATE 0x00000001 /* invalidate (s/c) */
97: #define GART_CACHE_PTEERR 0x00000002 /* pte error */
98:
99: #define IOMMU_START 0x80000000 /* beginning */
100: #define IOMMU_END 0xffffffff /* end */
101: #define IOMMU_SIZE 512 /* size in MB */
102: #define IOMMU_ALIGN IOMMU_SIZE
103:
104: extern paddr_t avail_end;
105: extern struct extent *iomem_ex;
106:
int amdgarts;			/* number of GARTs found and enabled */
int amdgart_enable = 0;		/* set non-zero to activate the GART IOMMU */

/*
 * Per-GART (one per CPU node's AMD64 miscellaneous-control function)
 * software state.  All entries share the same extent, page table and
 * scribble page (see amdgart_probe()).
 */
struct amdgart_softc {
	pci_chipset_tag_t g_pc;		/* PCI chipset handle */
	pcitag_t g_tag;			/* tag of the misc-control function */
	struct extent *g_ex;		/* DVA allocator (shared) */
	paddr_t g_pa;			/* aperture base physical address */
	paddr_t g_scribpa;		/* physical addr of scribble page */
	void *g_scrib;			/* scribble page: target of idle PTEs */
	u_int32_t g_scribpte;		/* precomputed PTE for scribble page */
	u_int32_t *g_pte;		/* GART page table (shared) */
	bus_dma_tag_t g_dmat;		/* parent bus_dma tag we wrap */
} *amdgart_softcs;
121:
122: void amdgart_invalidate_wait(void);
123: void amdgart_invalidate(void);
124: void amdgart_probe(struct pcibus_attach_args *);
125: void amdgart_dumpregs(void);
126: int amdgart_iommu_map(bus_dmamap_t, struct extent *, bus_dma_segment_t *);
127: int amdgart_iommu_unmap(struct extent *, bus_dma_segment_t *);
128: int amdgart_reload(struct extent *, bus_dmamap_t);
129: int amdgart_ok(pci_chipset_tag_t, pcitag_t);
130: void amdgart_initpt(struct amdgart_softc *, u_long);
131:
132: int amdgart_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
133: bus_size_t, int, bus_dmamap_t *);
134: void amdgart_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
135: int amdgart_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
136: struct proc *, int);
137: int amdgart_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *, int);
138: int amdgart_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int);
139: int amdgart_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
140: bus_dma_segment_t *, int, bus_size_t, int);
141: void amdgart_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
142: void amdgart_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
143: bus_size_t, int);
144:
145: int amdgart_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t,
146: bus_dma_segment_t *, int, int *, int);
147: void amdgart_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int);
148: int amdgart_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *, int, size_t,
149: caddr_t *, int);
150: void amdgart_dmamem_unmap(bus_dma_tag_t, caddr_t, size_t);
151: paddr_t amdgart_dmamem_mmap(bus_dma_tag_t, bus_dma_segment_t *, int, off_t,
152: int, int);
153:
/*
 * bus_dma tag handed to the PCI bus once the GART is up: every
 * operation is redirected through the amdgart_* wrappers below,
 * which splice DMA segments into the GART aperture.
 */
struct x86_bus_dma_tag amdgart_bus_dma_tag = {
	NULL,			/* _may_bounce */
	amdgart_dmamap_create,
	amdgart_dmamap_destroy,
	amdgart_dmamap_load,
	amdgart_dmamap_load_mbuf,
	amdgart_dmamap_load_uio,
	amdgart_dmamap_load_raw,
	amdgart_dmamap_unload,
	/*
	 * NOTE(review): presumably the _dmamap_sync slot; amdgart_dmamap_sync
	 * is defined in this file but not installed here — confirm against
	 * struct x86_bus_dma_tag in <machine/bus.h>.
	 */
	NULL,
	amdgart_dmamem_alloc,
	amdgart_dmamem_free,
	amdgart_dmamem_map,
	amdgart_dmamem_unmap,
	amdgart_dmamem_mmap,
};
170:
171: void
172: amdgart_invalidate_wait(void)
173: {
174: u_int32_t v;
175: int i, n;
176:
177: for (n = 0; n < amdgarts; n++) {
178: for (i = 1000; i > 0; i--) {
179: v = pci_conf_read(amdgart_softcs[n].g_pc,
180: amdgart_softcs[n].g_tag, GART_CACHECTRL);
181: if ((v & GART_CACHE_INVALIDATE) == 0)
182: break;
183: delay(1);
184: }
185: if (i == 0)
186: printf("GART%d: timeout\n", n);
187: }
188: }
189:
190: void
191: amdgart_invalidate(void)
192: {
193: int n;
194:
195: for (n = 0; n < amdgarts; n++)
196: pci_conf_write(amdgart_softcs[n].g_pc,
197: amdgart_softcs[n].g_tag, GART_CACHECTRL,
198: GART_CACHE_INVALIDATE);
199: amdgart_invalidate_wait();
200: }
201:
202: void
203: amdgart_dumpregs(void)
204: {
205: int n, i, dirty;
206: u_int8_t *p;
207:
208: for (n = 0; n < amdgarts; n++) {
209: printf("GART%d:\n", n);
210: printf(" apctl %x\n", pci_conf_read(amdgart_softcs[n].g_pc,
211: amdgart_softcs[n].g_tag, GART_APCTRL));
212: printf(" apbase %x\n", pci_conf_read(amdgart_softcs[n].g_pc,
213: amdgart_softcs[n].g_tag, GART_APBASE));
214: printf(" tblbase %x\n", pci_conf_read(amdgart_softcs[n].g_pc,
215: amdgart_softcs[n].g_tag, GART_TBLBASE));
216: printf(" cachectl %x\n", pci_conf_read(amdgart_softcs[n].g_pc,
217: amdgart_softcs[n].g_tag, GART_CACHECTRL));
218:
219: p = amdgart_softcs[n].g_scrib;
220: dirty = 0;
221: for (i = 0; i < PAGE_SIZE; i++, p++)
222: if (*p != '\0')
223: dirty++;
224: printf(" scribble: %s\n", dirty ? "dirty" : "clean");
225: }
226: }
227:
228: int
229: amdgart_ok(pci_chipset_tag_t pc, pcitag_t tag)
230: {
231: pcireg_t v;
232:
233: v = pci_conf_read(pc, tag, PCI_ID_REG);
234: if (PCI_VENDOR(v) != PCI_VENDOR_AMD)
235: return (0);
236: if (PCI_PRODUCT(v) != PCI_PRODUCT_AMD_AMD64_MISC)
237: return (0);
238:
239: v = pci_conf_read(pc, tag, GART_APCTRL);
240: if (v & GART_APCTRL_ENABLE)
241: return (0);
242:
243: return (1);
244: }
245:
246: void
247: amdgart_probe(struct pcibus_attach_args *pba)
248: {
249: int dev, func, count = 0, r;
250: u_long dvabase = (u_long)-1, mapsize, ptesize;
251: pcitag_t tag;
252: pcireg_t v;
253: struct pglist plist;
254: void *scrib = NULL;
255: struct extent *ex = NULL;
256: u_int32_t *pte;
257: paddr_t ptepa;
258:
259: if (amdgart_enable == 0)
260: return;
261:
262: TAILQ_INIT(&plist);
263:
264: for (count = 0, dev = 24; dev < 32; dev++) {
265: for (func = 0; func < 8; func++) {
266: tag = pci_make_tag(pba->pba_pc, 0, dev, func);
267:
268: if (amdgart_ok(pba->pba_pc, tag))
269: count++;
270: }
271: }
272:
273: if (count == 0)
274: return;
275:
276: amdgart_softcs = malloc(sizeof(*amdgart_softcs) * count, M_DEVBUF,
277: M_NOWAIT);
278: if (amdgart_softcs == NULL) {
279: printf("\nGART: can't get softc");
280: goto err;
281: }
282:
283: dvabase = IOMMU_START;
284:
285: mapsize = IOMMU_SIZE * 1024 * 1024;
286: ptesize = mapsize / (PAGE_SIZE / sizeof(u_int32_t));
287:
288: r = uvm_pglistalloc(ptesize, ptesize, trunc_page(avail_end),
289: ptesize, ptesize, &plist, 1, 0);
290: if (r != 0) {
291: printf("\nGART: failed to get pte pages");
292: goto err;
293: }
294: ptepa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&plist));
295: pte = (u_int32_t *)pmap_map_nc_direct(TAILQ_FIRST(&plist));
296:
297: ex = extent_create("iommu", dvabase, dvabase + mapsize - 1, M_DEVBUF,
298: NULL, NULL, EX_NOWAIT | EX_NOCOALESCE);
299: if (ex == NULL) {
300: printf("\nGART: extent create failed");
301: goto err;
302: }
303:
304: scrib = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
305: if (scrib == NULL) {
306: printf("\nGART: didn't get scribble page");
307: goto err;
308: }
309: bzero(scrib, PAGE_SIZE);
310:
311: for (count = 0, dev = 24; dev < 32; dev++) {
312: for (func = 0; func < 8; func++) {
313: tag = pci_make_tag(pba->pba_pc, 0, dev, func);
314:
315: if (!amdgart_ok(pba->pba_pc, tag))
316: continue;
317:
318: v = pci_conf_read(pba->pba_pc, tag, GART_APCTRL);
319: v |= GART_APCTRL_DISCPU | GART_APCTRL_DISTBL |
320: GART_APCTRL_DISIO;
321: v &= ~(GART_APCTRL_ENABLE | GART_APCTRL_SIZE);
322: switch (IOMMU_SIZE) {
323: case 32:
324: v |= GART_APCTRL_SIZE_32M;
325: break;
326: case 64:
327: v |= GART_APCTRL_SIZE_64M;
328: break;
329: case 128:
330: v |= GART_APCTRL_SIZE_128M;
331: break;
332: case 256:
333: v |= GART_APCTRL_SIZE_256M;
334: break;
335: case 512:
336: v |= GART_APCTRL_SIZE_512M;
337: break;
338: case 1024:
339: v |= GART_APCTRL_SIZE_1G;
340: break;
341: case 2048:
342: v |= GART_APCTRL_SIZE_2G;
343: break;
344: default:
345: printf("\nGART: bad size");
346: return;
347: }
348: pci_conf_write(pba->pba_pc, tag, GART_APCTRL, v);
349:
350: pci_conf_write(pba->pba_pc, tag, GART_APBASE,
351: dvabase >> 25);
352:
353: pci_conf_write(pba->pba_pc, tag, GART_TBLBASE,
354: (ptepa >> 8) & GART_TBLBASE_MASK);
355:
356: v = pci_conf_read(pba->pba_pc, tag, GART_APCTRL);
357: v |= GART_APCTRL_ENABLE;
358: v &= ~GART_APCTRL_DISIO;
359: pci_conf_write(pba->pba_pc, tag, GART_APCTRL, v);
360:
361: amdgart_softcs[count].g_pc = pba->pba_pc;
362: amdgart_softcs[count].g_tag = tag;
363: amdgart_softcs[count].g_ex = ex;
364: amdgart_softcs[count].g_pa = dvabase;
365: pmap_extract(pmap_kernel(), (vaddr_t)scrib,
366: &amdgart_softcs[count].g_scribpa);
367: amdgart_softcs[count].g_scrib = scrib;
368: amdgart_softcs[count].g_scribpte =
369: GART_PTE_VALID | GART_PTE_COHERENT |
370: ((amdgart_softcs[count].g_scribpa >> 28) &
371: GART_PTE_PHYSHI) |
372: (amdgart_softcs[count].g_scribpa &
373: GART_PTE_PHYSLO);
374: amdgart_softcs[count].g_pte = pte;
375: amdgart_softcs[count].g_dmat = pba->pba_dmat;
376:
377: amdgart_initpt(&amdgart_softcs[count],
378: ptesize / sizeof(*amdgart_softcs[count].g_pte));
379:
380: printf("\niommu%d at cpu%d: base 0x%lx length %dMB pte 0x%lx",
381: count, dev - 24, dvabase, IOMMU_SIZE, ptepa);
382: count++;
383: }
384: }
385:
386: pba->pba_dmat = &amdgart_bus_dma_tag;
387: amdgarts = count;
388:
389: return;
390:
391: err:
392: if (ex != NULL)
393: extent_destroy(ex);
394: if (scrib != NULL)
395: free(scrib, M_DEVBUF);
396: if (amdgart_softcs != NULL)
397: free(amdgart_softcs, M_DEVBUF);
398: if (!TAILQ_EMPTY(&plist))
399: uvm_pglistfree(&plist);
400: }
401:
402: void
403: amdgart_initpt(struct amdgart_softc *sc, u_long nent)
404: {
405: u_long i;
406:
407: for (i = 0; i < nent; i++)
408: sc->g_pte[i] = sc->g_scribpte;
409: amdgart_invalidate();
410: }
411:
412: int
413: amdgart_reload(struct extent *ex, bus_dmamap_t dmam)
414: {
415: int i, j, err;
416:
417: for (i = 0; i < dmam->dm_nsegs; i++) {
418: psize_t len;
419:
420: len = dmam->dm_segs[i].ds_len;
421: err = amdgart_iommu_map(dmam, ex, &dmam->dm_segs[i]);
422: if (err) {
423: for (j = 0; j < i - 1; j++)
424: amdgart_iommu_unmap(ex, &dmam->dm_segs[j]);
425: return (err);
426: }
427: }
428: return (0);
429: }
430:
/*
 * Map one DMA segment into the GART aperture: allocate a
 * device-virtual range from the extent, rewrite seg->ds_addr to the
 * DVA, and fill in the corresponding page-table entries with the
 * segment's physical pages.
 *
 * Note the ordering: `base' captures the original physical address of
 * the segment BEFORE ds_addr is overwritten with the DVA below.
 *
 * All GARTs share one extent and one page table, so only
 * amdgart_softcs[0] is consulted here.  The caller is responsible for
 * calling amdgart_invalidate() once all segments are mapped.
 */
int
amdgart_iommu_map(bus_dmamap_t dmam, struct extent *ex, bus_dma_segment_t *seg)
{
	paddr_t base, end, idx;
	psize_t alen;
	u_long res;
	int err, s;
	u_int32_t pgno, flags;

	/* Page-align the physical span covered by this segment. */
	base = trunc_page(seg->ds_addr);
	end = roundup(seg->ds_addr + seg->ds_len, PAGE_SIZE);
	alen = end - base;

	/* Carve a DVA range out of the aperture, honoring the boundary. */
	s = splhigh();
	err = extent_alloc(ex, alen, PAGE_SIZE, 0, dmam->_dm_boundary,
	    EX_NOWAIT, &res);
	splx(s);
	if (err) {
		printf("GART: extent_alloc %d\n", err);
		return (err);
	}

	/* Redirect the segment to the DVA, keeping the page offset. */
	seg->ds_addr = res | (seg->ds_addr & PGOFSET);

	for (idx = 0; idx < alen; idx += PAGE_SIZE) {
		/* PTE index: offset of this DVA page from aperture base. */
		pgno = ((res + idx) - amdgart_softcs[0].g_pa) >> PGSHIFT;
		/*
		 * PTE format: phys[39:32] lands in bits [11:4] (hence the
		 * >> 28), phys[31:12] in bits [31:12].
		 */
		flags = GART_PTE_VALID | GART_PTE_COHERENT |
		    (((base + idx) >> 28) & GART_PTE_PHYSHI) |
		    ((base + idx) & GART_PTE_PHYSLO);
		amdgart_softcs[0].g_pte[pgno] = flags;
	}

	return (0);
}
465:
/*
 * Undo amdgart_iommu_map() for one segment: repoint its page-table
 * entries at the scribble page, then release the DVA range back to the
 * extent.  Caller is responsible for amdgart_invalidate().
 *
 * Here seg->ds_addr holds the DVA assigned at map time, so `base' is
 * an aperture-relative address, not a physical one.
 */
int
amdgart_iommu_unmap(struct extent *ex, bus_dma_segment_t *seg)
{
	paddr_t base, end, idx;
	psize_t alen;
	int err, s;
	u_int32_t pgno;

	base = trunc_page(seg->ds_addr);
	end = roundup(seg->ds_addr + seg->ds_len, PAGE_SIZE);
	alen = end - base;

	/*
	 * order is significant here; invalidate the iommu page table
	 * entries, then mark them as freed in the extent.
	 */

	for (idx = 0; idx < alen; idx += PAGE_SIZE) {
		/* PTE index: offset of this DVA page from aperture base. */
		pgno = ((base - amdgart_softcs[0].g_pa) + idx) >> PGSHIFT;
		/* Park freed slots on the scribble page, never invalid. */
		amdgart_softcs[0].g_pte[pgno] = amdgart_softcs[0].g_scribpte;
	}

	s = splhigh();
	err = extent_free(ex, base, alen, EX_NOWAIT);
	splx(s);
	if (err) {
		/* XXX Shouldn't happen, but if it does, I think we lose. */
		printf("GART: extent_free %d\n", err);
		return (err);
	}

	return (0);
}
499:
500: int
501: amdgart_dmamap_create(bus_dma_tag_t tag, bus_size_t size, int nsegments,
502: bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
503: {
504: return (bus_dmamap_create(amdgart_softcs[0].g_dmat, size, nsegments,
505: maxsegsz, boundary, flags, dmamp));
506: }
507:
508: void
509: amdgart_dmamap_destroy(bus_dma_tag_t tag, bus_dmamap_t dmam)
510: {
511: bus_dmamap_destroy(amdgart_softcs[0].g_dmat, dmam);
512: }
513:
514: int
515: amdgart_dmamap_load(bus_dma_tag_t tag, bus_dmamap_t dmam, void *buf,
516: bus_size_t buflen, struct proc *p, int flags)
517: {
518: int err;
519:
520: err = bus_dmamap_load(amdgart_softcs[0].g_dmat, dmam, buf, buflen,
521: p, flags);
522: if (err)
523: return (err);
524: err = amdgart_reload(amdgart_softcs[0].g_ex, dmam);
525: if (err)
526: bus_dmamap_unload(amdgart_softcs[0].g_dmat, dmam);
527: else
528: amdgart_invalidate();
529: return (err);
530: }
531:
532: int
533: amdgart_dmamap_load_mbuf(bus_dma_tag_t tag, bus_dmamap_t dmam,
534: struct mbuf *chain, int flags)
535: {
536: int err;
537:
538: err = bus_dmamap_load_mbuf(amdgart_softcs[0].g_dmat, dmam,
539: chain, flags);
540: if (err)
541: return (err);
542: err = amdgart_reload(amdgart_softcs[0].g_ex, dmam);
543: if (err)
544: bus_dmamap_unload(amdgart_softcs[0].g_dmat, dmam);
545: else
546: amdgart_invalidate();
547: return (err);
548: }
549:
550: int
551: amdgart_dmamap_load_uio(bus_dma_tag_t tag, bus_dmamap_t dmam,
552: struct uio *uio, int flags)
553: {
554: int err;
555:
556: err = bus_dmamap_load_uio(amdgart_softcs[0].g_dmat, dmam, uio, flags);
557: if (err)
558: return (err);
559: err = amdgart_reload(amdgart_softcs[0].g_ex, dmam);
560: if (err)
561: bus_dmamap_unload(amdgart_softcs[0].g_dmat, dmam);
562: else
563: amdgart_invalidate();
564: return (err);
565: }
566:
567: int
568: amdgart_dmamap_load_raw(bus_dma_tag_t tag, bus_dmamap_t dmam,
569: bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
570: {
571: int err;
572:
573: err = bus_dmamap_load_raw(amdgart_softcs[0].g_dmat, dmam, segs, nsegs,
574: size, flags);
575: if (err)
576: return (err);
577: err = amdgart_reload(amdgart_softcs[0].g_ex, dmam);
578: if (err)
579: bus_dmamap_unload(amdgart_softcs[0].g_dmat, dmam);
580: else
581: amdgart_invalidate();
582: return (err);
583: }
584:
585: void
586: amdgart_dmamap_unload(bus_dma_tag_t tag, bus_dmamap_t dmam)
587: {
588: int i;
589:
590: for (i = 0; i < dmam->dm_nsegs; i++)
591: amdgart_iommu_unmap(amdgart_softcs[0].g_ex, &dmam->dm_segs[i]);
592: amdgart_invalidate();
593: bus_dmamap_unload(amdgart_softcs[0].g_dmat, dmam);
594: }
595:
596: void
597: amdgart_dmamap_sync(bus_dma_tag_t tag, bus_dmamap_t dmam, bus_addr_t offset,
598: bus_size_t size, int ops)
599: {
600: /*
601: * XXX how do we deal with non-coherent mappings? We don't
602: * XXX allow them right now.
603: */
604: bus_dmamap_sync(amdgart_softcs[0].g_dmat, dmam, offset, size, ops);
605: }
606:
607: int
608: amdgart_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size, bus_size_t alignment,
609: bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
610: int flags)
611: {
612: return (bus_dmamem_alloc(amdgart_softcs[0].g_dmat, size, alignment,
613: boundary, segs, nsegs, rsegs, flags));
614: }
615:
616: void
617: amdgart_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs)
618: {
619: bus_dmamem_free(amdgart_softcs[0].g_dmat, segs, nsegs);
620: }
621:
622: int
623: amdgart_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
624: size_t size, caddr_t *kvap, int flags)
625: {
626: return (bus_dmamem_map(amdgart_softcs[0].g_dmat, segs, nsegs, size,
627: kvap, flags));
628: }
629:
630: void
631: amdgart_dmamem_unmap(bus_dma_tag_t tag, caddr_t kva, size_t size)
632: {
633: bus_dmamem_unmap(amdgart_softcs[0].g_dmat, kva, size);
634: }
635:
636: paddr_t
637: amdgart_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
638: off_t off, int prot, int flags)
639: {
640: return (bus_dmamem_mmap(amdgart_softcs[0].g_dmat, segs, nsegs, off,
641: prot, flags));
642: }
/* CVSweb */