Annotation of sys/arch/sgi/sgi/bus_dma.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: bus_dma.c,v 1.2 2007/07/18 20:03:51 miod Exp $ */
2:
3: /*
4: * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
5: *
6: * Redistribution and use in source and binary forms, with or without
7: * modification, are permitted provided that the following conditions
8: * are met:
9: * 1. Redistributions of source code must retain the above copyright
10: * notice, this list of conditions and the following disclaimer.
11: * 2. Redistributions in binary form must reproduce the above copyright
12: * notice, this list of conditions and the following disclaimer in the
13: * documentation and/or other materials provided with the distribution.
14: *
15: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16: * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17: * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
19: * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25: * SUCH DAMAGE.
26: *
27: */
28: #include <sys/param.h>
29: #include <sys/systm.h>
30: #include <sys/kernel.h>
31: #include <sys/proc.h>
32: #include <sys/malloc.h>
33: #include <sys/mbuf.h>
34: #include <sys/user.h>
35:
36: #include <uvm/uvm_extern.h>
37:
38: #include <mips64/archtype.h>
39: #include <machine/cpu.h>
40: #include <machine/autoconf.h>
41:
42: #include <machine/bus.h>
43:
44: /*
45: * Common function for DMA map creation. May be called by bus-specific
46: * DMA map creation functions.
47: */
48: int
49: _dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
50: bus_dma_tag_t t;
51: bus_size_t size;
52: int nsegments;
53: bus_size_t maxsegsz;
54: bus_size_t boundary;
55: int flags;
56: bus_dmamap_t *dmamp;
57: {
58: struct machine_bus_dmamap *map;
59: void *mapstore;
60: size_t mapsize;
61:
62: /*
63: * Allocate and initialize the DMA map. The end of the map
64: * is a variable-sized array of segments, so we allocate enough
65: * room for them in one shot.
66: *
67: * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
68: * of ALLOCNOW notifies others that we've reserved these resources,
69: * and they are not to be freed.
70: *
71: * The bus_dmamap_t includes one bus_dma_segment_t, hence
72: * the (nsegments - 1).
73: */
74: mapsize = sizeof(struct machine_bus_dmamap) +
75: (sizeof(bus_dma_segment_t) * (nsegments - 1));
76: if ((mapstore = malloc(mapsize, M_DEVBUF,
77: (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
78: return (ENOMEM);
79:
80: bzero(mapstore, mapsize);
81: map = (struct machine_bus_dmamap *)mapstore;
82: map->_dm_size = size;
83: map->_dm_segcnt = nsegments;
84: map->_dm_maxsegsz = maxsegsz;
85: map->_dm_boundary = boundary;
86: map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
87:
88: *dmamp = map;
89: return (0);
90: }
91:
/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 *
 * Releases the map storage allocated by _dmamap_create(); the inline
 * segment array is part of the same allocation, so a single free()
 * suffices.
 */
void
_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	free(map, M_DEVBUF);
}
101:
102: /*
103: * Common function for loading a DMA map with a linear buffer. May
104: * be called by bus-specific DMA map load functions.
105: */
106: int
107: _dmamap_load(t, map, buf, buflen, p, flags)
108: bus_dma_tag_t t;
109: bus_dmamap_t map;
110: void *buf;
111: bus_size_t buflen;
112: struct proc *p;
113: int flags;
114: {
115: bus_size_t sgsize;
116: bus_addr_t curaddr, lastaddr, baddr, bmask;
117: caddr_t vaddr = buf;
118: int first, seg;
119: pmap_t pmap;
120: bus_size_t saved_buflen;
121:
122: /*
123: * Make sure that on error condition we return "no valid mappings".
124: */
125: map->dm_nsegs = 0;
126: map->dm_mapsize = 0;
127:
128: if (buflen > map->_dm_size)
129: return (EINVAL);
130:
131: if (p != NULL)
132: pmap = p->p_vmspace->vm_map.pmap;
133: else
134: pmap = pmap_kernel();
135:
136: lastaddr = ~0; /* XXX gcc */
137: bmask = ~(map->_dm_boundary - 1);
138: bmask &= t->_dma_mask;
139:
140: saved_buflen = buflen;
141: for (first = 1, seg = 0; buflen > 0; ) {
142: /*
143: * Get the physical address for this segment.
144: */
145: if (pmap_extract(pmap, (vaddr_t)vaddr, (paddr_t *)&curaddr) ==
146: FALSE)
147: panic("_dmapmap_load: pmap_extract(%x, %x) failed!",
148: pmap, vaddr);
149:
150: /*
151: * Compute the segment size, and adjust counts.
152: */
153: sgsize = NBPG - ((u_long)vaddr & PGOFSET);
154: if (buflen < sgsize)
155: sgsize = buflen;
156:
157: /*
158: * Make sure we don't cross any boundaries.
159: */
160: if (map->_dm_boundary > 0) {
161: baddr = (curaddr + map->_dm_boundary) & bmask;
162: if (sgsize > (baddr - curaddr))
163: sgsize = (baddr - curaddr);
164: }
165:
166: /*
167: * Insert chunk into a segment, coalescing with
168: * previous segment if possible.
169: */
170: if (first) {
171: map->dm_segs[seg].ds_addr =
172: (*t->_pa_to_device)(curaddr);
173: map->dm_segs[seg].ds_len = sgsize;
174: map->dm_segs[seg].ds_vaddr = (vaddr_t)vaddr;
175: first = 0;
176: } else {
177: if (curaddr == lastaddr &&
178: (map->dm_segs[seg].ds_len + sgsize) <=
179: map->_dm_maxsegsz &&
180: (map->_dm_boundary == 0 ||
181: (map->dm_segs[seg].ds_addr & bmask) ==
182: (curaddr & bmask)))
183: map->dm_segs[seg].ds_len += sgsize;
184: else {
185: if (++seg >= map->_dm_segcnt)
186: break;
187: map->dm_segs[seg].ds_addr =
188: (*t->_pa_to_device)(curaddr);
189: map->dm_segs[seg].ds_len = sgsize;
190: map->dm_segs[seg].ds_vaddr = (vaddr_t)vaddr;
191: }
192: }
193:
194: lastaddr = curaddr + sgsize;
195: vaddr += sgsize;
196: buflen -= sgsize;
197: }
198:
199: /*
200: * Did we fit?
201: */
202: if (buflen != 0)
203: return (EFBIG); /* XXX better return value here? */
204:
205: map->dm_nsegs = seg + 1;
206: map->dm_mapsize = saved_buflen;
207: return (0);
208: }
209:
210: /*
211: * Like _bus_dmamap_load(), but for mbufs.
212: */
213: int
214: _dmamap_load_mbuf(t, map, m, flags)
215: bus_dma_tag_t t;
216: bus_dmamap_t map;
217: struct mbuf *m;
218: int flags;
219: {
220: int i;
221: size_t len;
222:
223: map->dm_nsegs = 0;
224: map->dm_mapsize = 0;
225:
226: i = 0;
227: len = 0;
228: while (m) {
229: vaddr_t vaddr = mtod(m, vaddr_t);
230: long buflen = (long)m->m_len;
231:
232: len += buflen;
233: while (buflen > 0 && i < map->_dm_segcnt) {
234: paddr_t pa;
235: long incr;
236:
237: incr = min(buflen, NBPG);
238: buflen -= incr;
239: if (pmap_extract(pmap_kernel(), vaddr, &pa) == FALSE)
240: panic("_dmamap_load_mbuf: pmap_extract(%x, %x) failed!",
241: pmap_kernel(), vaddr);
242:
243: if (i > 0 && pa == (*t->_device_to_pa)(map->dm_segs[i-1].ds_addr + map->dm_segs[i-1].ds_len)
244: && ((map->dm_segs[i-1].ds_len + incr) < map->_dm_maxsegsz)) {
245: /* Hey, waddyaknow, they're contiguous */
246: map->dm_segs[i-1].ds_len += incr;
247: continue;
248: }
249: map->dm_segs[i].ds_addr =
250: (*t->_pa_to_device)(pa);
251: map->dm_segs[i].ds_vaddr = vaddr;
252: map->dm_segs[i].ds_len = incr;
253: i++;
254: vaddr += incr;
255: }
256: m = m->m_next;
257: if (m && i >= map->_dm_segcnt) {
258: /* Exceeded the size of our dmamap */
259: return EFBIG;
260: }
261: }
262: map->dm_nsegs = i;
263: map->dm_mapsize = len;
264: return (0);
265: }
266:
/*
 * Like _dmamap_load(), but for uios.
 *
 * Not implemented on this platform; always fails with EOPNOTSUPP.
 */
int
_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	return (EOPNOTSUPP);
}
279:
280: /*
281: * Like _dmamap_load(), but for raw memory allocated with
282: * bus_dmamem_alloc().
283: */
284: int
285: _dmamap_load_raw(t, map, segs, nsegs, size, flags)
286: bus_dma_tag_t t;
287: bus_dmamap_t map;
288: bus_dma_segment_t *segs;
289: int nsegs;
290: bus_size_t size;
291: int flags;
292: {
293: if (nsegs > map->_dm_segcnt || size > map->_dm_size)
294: return (EINVAL);
295:
296: /*
297: * Make sure we don't cross any boundaries.
298: */
299: if (map->_dm_boundary) {
300: bus_addr_t bmask = ~(map->_dm_boundary - 1);
301: int i;
302:
303: bmask &= t->_dma_mask;
304: for (i = 0; i < nsegs; i++) {
305: if (segs[i].ds_len > map->_dm_maxsegsz)
306: return (EINVAL);
307: if ((segs[i].ds_addr & bmask) !=
308: ((segs[i].ds_addr + segs[i].ds_len - 1) & bmask))
309: return (EINVAL);
310: }
311: }
312:
313: bcopy(segs, map->dm_segs, nsegs * sizeof(*segs));
314: map->dm_nsegs = nsegs;
315: map->dm_mapsize = size;
316: return (0);
317: }
318:
319: /*
320: * Common function for unloading a DMA map. May be called by
321: * bus-specific DMA map unload functions.
322: */
323: void
324: _dmamap_unload(t, map)
325: bus_dma_tag_t t;
326: bus_dmamap_t map;
327: {
328:
329: /*
330: * No resources to free; just mark the mappings as
331: * invalid.
332: */
333: map->dm_nsegs = 0;
334: map->dm_mapsize = 0;
335: }
336:
/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * `addr' is an offset (in bytes) into the mapped area, and `size' is
 * the number of bytes to synchronize from that offset.  The segments
 * are walked in order, skipping whole segments until the offset is
 * consumed, then performing the appropriate cache operation on each
 * (partial) segment via its recorded kernel virtual address.
 */
void
_dmamap_sync(t, map, addr, size, op)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t addr;
	bus_size_t size;
	int op;
{
#define SYNC_R 0	/* WB invalidate, WT invalidate */
#define SYNC_W 1	/* WB writeback + invalidate, WT unaffected */
#define SYNC_X 2	/* WB writeback + invalidate, WT invalidate */
	int nsegs;
	int curseg;

	nsegs = map->dm_nsegs;
	curseg = 0;

#ifdef DEBUG_BUSDMASYNC
	printf("dmasync %p:%p:%p:", map, addr, size);
	if (op & BUS_DMASYNC_PREWRITE) printf("PRW ");
	if (op & BUS_DMASYNC_PREREAD) printf("PRR ");
	if (op & BUS_DMASYNC_POSTWRITE) printf("POW ");
	if (op & BUS_DMASYNC_POSTREAD) printf("POR ");
	printf("\n");
#endif

	while (size && nsegs) {
		bus_addr_t vaddr;
		bus_size_t ssize;

		ssize = map->dm_segs[curseg].ds_len;
		vaddr = map->dm_segs[curseg].ds_vaddr;

		/*
		 * Consume the starting offset: skip this segment entirely
		 * if the offset covers it, otherwise sync only its tail.
		 */
		if (addr != 0) {
			if (addr >= ssize) {
				addr -= ssize;
				ssize = 0;
			} else {
				vaddr += addr;
				ssize -= addr;
				addr = 0;
			}
		}
		/* Never sync past the requested size. */
		if (ssize > size)
			ssize = size;

		/*
		 * Uncached XKPHYS mappings bypass the caches entirely;
		 * account for the bytes but skip the cache operation.
		 */
		if (IS_XKPHYS(vaddr) && XKPHYS_TO_CCA(vaddr) == CCA_NC) {
			size -= ssize;
			ssize = 0;
		}

		if (ssize != 0) {
#ifdef DEBUG_BUSDMASYNC_FRAG
			printf(" syncing %p:%p ", vaddr, ssize);
			if (op & BUS_DMASYNC_PREWRITE) printf("PRW ");
			if (op & BUS_DMASYNC_PREREAD) printf("PRR ");
			if (op & BUS_DMASYNC_POSTWRITE) printf("POW ");
			if (op & BUS_DMASYNC_POSTREAD) printf("POR ");
			printf("\n");
#endif
			/*
			 * If only PREWRITE is requested, writeback and
			 * invalidate. PREWRITE with PREREAD writebacks
			 * and invalidates *all* cache levels.
			 * Otherwise, just invalidate.
			 * POSTREAD and POSTWRITE are no-ops since
			 * we are not bouncing data.
			 */
			if (op & BUS_DMASYNC_PREWRITE) {
				if (op & BUS_DMASYNC_PREREAD)
					Mips_IOSyncDCache(vaddr, ssize, SYNC_X);
				else
					Mips_IOSyncDCache(vaddr, ssize, SYNC_W);
			} else if (op & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTREAD)) {
				Mips_IOSyncDCache(vaddr, ssize, SYNC_R);
			}
			size -= ssize;
		}
		curseg++;
		nsegs--;
	}

	/* Requested range extended past the end of the map. */
	if (size != 0) {
		panic("_dmamap_sync: ran off map!");
	}
}
427:
428: /*
429: * Common function for DMA-safe memory allocation. May be called
430: * by bus-specific DMA memory allocation functions.
431: */
432: int
433: _dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
434: bus_dma_tag_t t;
435: bus_size_t size, alignment, boundary;
436: bus_dma_segment_t *segs;
437: int nsegs;
438: int *rsegs;
439: int flags;
440: {
441: return (_dmamem_alloc_range(t, size, alignment, boundary,
442: segs, nsegs, rsegs, flags, 0, 0xf0000000));
443: }
444:
445: /*
446: * Common function for freeing DMA-safe memory. May be called by
447: * bus-specific DMA memory free functions.
448: */
449: void
450: _dmamem_free(t, segs, nsegs)
451: bus_dma_tag_t t;
452: bus_dma_segment_t *segs;
453: int nsegs;
454: {
455: vm_page_t m;
456: bus_addr_t addr;
457: struct pglist mlist;
458: int curseg;
459:
460: /*
461: * Build a list of pages to free back to the VM system.
462: */
463: TAILQ_INIT(&mlist);
464: for (curseg = 0; curseg < nsegs; curseg++) {
465: for (addr = segs[curseg].ds_addr;
466: addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
467: addr += PAGE_SIZE) {
468: m = PHYS_TO_VM_PAGE((*t->_device_to_pa)(addr));
469: TAILQ_INSERT_TAIL(&mlist, m, pageq);
470: }
471: }
472:
473: uvm_pglistfree(&mlist);
474: }
475:
476: /*
477: * Common function for mapping DMA-safe memory. May be called by
478: * bus-specific DMA memory map functions.
479: */
480: int
481: _dmamem_map(t, segs, nsegs, size, kvap, flags)
482: bus_dma_tag_t t;
483: bus_dma_segment_t *segs;
484: int nsegs;
485: size_t size;
486: caddr_t *kvap;
487: int flags;
488: {
489: vaddr_t va;
490: paddr_t pa;
491: bus_addr_t addr;
492: int curseg;
493:
494: if (nsegs == 1) {
495: if (flags & BUS_DMA_COHERENT)
496: *kvap = (caddr_t)PHYS_TO_XKPHYS(segs[0].ds_addr,
497: CCA_NC);
498: else
499: *kvap = (caddr_t)PHYS_TO_XKPHYS(segs[0].ds_addr,
500: CCA_NONCOHERENT);
501: return (0);
502: }
503:
504: size = round_page(size);
505: va = uvm_km_valloc(kernel_map, size);
506: if (va == 0)
507: return (ENOMEM);
508:
509: *kvap = (caddr_t)va;
510:
511: for (curseg = 0; curseg < nsegs; curseg++) {
512: for (addr = segs[curseg].ds_addr;
513: addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
514: addr += NBPG, va += NBPG, size -= NBPG) {
515: if (size == 0)
516: panic("_dmamem_map: size botch");
517: pa = (*t->_device_to_pa)(addr);
518: pmap_enter(pmap_kernel(), va, pa,
519: VM_PROT_READ | VM_PROT_WRITE,
520: VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
521: segs[curseg].ds_vaddr = va;
522:
523: if (flags & BUS_DMA_COHERENT &&
524: sys_config.system_type == SGI_O2)
525: pmap_page_cache(PHYS_TO_VM_PAGE(pa),
526: PV_UNCACHED);
527: }
528: pmap_update(pmap_kernel());
529: }
530:
531: return (0);
532: }
533:
534: /*
535: * Common function for unmapping DMA-safe memory. May be called by
536: * bus-specific DMA memory unmapping functions.
537: */
538: void
539: _dmamem_unmap(t, kva, size)
540: bus_dma_tag_t t;
541: caddr_t kva;
542: size_t size;
543: {
544: if (IS_XKPHYS((vaddr_t)kva))
545: return;
546:
547: size = round_page(size);
548: pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
549: pmap_update(pmap_kernel());
550: uvm_km_free(kernel_map, (vaddr_t)kva, size);
551: }
552:
553: /*
554: * Common function for mmap(2)'ing DMA-safe memory. May be called by
555: * bus-specific DMA mmap(2)'ing functions.
556: */
557: paddr_t
558: _dmamem_mmap(t, segs, nsegs, off, prot, flags)
559: bus_dma_tag_t t;
560: bus_dma_segment_t *segs;
561: int nsegs;
562: off_t off;
563: int prot, flags;
564: {
565: int i;
566:
567: for (i = 0; i < nsegs; i++) {
568: #ifdef DIAGNOSTIC
569: if (off & PGOFSET)
570: panic("_dmamem_mmap: offset unaligned");
571: if (segs[i].ds_addr & PGOFSET)
572: panic("_dmamem_mmap: segment unaligned");
573: if (segs[i].ds_len & PGOFSET)
574: panic("_dmamem_mmap: segment size not multiple"
575: " of page size");
576: #endif
577: if (off >= segs[i].ds_len) {
578: off -= segs[i].ds_len;
579: continue;
580: }
581:
582: return (atop((*t->_device_to_pa)(segs[i].ds_addr) + off));
583: }
584:
585: /* Page not found. */
586: return (-1);
587: }
588:
589: /**********************************************************************
590: * DMA utility functions
591: **********************************************************************/
592:
/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 *
 * The pages returned by the VM system are coalesced into as few
 * segments as possible; the number of segments actually used is
 * returned in *rsegs.  Returns 0 on success or the uvm_pglistalloc()
 * error on failure.
 */
int
_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
    flags, low, high)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
	vaddr_t low;
	vaddr_t high;
{
	vaddr_t curaddr, lastaddr;
	vm_page_t m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high,
	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	/* Seed the first segment with the first page. */
	lastaddr = segs[curseg].ds_addr =
	    (*t->_pa_to_device)(VM_PAGE_TO_PHYS(m));
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != TAILQ_END(&mlist); m = TAILQ_NEXT(m, pageq)) {
		curaddr = (*t->_pa_to_device)(VM_PAGE_TO_PHYS(m));
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_dmamem_alloc_range");
		}
#endif
		/* Extend the current segment if contiguous, else start a new one. */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
CVSweb