/*	$OpenBSD: bus_dma.c,v 1.5 2007/02/11 12:49:38 miod Exp $	*/
/*	$NetBSD: bus_dma.c,v 1.2 2001/06/10 02:31:25 briggs Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/extent.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/cmmu.h>
#include <machine/intr.h>

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int, paddr_t *, int *, int);

int	_bus_dmamem_alloc_range(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int, paddr_t, paddr_t);

/*
 * Common function for DMA map creation. May be called by bus-specific
 * DMA map creation functions.
 */
int
bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct m88k_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

	/*
	 * Allocate and initialize the DMA map. The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct m88k_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct m88k_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return (0);
}
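
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * typically creates a map once at attach time and destroys it on
 * detach.  The "sc" softc fields below are hypothetical.
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_dmamap) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
 */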

/*
 * Common function for DMA map destruction. May be called by bus-specific
 * DMA map destruction functions.
 */
void
bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	free(map, M_DEVBUF);
}

/*
 * Utility function to load a linear buffer. lastaddrp holds state
 * between invocations (for multiple-buffer loads). segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	if (p != NULL)
		pmap = vm_map_pmap(&p->p_vmspace->vm_map);
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap_extract(pmap, vaddr, (paddr_t *)&curaddr) == FALSE)
			return (EINVAL);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */

	return (0);
}
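
/*
 * Worked example of the coalescing above (addresses are hypothetical):
 * with 4KB pages, loading an 8KB buffer whose two pages translate to
 * physical 0x101000 and 0x102000 yields a single 8KB segment, since the
 * second chunk begins exactly at lastaddr and still fits _dm_maxsegsz.
 * If the second page instead mapped to 0x200000, the test would fail
 * and a second dm_segs[] entry would be started.
 */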

/*
 * Common function for loading a DMA map with a linear buffer. May
 * be called by bus-specific DMA map load functions.
 */
int
bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
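
/*
 * Illustrative transfer sequence (a sketch, not taken from this file;
 * "sc" and "xfer" are hypothetical).  A memory-to-device transfer
 * usually looks like:
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, xfer->buf,
 *	    xfer->len, NULL, BUS_DMA_NOWAIT);
 *	if (error != 0)
 *		return (error);
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, xfer->len,
 *	    BUS_DMASYNC_PREWRITE);
 *	... program the device from sc->sc_dmamap->dm_segs[] and start DMA ...
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, xfer->len,
 *	    BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 */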

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}
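
/*
 * Sketch of a typical transmit-path caller (hypothetical "sc", "txmap"
 * and "m0" names): a network driver loads an outgoing mbuf chain and,
 * on EFBIG, usually copies the chain into fewer mbufs and retries.
 *
 *	error = bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0, BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
 *		    BUS_DMASYNC_PREWRITE);
 *		... hand txmap->dm_segs[] to the chip ...
 *	}
 */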

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (resid > map->_dm_size)
		return (EINVAL);

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load. Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{
	if (nsegs > map->_dm_segcnt || size > map->_dm_size)
		return (EINVAL);

	/*
	 * Make sure we don't cross any boundaries.
	 */
	if (map->_dm_boundary) {
		bus_addr_t bmask = ~(map->_dm_boundary - 1);
		int i;

		for (i = 0; i < nsegs; i++) {
			if (segs[i].ds_len > map->_dm_maxsegsz)
				return (EINVAL);
			if ((segs[i].ds_addr & bmask) !=
			    ((segs[i].ds_addr + segs[i].ds_len - 1) & bmask))
				return (EINVAL);
		}
	}

	bcopy(segs, map->dm_segs, nsegs * sizeof(*segs));
	map->dm_nsegs = nsegs;
	return (0);
}

/*
 * Common function for unloading a DMA map. May be called by
 * chipset-specific DMA map unload functions.
 */
void
bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization. May be called
 * by chipset-specific DMA map synchronization functions.
 */

void
bus_dmamap_sync(t, map, offset, len, op)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int op;
{
	u_int nsegs;
	bus_dma_segment_t *seg;

	if (op & BUS_DMASYNC_PREREAD)
		op = DMA_CACHE_SYNC_INVAL;
	else if (op & BUS_DMASYNC_PREWRITE)
		op = DMA_CACHE_SYNC;
	else if (op & BUS_DMASYNC_POSTREAD)
		op = DMA_CACHE_INV;
	else
		return;

	nsegs = map->dm_nsegs;
	seg = map->dm_segs;
	while (nsegs != 0 && len != 0) {
		if (offset >= seg->ds_len) {
			offset -= seg->ds_len;
		} else {
			bus_addr_t addr;
			bus_size_t sublen;

			addr = seg->ds_addr + offset;
			sublen = seg->ds_len - offset;
			if (sublen > len)
				sublen = len;

			dma_cachectl_pa(addr, sublen, op);

			offset = 0;
			len -= sublen;
		}
		seg++;
		nsegs--;
	}
}
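
/*
 * Illustrative note on the op mapping above (an interpretation of the
 * DMA_CACHE_* names, not extra machinery in this file): PREREAD becomes
 * a write-back-and-invalidate, PREWRITE a write-back of dirty lines, and
 * POSTREAD an invalidate of lines the CPU may have refilled while the
 * device owned the memory; POSTWRITE needs no cache work here.  A
 * device-to-memory transfer therefore brackets the DMA like this:
 *
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... device writes the buffer by DMA ...
 *	bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTREAD);
 */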

/*
 * Common function for DMA-safe memory allocation. May be called
 * by bus-specific DMA memory allocation functions.
 */
int
bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	paddr_t avail_start = (paddr_t)-1, avail_end = 0;
	int bank;

	for (bank = 0; bank < vm_nphysseg; bank++) {
		if (avail_start > vm_physmem[bank].avail_start << PGSHIFT)
			avail_start = vm_physmem[bank].avail_start << PGSHIFT;
		if (avail_end < vm_physmem[bank].avail_end << PGSHIFT)
			avail_end = vm_physmem[bank].avail_end << PGSHIFT;
	}

	return _bus_dmamem_alloc_range(t, size, alignment, boundary, segs,
	    nsegs, rsegs, flags, avail_start, avail_end - PAGE_SIZE);
}
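
/*
 * Sketch of how DMA-safe memory from this allocator is usually consumed
 * (hypothetical names; error handling elided).  The physical segments
 * are allocated, mapped into kernel virtual space for the CPU, and then
 * loaded into a map for the device:
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *	caddr_t kva;
 *
 *	bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
 *	    BUS_DMA_NOWAIT);
 *	bus_dmamem_map(t, &seg, rseg, size, &kva, BUS_DMA_NOWAIT);
 *	bus_dmamap_load_raw(t, map, &seg, rseg, size, BUS_DMA_NOWAIT);
 */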

/*
 * Common function for freeing DMA-safe memory. May be called by
 * bus-specific DMA memory free functions.
 */
void
bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory. May be called by
 * bus-specific DMA memory map functions.
 */
int
bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	size = round_page(size);

	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}

/*
 * Common function for unmapping DMA-safe memory. May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("bus_dmamem_unmap");
#endif

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory. May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (atop(segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
    flags, low, high)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
	paddr_t low;
	paddr_t high;
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq);

	for (; m != TAILQ_END(&mlist); m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			panic("_bus_dmamem_alloc_range: uvm_pglistalloc "
			    "returned non-sensical address 0x%lx\n", curaddr);
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}
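
/*
 * Parameter note with a small numeric example (illustrative values
 * only): "alignment" constrains where the allocation may start and
 * "boundary" constrains what it may cross.  Asking for 64KB with
 * alignment = 8KB and boundary = 1MB requests physical pages starting
 * on an 8KB-aligned address, with no returned chunk straddling a 1MB
 * line; the loop above then merges physically contiguous pages into
 * segments.
 */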