/*	$OpenBSD: bus_dma.c,v 1.10 2007/05/29 21:00:50 jason Exp $	*/
/*	$NetBSD: bus_dma.c,v 1.38 2003/10/30 08:44:13 scw Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#define _ARM32_BUS_DMA_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/cpu.h>

#include <arm/cpufunc.h>

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int, paddr_t *, int *, int);
struct arm32_dma_range *_bus_dma_inrange(struct arm32_dma_range *,
	    int, bus_addr_t);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
__inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
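
/*
 * Example (editor's sketch, not part of the original source): a platform
 * whose devices see the first 64MB of RAM at bus address 0 could describe
 * that window with one range (names and values hypothetical):
 *
 *	static struct arm32_dma_range foo_dma_ranges[1];
 *
 *	foo_dma_ranges[0].dr_sysbase = 0xc0000000;	(CPU physical base)
 *	foo_dma_ranges[0].dr_busbase = 0x00000000;	(the device's view)
 *	foo_dma_ranges[0].dr_len = 64 * 1024 * 1024;
 *
 * _bus_dma_inrange() then returns the range containing a given CPU
 * physical address, or NULL if that page is not DMA-able on this bus.
 */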

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct arm32_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct arm32_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	memset(mapstore, 0, mapsize);
	map = (struct arm32_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_proc = NULL;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}
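
/*
 * Example (editor's sketch, not part of the original source): drivers
 * reach this through the bus_dmamap_create() entry point of their DMA
 * tag, usually once at attach time.  The softc below is hypothetical:
 *
 *	if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
 *	    BUS_DMA_NOWAIT, &sc->sc_rxmap) != 0) {
 *		printf("%s: can't create rx DMA map\n",
 *		    sc->sc_dev.dv_xname);
 *		return;
 *	}
 */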

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * Explicit unload.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_proc = NULL;

	free(map, M_DEVBUF);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	paddr_t lastaddr;
	int seg, error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= ARM32_DMAMAP_COHERENT;

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_origbuf = buf;
		map->_dm_buftype = ARM32_BUFTYPE_LINEAR;
		map->_dm_proc = p;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
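
/*
 * Example (editor's sketch, not part of the original source): the usual
 * load/sync sequence before handing a buffer to a device; device-start
 * details are omitted:
 *
 *	error = bus_dmamap_load(sc->sc_dmat, map, buf, len, NULL,
 *	    BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
 *		    BUS_DMASYNC_PREWRITE);
 *		... program the device with map->dm_segs[] and start ...
 *	}
 */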

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
#if 0
	struct arm32_dma_range *dr;
#endif
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	/*
	 * Mbuf chains should almost never have coherent (i.e.
	 * un-cached) mappings, so clear that flag now.
	 */
	map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_origbuf = m0;
		map->_dm_buftype = ARM32_BUFTYPE_MBUF;
		map->_dm_proc = NULL;	/* always kernel */
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
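
/*
 * Example (editor's sketch, not part of the original source): a network
 * driver's transmit path loads the whole packet chain at once; each
 * non-empty mbuf contributes one or more dm_segs[] entries:
 *
 *	if (bus_dmamap_load_mbuf(sc->sc_dmat, txmap, m0,
 *	    BUS_DMA_NOWAIT) != 0) {
 *		m_freem(m0);	(too fragmented, or out of segments)
 *		return;
 *	}
 *	bus_dmamap_sync(sc->sc_dmat, txmap, 0, txmap->dm_mapsize,
 *	    BUS_DMASYNC_PREWRITE);
 */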

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	/* _bus_dmamap_load_buffer() clears this if we're not... */
	map->_dm_flags |= ARM32_DMAMAP_COHERENT;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_origbuf = uio;
		map->_dm_buftype = ARM32_BUFTYPE_UIO;
		map->_dm_proc = p;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_origbuf = NULL;
	map->_dm_buftype = ARM32_BUFTYPE_INVALID;
	map->_dm_proc = NULL;
}

static __inline void
_bus_dmamap_sync_linear(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	vaddr_t addr = (vaddr_t) map->_dm_origbuf;

	addr += offset;

	switch (ops) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		cpu_dcache_wbinv_range(addr, len);
		break;

	case BUS_DMASYNC_PREREAD:
		if (((addr | len) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range(addr, len);
		else
			cpu_dcache_wbinv_range(addr, len);
		break;

	case BUS_DMASYNC_PREWRITE:
		cpu_dcache_wb_range(addr, len);
		break;

	case BUS_DMASYNC_POSTREAD:
		cpu_dcache_inv_range(addr, len);
		break;
	}
}

static __inline void
_bus_dmamap_sync_mbuf(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct mbuf *m, *m0 = map->_dm_origbuf;
	bus_size_t minlen, moff;
	vaddr_t maddr;

	for (moff = offset, m = m0; m != NULL && len != 0;
	    m = m->m_next) {
		/* Find the beginning mbuf. */
		if (moff >= m->m_len) {
			moff -= m->m_len;
			continue;
		}

		/*
		 * Now at the first mbuf to sync; nail each one until
		 * we have exhausted the length.
		 */
		minlen = m->m_len - moff;
		if (len < minlen)
			minlen = len;

		maddr = mtod(m, vaddr_t);
		maddr += moff;

		/*
		 * We can save a lot of work here if we know the mapping
		 * is read-only at the MMU:
		 *
		 * If a mapping is read-only, no dirty cache blocks will
		 * exist for it.  If a writable mapping was made read-only,
		 * we know any dirty cache lines for the range will have
		 * been cleaned for us already.  Therefore, if the upper
		 * layer can tell us we have a read-only mapping, we can
		 * skip all cache cleaning.
		 *
		 * NOTE: This only works if we know the pmap cleans pages
		 * before making a read-write -> read-only transition.  If
		 * this ever becomes non-true (e.g. Physically Indexed
		 * cache), this will have to be revisited.
		 */
		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			/* if (! M_ROMAP(m)) */{
				cpu_dcache_wbinv_range(maddr, minlen);
				break;
			}
			/* else FALLTHROUGH */

		case BUS_DMASYNC_PREREAD:
			if (((maddr | minlen) & arm_dcache_align_mask) == 0)
				cpu_dcache_inv_range(maddr, minlen);
			else
				cpu_dcache_wbinv_range(maddr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			/* if (! M_ROMAP(m)) */
				cpu_dcache_wb_range(maddr, minlen);
			break;
		}
		moff = 0;
		len -= minlen;
	}
}

static __inline void
_bus_dmamap_sync_uio(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct uio *uio = map->_dm_origbuf;
	struct iovec *iov;
	bus_size_t minlen, ioff;
	vaddr_t addr;

	for (iov = uio->uio_iov, ioff = offset; len != 0; iov++) {
		/* Find the beginning iovec. */
		if (ioff >= iov->iov_len) {
			ioff -= iov->iov_len;
			continue;
		}

		/*
		 * Now at the first iovec to sync; nail each one until
		 * we have exhausted the length.
		 */
		minlen = iov->iov_len - ioff;
		if (len < minlen)
			minlen = len;

		addr = (vaddr_t) iov->iov_base;
		addr += ioff;

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			cpu_dcache_wbinv_range(addr, minlen);
			break;

		case BUS_DMASYNC_PREREAD:
			if (((addr | minlen) & arm_dcache_align_mask) == 0)
				cpu_dcache_inv_range(addr, minlen);
			else
				cpu_dcache_wbinv_range(addr, minlen);
			break;

		case BUS_DMASYNC_PREWRITE:
			cpu_dcache_wb_range(addr, minlen);
			break;
		}
		ioff = 0;
		len -= minlen;
	}
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 *
 * This version works for the Virtually Indexed Virtually Tagged
 * cache found on 32-bit ARM processors.
 *
 * XXX Should have separate versions for write-through vs.
 * XXX write-back caches.  We currently assume write-back
 * XXX here, which is not as efficient as it could be for
 * XXX the write-through case.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{

#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("_bus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if (len == 0 || (offset + len) > map->dm_mapsize)
		panic("_bus_dmamap_sync: bad length");
#endif

	/*
	 * For a virtually-indexed write-back cache, we need
	 * to do the following things:
	 *
	 *	PREREAD -- Invalidate the D-cache.  We do this
	 *	here in case a write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache.  Note that if
	 *	we are doing a PREREAD|PREWRITE, we can collapse
	 *	the whole thing into a single Wb-Inv.
	 *
	 *	POSTREAD -- Invalidate the D-Cache.  Contents of
	 *	the cache could be from before a device wrote
	 *	to the memory.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/* Skip cache frobbing if mapping was COHERENT. */
	if (map->_dm_flags & ARM32_DMAMAP_COHERENT) {
		/* Drain the write buffer. */
		cpu_drain_writebuf();
		return;
	}

	/*
	 * If the mapping belongs to a non-kernel vmspace, and the
	 * vmspace has not been active since the last time a full
	 * cache flush was performed, we don't need to do anything.
	 */
	if (__predict_false(map->_dm_proc != NULL &&
	    map->_dm_proc->p_vmspace->vm_map.pmap->pm_cstate.cs_cache_d == 0))
		return;

	switch (map->_dm_buftype) {
	case ARM32_BUFTYPE_LINEAR:
		_bus_dmamap_sync_linear(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_MBUF:
		_bus_dmamap_sync_mbuf(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_UIO:
		_bus_dmamap_sync_uio(t, map, offset, len, ops);
		break;

	case ARM32_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: ARM32_BUFTYPE_INVALID");
		break;

	default:
		printf("unknown buffer type %d\n", map->_dm_buftype);
		panic("_bus_dmamap_sync");
	}

	/* Drain the write buffer. */
	cpu_drain_writebuf();
}
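
/*
 * Example (editor's sketch, not part of the original source): the
 * PRE/POST pairing around a single device-to-memory transfer:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_PREREAD);
 *	... start the device, wait for the DMA to complete ...
 *	bus_dmamap_sync(sc->sc_dmat, map, 0, len, BUS_DMASYNC_POSTREAD);
 *	... the CPU may now safely read the buffer ...
 *
 * As the panic above enforces, PREREAD|PREWRITE or POSTREAD|POSTWRITE
 * may be combined in one call, but PRE and POST operations may not.
 */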

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

extern paddr_t physical_start;
extern paddr_t physical_end;

int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	struct arm32_dma_range *dr;
	int error, i;

#ifdef DEBUG_DMA
	printf("dmamem_alloc t=%p size=%lx align=%lx boundary=%lx "
	    "segs=%p nsegs=%x rsegs=%p flags=%x\n", t, size, alignment,
	    boundary, segs, nsegs, rsegs, flags);
#endif

	if ((dr = t->_ranges) != NULL) {
		error = ENOMEM;
		for (i = 0; i < t->_nranges; i++, dr++) {
			if (dr->dr_len == 0)
				continue;
			error = _bus_dmamem_alloc_range(t, size, alignment,
			    boundary, segs, nsegs, rsegs, flags,
			    trunc_page(dr->dr_sysbase),
			    trunc_page(dr->dr_sysbase + dr->dr_len));
			if (error == 0)
				break;
		}
	} else {
		error = _bus_dmamem_alloc_range(t, size, alignment, boundary,
		    segs, nsegs, rsegs, flags, trunc_page(physical_start),
		    trunc_page(physical_end));
	}

#ifdef DEBUG_DMA
	printf("dmamem_alloc: =%d\n", error);
#endif

	return (error);
}
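
/*
 * Example (editor's sketch, not part of the original source): allocating
 * a page of DMA-safe memory and freeing it again; rseg receives the
 * number of segments actually used:
 *
 *	bus_dma_segment_t seg;
 *	int rseg;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 *	    &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 */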

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~L2_S_CACHE_MASK;
				PTE_SYNC(ptep);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
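
/*
 * Example (editor's sketch, not part of the original source): mapping
 * memory from bus_dmamem_alloc() with BUS_DMA_COHERENT yields an
 * uncached kernel virtual address, which is what a descriptor ring
 * shared with a device wants on a VIVT-cache ARM.  The ring structure
 * is hypothetical:
 *
 *	caddr_t kva;
 *
 *	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE, &kva,
 *	    BUS_DMA_COHERENT) != 0)
 *		goto fail;
 *	sc->sc_desc = (struct foo_desc *)kva;
 */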

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%lx\n", t, kva,
	    (unsigned long)size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	size = round_page(size);
	uvm_km_free(kernel_map, (vaddr_t)kva, size);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (atop(segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	struct arm32_dma_range *dr;
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	pd_entry_t *pde;
	pt_entry_t pte;
	int seg;
	pmap_t pmap;
	pt_entry_t *ptep;

#ifdef DEBUG_DMA
	printf("_bus_dmamap_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
	    buf, buflen, flags, first);
#endif	/* DEBUG_DMA */

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->_dm_flags &=
					    ~ARM32_DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KDASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV);
				if (__predict_false((pte & L2_TYPE_MASK)
				    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->_dm_flags &=
						    ~ARM32_DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->_dm_flags &=
						    ~ARM32_DMAMAP_COHERENT;
					}
				}
			}
		} else {
			(void) pmap_extract(pmap, vaddr, &curaddr);
			map->_dm_flags &= ~ARM32_DMAMAP_COHERENT;
		}

		/*
		 * Make sure we're in an allowed DMA range.
		 */
		if (t->_ranges != NULL) {
			/* XXX cache last result? */
			dr = _bus_dma_inrange(t->_ranges, t->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);

			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			    map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			    (map->dm_segs[seg].ds_addr & bmask) ==
			    (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
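
/*
 * Worked example (editor's note, not part of the original source) of the
 * boundary clamp above: with _dm_boundary = 0x10000, bmask is ~0xffff.
 * For curaddr = 0x2000f800, baddr = (0x2000f800 + 0x10000) & ~0xffff =
 * 0x20010000, so sgsize is clamped to 0x800 and the next chunk starts a
 * new segment at the 64KB line, as DMA engines whose address counters
 * cannot carry across such a boundary require.
 */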

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	TAILQ_INIT(&mlist);
	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
	printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = TAILQ_NEXT(m, pageq);

	for (; m != TAILQ_END(&mlist); m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * Check if a memory region intersects with a DMA range, and return the
 * page-rounded intersection if it does.
 */
int
arm32_dma_range_intersect(struct arm32_dma_range *ranges, int nranges,
    paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep)
{
	struct arm32_dma_range *dr;
	int i;

	if (ranges == NULL)
		return (0);

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (dr->dr_sysbase <= pa &&
		    pa < (dr->dr_sysbase + dr->dr_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(min(pa + size,
			    dr->dr_sysbase + dr->dr_len) - pa);
			return (1);
		}
		if (pa < dr->dr_sysbase && dr->dr_sysbase < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(dr->dr_sysbase);
			*sizep = round_page(min((pa + size) - dr->dr_sysbase,
			    dr->dr_len));
			return (1);
		}
	}

	/* No intersection found. */
	return (0);
}
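
/*
 * Worked example (editor's note, not part of the original source): for
 * a range with dr_sysbase = 0x20000000 and dr_len = 0x01000000, a query
 * with pa = 0x20fff000 and size = 0x3000 starts inside the range, so
 * *pap = 0x20fff000 and *sizep = round_page(min(0x21002000, 0x21000000)
 * - 0x20fff000) = 0x1000: only the first page of the region is reachable
 * through this DMA window.
 */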

/*
 * probably should be ppc_space_copy
 */

#define _CONCAT(A,B) A ## B
#define __C(A,B)	_CONCAT(A,B)

#define BUS_SPACE_READ_RAW_MULTI_N(BYTES,SHIFT,TYPE)			\
void									\
__C(bus_space_read_raw_multi_,BYTES)(bus_space_tag_t bst,		\
    bus_space_handle_t h, bus_addr_t o, u_int8_t *dst, bus_size_t size) \
{									\
	TYPE *rdst = (TYPE *)dst;					\
	int i;								\
	int count = size >> SHIFT;					\
									\
	for (i = 0; i < count; i++) {					\
		rdst[i] = __bs_rs(BYTES, bst, h, o);			\
	}								\
}
BUS_SPACE_READ_RAW_MULTI_N(2,1,u_int16_t)
BUS_SPACE_READ_RAW_MULTI_N(4,2,u_int32_t)

#define BUS_SPACE_WRITE_RAW_MULTI_N(BYTES,SHIFT,TYPE)			\
void									\
__C(bus_space_write_raw_multi_,BYTES)(bus_space_tag_t bst,		\
    bus_space_handle_t h, bus_addr_t o, const u_int8_t *src,		\
    bus_size_t size)							\
{									\
	int i;								\
	TYPE *rsrc = (TYPE *)src;					\
	int count = size >> SHIFT;					\
									\
	for (i = 0; i < count; i++) {					\
		__bs_ws(BYTES, bst, h, o, rsrc[i]);			\
	}								\
}

BUS_SPACE_WRITE_RAW_MULTI_N(2,1,u_int16_t)
BUS_SPACE_WRITE_RAW_MULTI_N(4,2,u_int32_t)
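
/*
 * Example (editor's sketch, not part of the original source): the raw
 * multi routines generated above stream a byte buffer to or from a
 * single device register, without byte-order conversion, e.g. feeding
 * a FIFO 16 bits at a time.  The tag, handle, and register offset are
 * hypothetical; size is in bytes and must be a multiple of the width:
 *
 *	bus_space_write_raw_multi_2(sc->sc_iot, sc->sc_ioh,
 *	    FOO_REG_FIFO, buf, len);
 */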