
sys/arch/vax/vax/bus_dma.c, Revision 1.1.1.1

/*	$OpenBSD: bus_dma.c,v 1.16 2005/11/08 15:05:56 martin Exp $	*/
/*	$NetBSD: bus_dma.c,v 1.5 1999/11/13 00:32:20 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * bus_dma routines for vax. File copied from arm32/bus_dma.c.
 * NetBSD: bus_dma.c,v 1.11 1998/09/21 22:53:35 thorpej Exp
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#define _VAX_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include <machine/ka43.h>
#include <machine/sid.h>

extern vaddr_t avail_start, avail_end, virtual_avail;

int	_bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *,
	    bus_size_t, struct proc *, int, paddr_t *, int *, int);
int	_bus_dma_inrange(bus_dma_segment_t *, int, bus_addr_t);
int	_bus_dmamem_alloc_range(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t*, int, int *, int, vaddr_t, vaddr_t);
/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
	bus_dma_tag_t t;
	bus_size_t size;
	int nsegments;
	bus_size_t maxsegsz;
	bus_size_t boundary;
	int flags;
	bus_dmamap_t *dmamp;
{
	struct vax_bus_dmamap *map;
	void *mapstore;
	size_t mapsize;

#ifdef DEBUG_DMA
	printf("dmamap_create: t=%p size=%lx nseg=%x msegsz=%lx boundary=%lx flags=%x\n",
	    t, size, nsegments, maxsegsz, boundary, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	mapsize = sizeof(struct vax_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
	if ((mapstore = malloc(mapsize, M_DEVBUF,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);

	bzero(mapstore, mapsize);
	map = (struct vax_bus_dmamap *)mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
#ifdef DEBUG_DMA
	printf("dmamap_create:map=%p\n", map);
#endif	/* DEBUG_DMA */
	return (0);
}
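
/*
 * Illustrative usage sketch (hypothetical driver code, not compiled):
 * the functions in this file are normally reached through the
 * machine-independent bus_dma(9) wrappers, using the bus_dma_tag_t a
 * driver receives from its parent bus at attach time.  The names
 * "sc_dmat" and "example_dma_xfer" below are assumptions for the sake
 * of the example.
 */
#if 0
static int
example_dma_xfer(bus_dma_tag_t sc_dmat, void *buf, bus_size_t len)
{
	bus_dmamap_t map;
	int error;

	/* One segment per page touched by the buffer is always enough. */
	error = bus_dmamap_create(sc_dmat, len, atop(round_page(len)) + 1,
	    len, 0, BUS_DMA_NOWAIT, &map);
	if (error)
		return (error);

	/* Translate the kernel buffer into physical DMA segments. */
	error = bus_dmamap_load(sc_dmat, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error) {
		bus_dmamap_destroy(sc_dmat, map);
		return (error);
	}

	/* Sync before the device reads the buffer (a no-op on vax). */
	bus_dmamap_sync(sc_dmat, map, 0, len, BUS_DMASYNC_PREWRITE);

	/* ... program the device with map->dm_segs[0 .. dm_nsegs - 1] ... */

	bus_dmamap_sync(sc_dmat, map, 0, len, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc_dmat, map);
	bus_dmamap_destroy(sc_dmat, map);
	return (0);
}
#endif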

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

#ifdef DEBUG_DMA
	printf("dmamap_destroy: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if (map->dm_nsegs > 0)
		printf("bus_dmamap_destroy() called for map with valid mappings\n");
#endif	/* DIAGNOSTIC */
	free(map, M_DEVBUF);
}

/*
 * Common function for loading a DMA map with a linear buffer.  May
 * be called by bus-specific DMA map load functions.
 */
int
_bus_dmamap_load(t, map, buf, buflen, p, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
{
	paddr_t lastaddr;
	int seg, error;

#ifdef DEBUG_DMA
	printf("dmamap_load: t=%p map=%p buf=%p len=%lx p=%p f=%d\n",
	    t, map, buf, buflen, p, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return (EINVAL);

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(t, map, m0, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct mbuf *m0;
	int flags;
{
	paddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: t=%p map=%p m0=%p f=%d\n",
	    t, map, m0, flags);
#endif	/* DEBUG_DMA */

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif	/* DIAGNOSTIC */

	if (m0->m_pkthdr.len > map->_dm_size)
		return (EINVAL);

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    NULL, flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
#ifdef DEBUG_DMA
	printf("dmamap_load_mbuf: error=%d\n", error);
#endif	/* DEBUG_DMA */
	return (error);
}
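
/*
 * Illustrative usage sketch (hypothetical driver code, not compiled):
 * a network driver hands a whole transmit mbuf chain to the mbuf
 * variant and lets it walk m_next itself; "sc_dmat" and
 * "example_load_mbuf_chain" are assumed names.
 */
#if 0
static int
example_load_mbuf_chain(bus_dma_tag_t sc_dmat, bus_dmamap_t map,
    struct mbuf *m0)
{
	int error;

	error = bus_dmamap_load_mbuf(sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* map->dm_segs[0 .. map->dm_nsegs - 1] now describe the packet. */
	bus_dmamap_sync(sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}
#endif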

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(t, map, uio, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	struct uio *uio;
	int flags;
{
	paddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct proc *p = NULL;
	struct iovec *iov;
	caddr_t addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		p = uio->uio_procp;
#ifdef DIAGNOSTIC
		if (p == NULL)
			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
#endif
	}

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (caddr_t)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    p, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
_bus_dmamap_load_raw(t, map, segs, nsegs, size, flags)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t size;
	int flags;
{

	panic("_bus_dmamap_load_raw: not implemented");
}

/*
 * Common function for unloading a DMA map.  May be called by
 * bus-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(t, map)
	bus_dma_tag_t t;
	bus_dmamap_t map;
{

#ifdef DEBUG_DMA
	printf("dmamap_unload: t=%p map=%p\n", t, map);
#endif	/* DEBUG_DMA */

	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by bus-specific DMA map synchronization functions.
 */
void
_bus_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
#ifdef DEBUG_DMA
	printf("dmamap_sync: t=%p map=%p offset=%lx len=%lx ops=%x\n",
	    t, map, offset, len, ops);
#endif	/* DEBUG_DMA */
	/*
	 * A vax only has snoop-cache, so this routine is a no-op.
	 */
	return;
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */

int
_bus_dmamem_alloc(t, size, alignment, boundary, segs, nsegs, rsegs, flags)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
{
	int error;

	error =  (_bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, round_page(avail_start),
	    trunc_page(avail_end)));
	return(error);
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(t, segs, nsegs)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}
	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	size_t size;
	caddr_t *kvap;
	int flags;
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;

	/*
	 * Special case (but common):
	 * If there is only one physical segment then the already-mapped
	 * virtual address is returned, since all physical memory is already
	 * in the beginning of kernel virtual memory.
	 */
	if (nsegs == 1) {
		*kvap = (caddr_t)(segs[0].ds_addr | KERNBASE);
		/*
		 * KA43 (3100/m76) must have its DMA-safe memory accessed
		 * through DIAGMEM. Remap it here.
		 */
		if (vax_boardtype == VAX_BTYP_43) {
			pmap_map((vaddr_t)*kvap, segs[0].ds_addr|KA43_DIAGMEM,
			    (segs[0].ds_addr|KA43_DIAGMEM) + size,
			    VM_PROT_READ|VM_PROT_WRITE);
		}
		return 0;
	}
	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += NBPG, va += NBPG, size -= NBPG) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			if (vax_boardtype == VAX_BTYP_43)
				addr |= KA43_DIAGMEM;
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
		}
	}
	pmap_update(pmap_kernel());
	return (0);
}
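
/*
 * Illustrative usage sketch (hypothetical driver code, not compiled):
 * descriptor rings and other device-visible structures usually go
 * through the bus_dmamem_* path so they end up physically contiguous.
 * The names "example_alloc_ring" and "ring" are assumptions for the
 * sake of the example.
 */
#if 0
static int
example_alloc_ring(bus_dma_tag_t sc_dmat, bus_size_t ringsize,
    bus_dma_segment_t *seg, caddr_t *ring)
{
	int rsegs, error;

	/* Page-aligned, single segment, no boundary restriction. */
	error = bus_dmamem_alloc(sc_dmat, ringsize, PAGE_SIZE, 0,
	    seg, 1, &rsegs, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* With one segment this usually just returns the KERNBASE mapping. */
	error = bus_dmamem_map(sc_dmat, seg, rsegs, ringsize, ring,
	    BUS_DMA_NOWAIT);
	if (error)
		bus_dmamem_free(sc_dmat, seg, rsegs);
	return (error);
}
#endif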

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(t, kva, size)
	bus_dma_tag_t t;
	caddr_t kva;
	size_t size;
{

#ifdef DEBUG_DMA
	printf("dmamem_unmap: t=%p kva=%p size=%x\n", t, kva, size);
#endif	/* DEBUG_DMA */
#ifdef DIAGNOSTIC
	if ((u_long)kva & PGOFSET)
		panic("_bus_dmamem_unmap");
#endif	/* DIAGNOSTIC */

	/* Avoid free'ing if not mapped */
	if (kva >= (caddr_t)virtual_avail)
		uvm_km_free(kernel_map, (vaddr_t)kva, round_page(size));
}
/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(t, segs, nsegs, off, prot, flags)
	bus_dma_tag_t t;
	bus_dma_segment_t *segs;
	int nsegs;
	off_t off;
	int prot, flags;
{
	int i;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif	/* DIAGNOSTIC */
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return (atop(segs[i].ds_addr + off));
	}

	/* Page not found. */
	return (-1);
}

/**********************************************************************
 * DMA utility functions
 **********************************************************************/

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
_bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	void *buf;
	bus_size_t buflen;
	struct proc *p;
	int flags;
	paddr_t *lastaddrp;
	int *segp;
	int first;
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

#ifdef DEBUG_DMA
	printf("_bus_dmamem_load_buffer(buf=%p, len=%lx, flags=%d, 1st=%d)\n",
	    buf, buflen, flags, first);
#endif	/* DEBUG_DMA */

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask  = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		pmap_extract(pmap, (vaddr_t)vaddr, &curaddr);
#if 0
		/*
		 * Make sure we're in an allowed DMA range.
		 */
		if (t->_ranges != NULL &&
		    _bus_dma_inrange(t->_ranges, t->_nranges, curaddr) == 0)
			return (EINVAL);
#endif

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return (EFBIG);		/* XXX better return value here? */
	return (0);
}
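
/*
 * Worked example of the boundary clamp in _bus_dmamap_load_buffer()
 * above (added for illustration; the numbers are hypothetical): with
 * _dm_boundary = 0x10000 (64KB), bmask = ~0xffff = 0xffff0000.  For a
 * chunk starting at curaddr = 0x1fc00,
 * baddr = (0x1fc00 + 0x10000) & 0xffff0000 = 0x20000, so sgsize is
 * limited to 0x20000 - 0x1fc00 = 0x400 bytes and the segment stops
 * exactly at the 64KB boundary.
 */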

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
int
_bus_dma_inrange(ranges, nranges, curaddr)
	bus_dma_segment_t *ranges;
	int nranges;
	bus_addr_t curaddr;
{
	bus_dma_segment_t *ds;
	int i;

	for (i = 0, ds = ranges; i < nranges; i++, ds++) {
		if (curaddr >= ds->ds_addr &&
		    round_page(curaddr) <= (ds->ds_addr + ds->ds_len))
			return (1);
	}

	return (0);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs,
    flags, low, high)
	bus_dma_tag_t t;
	bus_size_t size, alignment, boundary;
	bus_dma_segment_t *segs;
	int nsegs;
	int *rsegs;
	int flags;
	vaddr_t low;
	vaddr_t high;
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

#ifdef DEBUG_DMA
	printf("alloc_range: t=%p size=%lx align=%lx boundary=%lx segs=%p nsegs=%x rsegs=%p flags=%x lo=%lx hi=%lx\n",
	    t, size, alignment, boundary, segs, nsegs, rsegs, flags, low, high);
#endif	/* DEBUG_DMA */

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&mlist);
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return (error);

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", lastaddr);
#endif	/* DEBUG_DMA */
	m = TAILQ_NEXT(m, pageq);

	for (; m != TAILQ_END(&mlist); m = TAILQ_NEXT(m, pageq)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("vm_page_alloc_memory returned non-sensical"
			    " address 0x%lx\n", curaddr);
			panic("_bus_dmamem_alloc_range");
		}
#endif	/* DIAGNOSTIC */
#ifdef DEBUG_DMA
		printf("alloc: page %lx\n", curaddr);
#endif	/* DEBUG_DMA */
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return (0);
}

/*
 * "generic" DMA struct, nothing special.
 */
struct vax_bus_dma_tag vax_bus_dma_tag = {
	NULL,
	0,
	0,
	0,
	0,
	0,
	_bus_dmamap_create,
	_bus_dmamap_destroy,
	_bus_dmamap_load,
	_bus_dmamap_load_mbuf,
	_bus_dmamap_load_uio,
	_bus_dmamap_load_raw,
	_bus_dmamap_unload,
	_bus_dmamap_sync,
	_bus_dmamem_alloc,
	_bus_dmamem_free,
	_bus_dmamem_map,
	_bus_dmamem_unmap,
	_bus_dmamem_mmap,
};
