/*	$OpenBSD: pmap.c,v 1.31 2007/05/27 20:59:25 miod Exp $	*/
/*
 * Copyright (c) 2001-2004, Miodrag Vallat
 * Copyright (c) 1998-2001 Steve Murphree, Jr.
 * Copyright (c) 1996 Nivas Madhur
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Nivas Madhur.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
/*
 * Mach Operating System
 * Copyright (c) 1991 Carnegie Mellon University
 * Copyright (c) 1991 OMRON Corporation
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/msgbuf.h>
#include <sys/user.h>

#include <machine/asm_macro.h>
#include <machine/cmmu.h>
#include <machine/cpu.h>
#include <machine/lock.h>
#include <machine/pmap_table.h>
#ifdef M88100
#include <machine/m8820x.h>
#endif

#include <uvm/uvm.h>

/*
 * VM externals
 */
extern vaddr_t avail_start;
extern vaddr_t virtual_avail, virtual_end;
extern vaddr_t last_addr;

/*
 * Macros to operate on the pm_cpus field
 */
#define SETBIT_CPUSET(cpu_number, cpuset)	((*(cpuset)) |= (1 << (cpu_number)))
#define CLRBIT_CPUSET(cpu_number, cpuset)	((*(cpuset)) &= ~(1 << (cpu_number)))

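/*
 * Illustrative sketch, not part of the original file: pm_cpus is a
 * bitmask of the cpus currently using this pmap, so context-switch
 * code is expected to pair the macros roughly as follows:
 *
 *	SETBIT_CPUSET(cpu_number(), &pmap->pm_cpus);	(switch-in)
 *	CLRBIT_CPUSET(cpu_number(), &pmap->pm_cpus);	(switch-out)
 *
 * flush_atc_entry() below walks this mask with ff1() so that TLB
 * flushes are only sent to the cpus that may hold stale translations.
 */
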
#ifdef DEBUG
/*
 * Static variables and functions for debugging
 */

/*
 * conditional debugging
 */
#define CD_FULL		0x02

#define CD_ACTIVATE	0x0000004	/* pmap_activate */
#define CD_KMAP		0x0000008	/* pmap_expand_kmap */
#define CD_MAP		0x0000010	/* pmap_map */
#define CD_CACHE	0x0000020	/* pmap_cache_ctrl */
#define CD_INIT		0x0000080	/* pmap_init */
#define CD_CREAT	0x0000100	/* pmap_create */
#define CD_FREE		0x0000200	/* pmap_release */
#define CD_DESTR	0x0000400	/* pmap_destroy */
#define CD_RM		0x0000800	/* pmap_remove */
#define CD_RMAL		0x0001000	/* pmap_remove_all */
#define CD_PROT		0x0002000	/* pmap_protect */
#define CD_EXP		0x0004000	/* pmap_expand */
#define CD_ENT		0x0008000	/* pmap_enter */
#define CD_UPD		0x0010000	/* pmap_update */
#define CD_COL		0x0020000	/* pmap_collect */
#define CD_CBIT		0x0040000	/* pmap_changebit */
#define CD_TBIT		0x0080000	/* pmap_testbit */
#define CD_USBIT	0x0100000	/* pmap_unsetbit */
#define CD_ALL		0x0FFFFFC

int pmap_con_dbg = 0;

#endif /* DEBUG */

struct pool pmappool, pvpool;

caddr_t vmmap;
pt_entry_t *vmpte, *msgbufmap;

struct pmap kernel_pmap_store;
pmap_t kernel_pmap = &kernel_pmap_store;

typedef struct kpdt_entry *kpdt_entry_t;
struct kpdt_entry {
	kpdt_entry_t	next;
	paddr_t		phys;
};

kpdt_entry_t	kpdt_free;

/*
 * Two pages of scratch space per cpu.
 * Used in pmap_copy_page() and pmap_zero_page().
 */
vaddr_t phys_map_vaddr, phys_map_vaddr_end;

static pv_entry_t pg_to_pvh(struct vm_page *);

static __inline pv_entry_t
pg_to_pvh(struct vm_page *pg)
{
	return &pg->mdpage.pvent;
}

/*
 *	Locking primitives
 */

#ifdef MULTIPROCESSOR
#define	PMAP_LOCK(pmap)		__cpu_simple_lock(&(pmap)->pm_lock)
#define	PMAP_UNLOCK(pmap)	__cpu_simple_unlock(&(pmap)->pm_lock)
#else
#define	PMAP_LOCK(pmap)		do { /* nothing */ } while (0)
#define	PMAP_UNLOCK(pmap)	do { /* nothing */ } while (0)
#endif
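
/*
 * Illustrative sketch of the locking discipline, inferred from the
 * callers below (e.g. pmap_cache_ctrl()); this is an assumption, not
 * original text: the pmap lock is taken with interrupts blocked at
 * splvm, and released in the reverse order:
 *
 *	int spl = splvm();
 *	PMAP_LOCK(pmap);
 *	... modify PTEs, flush the ATC ...
 *	PMAP_UNLOCK(pmap);
 *	splx(spl);
 */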

vaddr_t kmapva = 0;

/*
 * Internal routines
 */
static void flush_atc_entry(pmap_t, vaddr_t);
pt_entry_t *pmap_expand_kmap(vaddr_t, vm_prot_t, int);
void	pmap_remove_pte(pmap_t, vaddr_t, pt_entry_t *);
void	pmap_remove_range(pmap_t, vaddr_t, vaddr_t);
void	pmap_expand(pmap_t, vaddr_t);
void	pmap_release(pmap_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t, u_int);
pt_entry_t *pmap_pte(pmap_t, vaddr_t);
void	pmap_remove_all(struct vm_page *);
void	pmap_changebit(struct vm_page *, int, int);
boolean_t pmap_unsetbit(struct vm_page *, int);
boolean_t pmap_testbit(struct vm_page *, int);

/*
 * quick PTE field checking macros
 */
#define	pmap_pte_w(pte)		(*(pte) & PG_W)
#define	pmap_pte_prot(pte)	(*(pte) & PG_PROT)

#define	pmap_pte_w_chg(pte, nw)		((nw) ^ pmap_pte_w(pte))
#define	pmap_pte_prot_chg(pte, np)	((np) ^ pmap_pte_prot(pte))

#define	m88k_protection(prot)	((prot) & VM_PROT_WRITE ? PG_RW : PG_RO)

#define SDTENT(map, va)		((sdt_entry_t *)((map)->pm_stab + SDTIDX(va)))

/*
 * Routine:	FLUSH_ATC_ENTRY
 *
 * Function:
 *	Flush the ATC (TLB) entries that map the given virtual address
 *	in the given pmap.
 *
 * Parameters:
 *	pmap	affected pmap
 *	va	virtual address that should be flushed
 */
static
#ifndef MULTIPROCESSOR
__inline__
#endif
void
flush_atc_entry(pmap_t pmap, vaddr_t va)
{
#ifdef MULTIPROCESSOR
	u_int32_t users;
	int cpu;
	boolean_t kernel;

	if ((users = pmap->pm_cpus) == 0)
		return;

	kernel = pmap == kernel_pmap;
	while ((cpu = ff1(users)) != 32) {
#ifdef DIAGNOSTIC
		if (m88k_cpus[cpu].ci_alive)
#endif
			cmmu_flush_tlb(cpu, kernel, va, 1);
		users ^= 1 << cpu;
	}
#else	/* MULTIPROCESSOR */
	if (pmap->pm_cpus != 0)
		cmmu_flush_tlb(cpu_number(), pmap == kernel_pmap, va, 1);
#endif	/* MULTIPROCESSOR */
}

/*
 * Routine:	PMAP_PTE
 *
 * Function:
 *	Given a map and a virtual address, compute a (virtual) pointer
 *	to the page table entry (PTE) which maps the address.
 *	If the page table associated with the address does not
 *	exist, NULL is returned (and the map may need to grow).
 *
 * Parameters:
 *	pmap	pointer to pmap structure
 *	virt	virtual address for which page table entry is desired
 *
 *    If the segment table entry is invalid, NULL is returned.
 *    Otherwise the page table address is extracted from the segment table,
 *    the page table index is added, and the result is returned.
 */

static __inline__
pt_entry_t *
sdt_pte(sdt_entry_t *sdt, vaddr_t va)
{
	return ((pt_entry_t *)
	    (PG_PFNUM(*(sdt + SDT_ENTRIES)) << PDT_SHIFT) + PDTIDX(va));
}

pt_entry_t *
pmap_pte(pmap_t pmap, vaddr_t virt)
{
	sdt_entry_t *sdt;

	sdt = SDTENT(pmap, virt);
	/*
	 * Check whether page table exists.
	 */
	if (!SDT_VALID(sdt))
		return (NULL);

	return (sdt_pte(sdt, virt));
}
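
/*
 * Illustrative sketch, not part of the original file: a
 * virtual-to-physical lookup built on pmap_pte() is the classic
 * two-level walk, with SDTIDX()/PDTIDX() selecting the segment and
 * page table slots and the low bits kept as the page offset:
 *
 *	pt_entry_t *pte = pmap_pte(pmap, va);
 *	if (pte != NULL && PDT_VALID(pte))
 *		pa = ptoa(PG_PFNUM(*pte)) | (va & PAGE_MASK);
 *
 * Note that sdt_pte() reads the shadow entry at sdt + SDT_ENTRIES,
 * which holds the page table's *virtual* address, so the walk never
 * has to dereference a physical pointer.
 */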

/*
 * Routine:	PMAP_EXPAND_KMAP (internal)
 *
 * Function:
 *    Allocate a page descriptor table (pte_table) and validate the associated
 * segment table entry, returning a pointer to the page table entry. This is
 * much like 'pmap_expand', except that table space is acquired
 * from an area set up by pmap_bootstrap, instead of through
 * uvm_km_zalloc. (Obviously, because uvm_km_zalloc uses the kernel map
 * for allocation - which we can't do when trying to expand the
 * kernel map!) Note that segment tables for the kernel map were
 * all allocated at pmap_bootstrap time, so we only need to worry
 * about the page table here.
 *
 * Parameters:
 *	virt	VA for which translation tables are needed
 *	prot	protection attributes for segment entries
 *
 * Extern/Global:
 *	kpdt_free	kernel page table free queue
 *
 * This routine simply dequeues a table from the kpdt_free list,
 * initializes all its entries (invalidates them), and sets the
 * corresponding segment table entry to point to it. If the kpdt_free
 * list is empty - we panic (no other places to get memory, sorry). (Such
 * a panic indicates that pmap_bootstrap is not allocating enough table
 * space for the kernel virtual address space).
 *
 */
pt_entry_t *
pmap_expand_kmap(vaddr_t virt, vm_prot_t prot, int canfail)
{
	sdt_entry_t template, *sdt;
	kpdt_entry_t kpdt_ent;

#ifdef DEBUG
	if ((pmap_con_dbg & (CD_KMAP | CD_FULL)) == (CD_KMAP | CD_FULL))
		printf("(pmap_expand_kmap: %x) v %x\n", curproc, virt);
#endif

	template = m88k_protection(prot) | PG_M | SG_V;

	/* segment table entry derived from map and virt. */
	sdt = SDTENT(kernel_pmap, virt);
#ifdef DEBUG
	if (SDT_VALID(sdt))
		panic("pmap_expand_kmap: segment table entry VALID");
#endif

	kpdt_ent = kpdt_free;
	if (kpdt_ent == NULL) {
		if (canfail)
			return (NULL);
		else
			panic("pmap_expand_kmap: Ran out of kernel pte tables");
	}

	kpdt_free = kpdt_free->next;
	/* physical table */
	*sdt = kpdt_ent->phys | template;
	/* virtual table */
	*(sdt + SDT_ENTRIES) = (vaddr_t)kpdt_ent | template;

	/* Reinitialize this kpdt area to zero */
	bzero((void *)kpdt_ent, PDT_SIZE);

	return (pt_entry_t *)(kpdt_ent) + PDTIDX(virt);
}

/*
 * Routine:	PMAP_MAP
 *
 * Function:
 *    Map memory at initialization. The physical addresses being
 * mapped are not managed and are never unmapped.
 *
 * Parameters:
 *	virt	virtual address of range to map
 *	start	physical address of range to map
 *	end	physical address of end of range
 *	prot	protection attributes
 *	cmode	cache control attributes
 *
 * Calls:
 *	pmap_pte
 *	pmap_expand_kmap
 *
 * Special Assumptions
 *	For now, VM is already on, only need to map the specified
 * memory. Used only by pmap_bootstrap() and vm_page_startup().
 *
 * For each page that needs mapping:
 *	pmap_pte is called to obtain the address of the page table
 *	entry (PTE). If the page table does not exist,
 *	pmap_expand_kmap is called to allocate it. Finally, the page table
 *	entry is set to point to the physical page.
 *
 *	initialize template with paddr, prot, dt
 *	look for number of phys pages in range
 *	{
 *		pmap_pte(virt)	- expand if necessary
 *		stuff pte from template
 *		increment virt one page
 *		increment template paddr one page
 *	}
 *
 */
vaddr_t
pmap_map(vaddr_t virt, paddr_t start, paddr_t end, vm_prot_t prot, u_int cmode)
{
	u_int npages;
	u_int num_phys_pages;
	pt_entry_t template, *pte;
	paddr_t	page;

#ifdef DEBUG
	if (pmap_con_dbg & CD_MAP)
		printf("(pmap_map: %x) phys address from %x to %x mapped at virtual %x, prot %x cmode %x\n",
			curproc, start, end, virt, prot, cmode);
#endif

#ifdef DEBUG
	/* Check for zero if we map the very end of the address space... */
	if (start > end && end != 0) {
		panic("pmap_map: start greater than end address");
	}
#endif

	template = m88k_protection(prot) | cmode | PG_V;
#ifdef M88110
	if (CPU_IS88110 && m88k_protection(prot) != PG_RO)
		template |= PG_M;
#endif

	page = trunc_page(start);
	npages = atop(round_page(end) - page);
	for (num_phys_pages = npages; num_phys_pages != 0; num_phys_pages--) {
		if ((pte = pmap_pte(kernel_pmap, virt)) == NULL)
			pte = pmap_expand_kmap(virt,
			    VM_PROT_READ | VM_PROT_WRITE, 0);

#ifdef DEBUG
		if ((pmap_con_dbg & (CD_MAP | CD_FULL)) == (CD_MAP | CD_FULL))
			if (PDT_VALID(pte))
				printf("(pmap_map: %x) pte @ %p already valid\n", curproc, pte);
#endif

		*pte = template | page;
		virt += PAGE_SIZE;
		page += PAGE_SIZE;
	}
	return virt;
}

/*
 * Routine:	PMAP_CACHE_CONTROL
 *
 * Function:
 *	Set the cache-control bits in the page table entries (PTEs) which
 *	map the specified virtual address range.
 *
 * Parameters:
 *	pmap_t		pmap
 *	vaddr_t		s
 *	vaddr_t		e
 *	u_int		mode
 *
 * Calls:
 *	pmap_pte
 *	invalidate_pte
 *	flush_atc_entry
 *
 *  This routine sequences through the pages of the specified range.
 * For each, it calls pmap_pte to acquire a pointer to the page table
 * entry (PTE). If the PTE is invalid, or non-existent, nothing is done.
 * Otherwise, the cache-control bits in the PTE are adjusted as specified.
 *
 */
void
pmap_cache_ctrl(pmap_t pmap, vaddr_t s, vaddr_t e, u_int mode)
{
	int spl;
	pt_entry_t opte, *pte;
	vaddr_t va;
	paddr_t pa;
	cpuid_t cpu;

#ifdef DEBUG
	if ((mode & CACHE_MASK) != mode) {
		printf("(cache_ctrl) illegal mode %x\n", mode);
		return;
	}
	if (pmap_con_dbg & CD_CACHE) {
		printf("(pmap_cache_ctrl: %x) pmap %x, va %x, mode %x\n", curproc, pmap, s, mode);
	}

	if (pmap == NULL)
		panic("pmap_cache_ctrl: pmap is NULL");
#endif /* DEBUG */

	spl = splvm();
	PMAP_LOCK(pmap);

	for (va = s; va != e; va += PAGE_SIZE) {
		if ((pte = pmap_pte(pmap, va)) == NULL)
			continue;
#ifdef DEBUG
		if (pmap_con_dbg & CD_CACHE) {
			printf("(cache_ctrl) pte@%p\n", pte);
		}
#endif /* DEBUG */
		/*
		 * Invalidate the pte temporarily, so that the modified
		 * and/or referenced bits cannot be written back behind
		 * our back by another cpu.
		 * XXX
		 */
		opte = invalidate_pte(pte);
		*pte = (opte & ~CACHE_MASK) | mode;
		flush_atc_entry(pmap, va);

		/*
		 * Data cache should be copied back and invalidated if
		 * the old mapping was cached.
		 */
		if ((opte & CACHE_INH) == 0) {
			pa = ptoa(PG_PFNUM(opte));
#ifdef MULTIPROCESSOR
			for (cpu = 0; cpu < MAX_CPUS; cpu++)
				if (m88k_cpus[cpu].ci_alive != 0)
#else
			cpu = cpu_number();
#endif
					cmmu_flush_cache(cpu, pa, PAGE_SIZE);
		}
	}
	PMAP_UNLOCK(pmap);
	splx(spl);
}

/*
 * Routine:	PMAP_BOOTSTRAP
 *
 * Function:
 *	Bootstrap the system enough to run with virtual memory.
 *	Map the kernel's code and data, allocate the kernel
 *	translation table space, and map control registers
 *	and other IO addresses.
 *
 * Parameters:
 *	load_start	PA where kernel was loaded
 *
 * Extern/Global:
 *
 *	PAGE_SIZE	VM (software) page size
 *	etext		end of kernel text
 *	phys_map_vaddr	VA of page mapped arbitrarily for debug/IO
 *
 * Calls:
 *	__cpu_simple_lock_init
 *	pmap_map
 *
 *    The physical address 'load_start' is mapped at
 * VM_MIN_KERNEL_ADDRESS, which maps the kernel code and data at the
 * virtual address for which it was (presumably) linked. Immediately
 * following the end of the kernel code/data, sufficient pages of
 * physical memory are reserved to hold translation tables for the kernel
 * address space.
 *
 *    A pair of virtual pages per cpu are reserved for debugging and
 * IO purposes. They are arbitrarily mapped when needed. They are used,
 * for example, by pmap_copy_page and pmap_zero_page.
 *
 *    This implementation also assumes that the space below the kernel
 * is reserved (typically for PROM purposes). We should ideally map it
 * read only except when invoking its services...
 */

void
pmap_bootstrap(vaddr_t load_start)
{
	kpdt_entry_t kpdt_virt;
	sdt_entry_t *kmap;
	vaddr_t vaddr, virt;
	paddr_t s_text, e_text, kpdt_phys;
	unsigned int kernel_pmap_size, pdt_size;
	int i;
#ifndef MULTIPROCESSOR
	cpuid_t cpu;
#endif
	pmap_table_t ptable;
	extern void *etext;

#ifdef MULTIPROCESSOR
	__cpu_simple_lock_init(&kernel_pmap->pm_lock);
#endif

	/*
	 * Allocate the kernel page table from the front of available
	 * physical memory, i.e. just after where the kernel image was loaded.
	 */
	/*
	 * The calling sequence is
	 *    ...
	 *  pmap_bootstrap(&kernelstart, ...);
	 * kernelstart being the first symbol in the load image.
	 */

	avail_start = round_page(avail_start);
	virtual_avail = avail_start;

	/*
	 * Initialize kernel_pmap structure
	 */
	kernel_pmap->pm_count = 1;
	kernel_pmap->pm_cpus = 0;
	kmap = (sdt_entry_t *)(avail_start);
	kernel_pmap->pm_stab = (sdt_entry_t *)virtual_avail;
	kmapva = virtual_avail;

	/*
	 * Reserve space for segment table entries.
	 * One for the regular segment table and one for the shadow table.
	 * The shadow table keeps track of the virtual addresses of page
	 * tables. This is used in virtual-to-physical address translation
	 * functions. Remember, the MMU cares only about physical addresses
	 * of segment and page tables. For kernel page tables, we
	 * really don't need this virtual stuff (since the kernel will
	 * be mapped 1-to-1) but for user page tables, this is required.
	 * Just to be consistent, we will maintain the shadow table for
	 * the kernel pmap also.
	 */
	kernel_pmap_size = 2 * SDT_SIZE;

#ifdef DEBUG
	printf("kernel segment table size = 0x%x\n", kernel_pmap_size);
#endif
	/* init all segment descriptors to zero */
	bzero(kernel_pmap->pm_stab, kernel_pmap_size);

	avail_start += kernel_pmap_size;
	virtual_avail += kernel_pmap_size;

	/* make sure page tables are page aligned!! XXX smurph */
	avail_start = round_page(avail_start);
	virtual_avail = round_page(virtual_avail);

	/* save pointers to where page table entries start in physical memory */
	kpdt_phys = avail_start;
	kpdt_virt = (kpdt_entry_t)virtual_avail;

	/* Compute how much space we need for the kernel page table */
	pdt_size = atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
	    * sizeof(pt_entry_t);
	for (ptable = pmap_table_build(); ptable->size != (vsize_t)-1; ptable++)
		pdt_size += atop(ptable->size) * sizeof(pt_entry_t);
	pdt_size = round_page(pdt_size);
	kernel_pmap_size += pdt_size;
	avail_start += pdt_size;
	virtual_avail += pdt_size;

	/* init all page descriptors to zero */
	bzero((void *)kpdt_phys, pdt_size);
#ifdef DEBUG
	printf("--------------------------------------\n");
	printf("        kernel page start = 0x%x\n", kpdt_phys);
	printf("   kernel page table size = 0x%x\n", pdt_size);
	printf("          kernel page end = 0x%x\n", avail_start);

	printf("kpdt_virt = 0x%x\n", kpdt_virt);
#endif
	/*
	 * init the kpdt queue
	 */
	kpdt_free = kpdt_virt;
	for (i = pdt_size / PDT_SIZE; i != 0; i--) {
		kpdt_virt->next = (kpdt_entry_t)((vaddr_t)kpdt_virt + PDT_SIZE);
		kpdt_virt->phys = kpdt_phys;
		kpdt_virt = kpdt_virt->next;
		kpdt_phys += PDT_SIZE;
	}
	kpdt_virt->next = NULL;	/* terminate the list */

	/*
	 * Map the kernel image into virtual space
	 */

	s_text = trunc_page(load_start);	/* paddr of text */
	e_text = round_page((vaddr_t)&etext);	/* paddr of end of text */

	/* map the PROM area */
	vaddr = pmap_map(0, 0, s_text, VM_PROT_WRITE | VM_PROT_READ, CACHE_INH);

	/* map the kernel text read only */
	vaddr = pmap_map(s_text, s_text, e_text, VM_PROT_READ, 0);

	vaddr = pmap_map(vaddr, e_text, (paddr_t)kmap,
	    VM_PROT_WRITE | VM_PROT_READ, 0);

	/*
	 * Map system segment & page tables - should be cache inhibited?
	 * 88200 manual says that CI bit is driven on the Mbus while accessing
	 * the translation tree. I don't think we need to map it CACHE_INH
	 * here...
	 */
	if (kmapva != vaddr) {
		while (vaddr < (virtual_avail - kernel_pmap_size))
			vaddr = round_page(vaddr + 1);
	}
	vaddr = pmap_map(vaddr, (paddr_t)kmap, avail_start,
	    VM_PROT_WRITE | VM_PROT_READ, CACHE_INH);

	vaddr = pmap_bootstrap_md(vaddr);

	virtual_avail = round_page(virtual_avail);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Map two pages per cpu for copying/zeroing.
	 */

	phys_map_vaddr = virtual_avail;
	phys_map_vaddr_end = virtual_avail + 2 * (max_cpus << PAGE_SHIFT);
	avail_start += 2 * (max_cpus << PAGE_SHIFT);
	virtual_avail += 2 * (max_cpus << PAGE_SHIFT);

	/*
	 * Create all the machine-specific mappings.
	 */

	for (ptable = pmap_table_build(); ptable->size != (vsize_t)-1; ptable++)
		if (ptable->size != 0) {
			pmap_map(ptable->virt_start, ptable->phys_start,
			    ptable->phys_start + ptable->size,
			    ptable->prot, ptable->cacheability);
		}

	/*
	 * Allocate all the submaps we need. Note that SYSMAP just allocates
	 * kernel virtual address with no physical backing memory. The idea
	 * is physical memory will be mapped at this va before using that va.
	 * This means that if different physical pages are going to be mapped
	 * at different times, we better do a tlb flush before using it -
	 * else we will be referencing the wrong page.
	 */

#define	SYSMAP(c, p, v, n)	\
({ \
	v = (c)virt; \
	if ((p = pmap_pte(kernel_pmap, virt)) == NULL) \
		pmap_expand_kmap(virt, VM_PROT_READ | VM_PROT_WRITE, 0); \
	virt += ((n) * PAGE_SIZE); \
})

	virt = virtual_avail;

	SYSMAP(caddr_t, vmpte, vmmap, 1);
	invalidate_pte(vmpte);

	SYSMAP(struct msgbuf *, msgbufmap, msgbufp, btoc(MSGBUFSIZE));

	virtual_avail = virt;

	/*
	 * Switch to using new page tables
	 */

	kernel_pmap->pm_apr = (atop((paddr_t)kmap) << PG_SHIFT) |
	    CACHE_GLOBAL | CACHE_WT | APR_V;

	/* Invalidate entire kernel TLB and get ready for address translation */
#ifdef MULTIPROCESSOR
	pmap_bootstrap_cpu(cpu_number());
#else
	cpu = cpu_number();
	cmmu_flush_tlb(cpu, TRUE, 0, -1);
	/* Load supervisor pointer to segment table. */
	cmmu_set_sapr(cpu, kernel_pmap->pm_apr);
#ifdef DEBUG
	printf("cpu%d: running virtual\n", cpu);
#endif
	SETBIT_CPUSET(cpu, &kernel_pmap->pm_cpus);
#endif	/* MULTIPROCESSOR */
}

#ifdef MULTIPROCESSOR
void
pmap_bootstrap_cpu(cpuid_t cpu)
{
	if (cpu != master_cpu) {
		cmmu_initialize_cpu(cpu);
	} else {
		cmmu_flush_tlb(cpu, TRUE, 0, -1);
	}
	/* Load supervisor pointer to segment table. */
	cmmu_set_sapr(cpu, kernel_pmap->pm_apr);
#ifdef DEBUG
	printf("cpu%d: running virtual\n", cpu);
#endif
	SETBIT_CPUSET(cpu, &kernel_pmap->pm_cpus);
}
#endif

/*
 * Routine:	PMAP_INIT
 *
 * Function:
 *	Initialize the pmap module. It is called by vm_init to initialize
 *	any structures that the pmap system needs to map virtual memory.
 *
 * Calls:
 *	pool_init
 *
 *   This routine does not really have much to do. It initializes
 * pools for pmap structures and pv_entry structures.
 */
void
pmap_init(void)
{
#ifdef DEBUG
	if (pmap_con_dbg & CD_INIT)
		printf("pmap_init()\n");
#endif

	pool_init(&pmappool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr);
	pool_init(&pvpool, sizeof(pv_entry_t), 0, 0, 0, "pvpl", NULL);
} /* pmap_init() */

/*
 * Routine:	PMAP_ZERO_PAGE
 *
 * Function:
 *	Zeroes the specified page.
 *
 * Parameters:
 *	pg		page to zero
 *
 * Extern/Global:
 *	phys_map_vaddr
 *
 * Special Assumptions:
 *	no locking required
 *
 *	This routine maps the physical page at the per-cpu 'phys_map'
 * virtual address set up in pmap_bootstrap. It flushes the TLB to make
 * the new mapping effective, and zeroes all the bits.
 */
void
pmap_zero_page(struct vm_page *pg)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	vaddr_t va;
	int spl;
	int cpu = cpu_number();
	pt_entry_t *pte;

	va = (vaddr_t)(phys_map_vaddr + 2 * (cpu << PAGE_SHIFT));
	pte = pmap_pte(kernel_pmap, va);

	spl = splvm();

	*pte = m88k_protection(VM_PROT_READ | VM_PROT_WRITE) |
	    PG_M /* 88110 */ | PG_V | pa;

	/*
	 * We don't need the flush_atc_entry() dance, as these pages are
	 * bound to only one cpu.
	 */
	cmmu_flush_tlb(cpu, TRUE, va, 1);

	/*
	 * The page is likely to be a non-kernel mapping, and as
	 * such write back. Also, we might have split U/S caches!
	 * So be sure to have the pa flushed after the filling.
	 */
	bzero((void *)va, PAGE_SIZE);
	cmmu_flush_data_page(cpu, pa);

	splx(spl);
}
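
/*
 * Illustrative sketch, an assumption rather than the original code:
 * the companion pmap_copy_page() mentioned earlier is expected to use
 * both scratch pages of the per-cpu window in the same fashion,
 * mapping source and destination side by side:
 *
 *	dstva = phys_map_vaddr + 2 * (cpu << PAGE_SHIFT);
 *	srcva = dstva + PAGE_SIZE;
 *	... install both PTEs ...
 *	cmmu_flush_tlb(cpu, TRUE, dstva, 2);
 *	bcopy((void *)srcva, (void *)dstva, PAGE_SIZE);
 *	cmmu_flush_data_page(cpu, dst_pa);
 */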

/*
 * Routine:	PMAP_CREATE
 *
 * Function:
 *	Create and return a physical map, which may be referenced by
 *	the hardware.
 *
 *  This routine allocates a pmap structure and its segment tables.
 */
pmap_t
pmap_create(void)
{
	pmap_t pmap;
	sdt_entry_t *segdt;
	paddr_t stpa;
	u_int s;

	pmap = pool_get(&pmappool, PR_WAITOK);
	bzero(pmap, sizeof(*pmap));

	/*
	 * Allocate memory for the *actual* segment table and *shadow* table.
	 */
	s = round_page(2 * SDT_SIZE);
#ifdef DEBUG
	if (pmap_con_dbg & CD_CREAT) {
		printf("(pmap_create: %x) need %d pages for sdt\n",
		    curproc, atop(s));
	}
#endif

	segdt = (sdt_entry_t *)uvm_km_zalloc(kernel_map, s);
	if (segdt == NULL)
		panic("pmap_create: uvm_km_zalloc failure");

	/*
	 * Initialize pointers to the segment table, both virtual and physical.
	 */
	pmap->pm_stab = segdt;
	if (pmap_extract(kernel_pmap, (vaddr_t)segdt,
	    (paddr_t *)&stpa) == FALSE)
		panic("pmap_create: pmap_extract failed!");
	pmap->pm_apr = (atop(stpa) << PG_SHIFT) | CACHE_GLOBAL | APR_V;

#ifdef DEBUG
	if (stpa & PAGE_MASK)
		panic("pmap_create: sdt_table 0x%x not aligned on page boundary",
		    (int)stpa);

	if (pmap_con_dbg & CD_CREAT) {
		printf("(pmap_create: %x) pmap=%p, pm_stab=0x%x (pa 0x%x)\n",
		    curproc, pmap, pmap->pm_stab, stpa);
	}
#endif

	/* memory for page tables should not be writeback or local */
	pmap_cache_ctrl(kernel_pmap,
	    (vaddr_t)segdt, (vaddr_t)segdt + s, CACHE_WT);

	/*
	 * There is no need to clear the segment tables, since uvm_km_zalloc
	 * provides us clean pages.
	 */

	/*
	 * Initialize pmap structure.
	 */
	pmap->pm_count = 1;
#ifdef MULTIPROCESSOR
	__cpu_simple_lock_init(&pmap->pm_lock);
#endif
	pmap->pm_cpus = 0;

	return pmap;
}
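
/*
 * Illustrative usage sketch (hypothetical caller, not original text):
 * pmap_create() pairs with pmap_reference()/pmap_destroy() below, and
 * the tables are only torn down when the last reference is dropped:
 *
 *	pmap_t pm = pmap_create();
 *	pmap_reference(pm);	(second user, pm_count 1 -> 2)
 *	pmap_destroy(pm);	(pm_count 2 -> 1, nothing freed)
 *	pmap_destroy(pm);	(pm_count 1 -> 0, pmap_release + pool_put)
 */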

/*
 * Routine:	PMAP_RELEASE
 *
 *	Internal procedure used by pmap_destroy() to actually deallocate
 *	the tables.
 *
 * Parameters:
 *	pmap		pointer to pmap structure
 *
 * Calls:
 *	pmap_pte
 *	uvm_km_free
 *
 * Special Assumptions:
 *	No locking is needed, since this is only called when the
 *	pm_count field of the pmap structure goes to zero.
 *
 * This routine sequences through the user address space, releasing
 * all translation table space back to the system using uvm_km_free.
 * The loops are indexed by the virtual address space
 * ranges represented by the table group sizes (1 << SDT_SHIFT).
 */
void
pmap_release(pmap_t pmap)
{
	u_int sdt;		/* outer loop index */
	sdt_entry_t *sdttbl;	/* ptr to first entry in the segment table */
	pt_entry_t *gdttbl;	/* ptr to first entry in a page table */

#ifdef DEBUG
	if (pmap_con_dbg & CD_FREE)
		printf("(pmap_release: %x) pmap %x\n", curproc, pmap);
#endif

	/* segment table loop */
	for (sdt = VM_MIN_ADDRESS >> SDT_SHIFT;
	    sdt <= VM_MAX_ADDRESS >> SDT_SHIFT; sdt++) {
		if ((gdttbl = pmap_pte(pmap, sdt << SDT_SHIFT)) != NULL) {
#ifdef DEBUG
			if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
				printf("(pmap_release: %x) free page table = 0x%x\n",
				    curproc, gdttbl);
#endif
			uvm_km_free(kernel_map, (vaddr_t)gdttbl, PAGE_SIZE);
		}
	}

	/*
	 * Free both the *actual* and the *shadow* segment tables.
	 */
	sdttbl = pmap->pm_stab;		/* addr of segment table */
#ifdef DEBUG
	if ((pmap_con_dbg & (CD_FREE | CD_FULL)) == (CD_FREE | CD_FULL))
		printf("(pmap_release: %x) free segment table = 0x%x\n",
		    curproc, sdttbl);
#endif
	uvm_km_free(kernel_map, (vaddr_t)sdttbl, round_page(2 * SDT_SIZE));

#ifdef DEBUG
	if (pmap_con_dbg & CD_FREE)
		printf("(pmap_release: %x) pm_count = 0\n", curproc);
#endif
}

/*
 * Routine:	PMAP_DESTROY
 *
 * Function:
 *	Retire the given physical map from service. Should only be called
 *	if the map contains no valid mappings.
 *
 * Parameters:
 *	pmap		pointer to pmap structure
 *
 * Calls:
 *	pmap_release
 *	pool_put
 *
 * Special Assumptions:
 *	Map contains no valid mappings.
 *
 *  This routine decrements the reference count in the pmap
 * structure. If it goes to zero, pmap_release is called to release
 * the memory space to the system, and pool_put is called to free the
 * pmap structure itself.
 */
void
pmap_destroy(pmap_t pmap)
{
	int count;

#ifdef DEBUG
	if (pmap == kernel_pmap)
		panic("pmap_destroy: Attempt to destroy kernel pmap");
#endif

	PMAP_LOCK(pmap);
	count = --pmap->pm_count;
	PMAP_UNLOCK(pmap);
	if (count == 0) {
		pmap_release(pmap);
		pool_put(&pmappool, pmap);
	}
}
                   1032:
                   1033:
                   1034: /*
                   1035:  * Routine:    PMAP_REFERENCE
                   1036:  *
                   1037:  * Function:
                   1038:  *     Add a reference to the specified pmap.
                   1039:  *
                   1040:  * Parameters:
                   1041:  *     pmap            pointer to pmap structure
                   1042:  *
                    1043:  * Under the pmap lock, the pm_count field of the pmap structure
                    1044:  * is incremented.
                   1045:  */
                   1046: void
                   1047: pmap_reference(pmap_t pmap)
                   1048: {
                   1049:        PMAP_LOCK(pmap);
                   1050:        pmap->pm_count++;
                   1051:        PMAP_UNLOCK(pmap);
                   1052: }
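/*
 * Editor's sketch, not part of the original source: the pairing of
 * pmap_reference() and pmap_destroy() expected of a hypothetical caller
 * that shares a pmap. The function name is illustrative only.
 */
#if 0
void
example_pmap_share(pmap_t pmap)
{
	pmap_reference(pmap);	/* pm_count: n -> n + 1 */
	/* ... hand the pmap to a second consumer ... */
	pmap_destroy(pmap);	/* pm_count: n + 1 -> n; pmap_release()
				   and pool_put() only run when the
				   count reaches 0 */
}
#endif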
                   1053:
                   1054: /*
                   1055:  * Routine:    PMAP_REMOVE_PTE (internal)
                   1056:  *
                   1057:  * Function:
                   1058:  *     Invalidate a given page table entry associated with the
                   1059:  *     given virtual address.
                   1060:  *
                   1061:  * Parameters:
                   1062:  *     pmap            pointer to pmap structure
                   1063:  *     va              virtual address of page to remove
                   1064:  *     pte             existing pte
                   1065:  *
                   1066:  * External/Global:
                   1067:  *     pv lists
                   1068:  *
                   1069:  * Calls:
                   1070:  *     pool_put
                   1071:  *     invalidate_pte
                   1072:  *     flush_atc_entry
                   1073:  *
                   1074:  * Special Assumptions:
                   1075:  *     The pmap must be locked.
                   1076:  *
                    1077:  *  If the PTE is valid, the routine invalidates the entry. The
                    1078:  * 'modified' bit, if set, is reflected back to the VM and saved in
                    1079:  * the appropriate PV list entry. Next, the function finds the PV list
                    1080:  * entry associated with this pmap/va pair (if it doesn't exist, the
                    1081:  * function panics). The PV list entry is unlinked from the list and
                    1082:  * returned to its pool (a standalone model of this follows the function).
                   1083:  */
                   1084: void
                   1085: pmap_remove_pte(pmap_t pmap, vaddr_t va, pt_entry_t *pte)
                   1086: {
                   1087:        pt_entry_t opte;
                   1088:        pv_entry_t prev, cur, pvl;
                   1089:        struct vm_page *pg;
                   1090:        paddr_t pa;
                   1091:
                   1092: #ifdef DEBUG
                   1093:        if (pmap_con_dbg & CD_RM) {
                   1094:                if (pmap == kernel_pmap)
                   1095:                        printf("(pmap_remove_pte: %x) pmap kernel va %x\n", curproc, va);
                   1096:                else
                   1097:                        printf("(pmap_remove_pte: %x) pmap %x va %x\n", curproc, pmap, va);
                   1098:        }
                   1099: #endif
                   1100:
                   1101:        if (pte == NULL || !PDT_VALID(pte)) {
                   1102:                return;         /* no page mapping, nothing to do! */
                   1103:        }
                   1104:
                   1105:        /*
                   1106:         * Update statistics.
                   1107:         */
                   1108:        pmap->pm_stats.resident_count--;
                   1109:        if (pmap_pte_w(pte))
                   1110:                pmap->pm_stats.wired_count--;
                   1111:
                   1112:        pa = ptoa(PG_PFNUM(*pte));
                   1113:
                   1114:        /*
                   1115:         * Invalidate the pte.
                   1116:         */
                   1117:
                   1118:        opte = invalidate_pte(pte) & PG_M_U;
                   1119:        flush_atc_entry(pmap, va);
                   1120:
                   1121:        pg = PHYS_TO_VM_PAGE(pa);
                   1122:
                   1123:        /* If this isn't a managed page, just return. */
                   1124:        if (pg == NULL)
                   1125:                return;
                   1126:
                   1127:        /*
                   1128:         * Remove the mapping from the pvlist for
                   1129:         * this physical page.
                   1130:         */
                   1131:        pvl = pg_to_pvh(pg);
                   1132:
                   1133: #ifdef DIAGNOSTIC
                   1134:        if (pvl->pv_pmap == NULL)
                   1135:                panic("pmap_remove_pte: null pv_list");
                   1136: #endif
                   1137:
                   1138:        prev = NULL;
                   1139:        for (cur = pvl; cur != NULL; cur = cur->pv_next) {
                   1140:                if (cur->pv_va == va && cur->pv_pmap == pmap)
                   1141:                        break;
                   1142:                prev = cur;
                   1143:        }
                   1144:        if (cur == NULL) {
                   1145:                panic("pmap_remove_pte: mapping for va "
                   1146:                    "0x%lx (pa 0x%lx) not in pv list at %p",
                   1147:                    va, pa, pvl);
                   1148:        }
                   1149:
                   1150:        if (prev == NULL) {
                   1151:                /*
                    1152:                 * Header is the pv_entry. Copy the next one
                    1153:                 * to the header and free the next one (we can't
                    1154:                 * free the header).
                   1155:                 */
                   1156:                cur = cur->pv_next;
                   1157:                if (cur != NULL) {
                   1158:                        cur->pv_flags = pvl->pv_flags;
                   1159:                        *pvl = *cur;
                   1160:                        pool_put(&pvpool, cur);
                   1161:                } else {
                   1162:                        pvl->pv_pmap = NULL;
                   1163:                }
                   1164:        } else {
                   1165:                prev->pv_next = cur->pv_next;
                   1166:                pool_put(&pvpool, cur);
                   1167:        }
                   1168:
                   1169:        /* Update saved attributes for managed page */
                   1170:        pvl->pv_flags |= opte;
                   1171: }
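/*
 * Editor's sketch, not part of the original source: a standalone model
 * of the inline-header list removal performed above. The pv list head
 * lives inside the vm_page (via pg_to_pvh()), so the head itself can
 * never be freed; deleting the first entry instead copies its successor
 * over the head. pv_free() is a hypothetical stand-in for
 * pool_put(&pvpool, ...).
 */
#if 0
struct pv {
	struct pv	*next;
	int		 key;
};

extern void pv_free(struct pv *);

void
pv_remove(struct pv *head, int key)
{
	struct pv *prev = NULL, *cur;

	for (cur = head; cur != NULL; cur = cur->next) {
		if (cur->key == key)
			break;
		prev = cur;
	}
	if (cur == NULL)
		return;			/* no match */
	if (prev == NULL) {
		/* match is the inline head itself */
		if (cur->next != NULL) {
			cur = cur->next;
			*head = *cur;	/* copy successor over the head */
			pv_free(cur);
		} else
			head->key = -1;	/* mark the list empty */
	} else {
		prev->next = cur->next;
		pv_free(cur);
	}
}
#endif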
                   1172:
                   1173: /*
                   1174:  * Routine:    PMAP_REMOVE_RANGE (internal)
                   1175:  *
                   1176:  * Function:
                    1177:  *     Invalidate the page table entries associated with the
                    1178:  *     given virtual address range. The addresses given are the first
                    1179:  *     (inclusive) and last (exclusive) page addresses of the range.
                   1180:  *
                   1181:  * Parameters:
                   1182:  *     pmap            pointer to pmap structure
                   1183:  *     s               virtual address of start of range to remove
                   1184:  *     e               virtual address of end of range to remove
                   1185:  *
                   1186:  * External/Global:
                   1187:  *     pv lists
                   1188:  *
                   1189:  * Calls:
                   1190:  *     pmap_pte
                   1191:  *     pmap_remove_pte
                   1192:  *
                   1193:  * Special Assumptions:
                   1194:  *     The pmap must be locked.
                   1195:  *
                   1196:  *   This routine sequences through the pages defined by the given
                   1197:  * range. For each page, the associated page table entry (PTE) is
                   1198:  * invalidated via pmap_remove_pte().
                   1199:  *
                   1200:  * Empty segments are skipped for performance.
                   1201:  */
                   1202: void
                   1203: pmap_remove_range(pmap_t pmap, vaddr_t s, vaddr_t e)
                   1204: {
                   1205:        vaddr_t va, eseg;
                   1206:
                   1207: #ifdef DEBUG
                   1208:        if (pmap_con_dbg & CD_RM) {
                   1209:                if (pmap == kernel_pmap)
                   1210:                        printf("(pmap_remove_range: %x) pmap kernel s %x e %x\n", curproc, s, e);
                   1211:                else
                   1212:                        printf("(pmap_remove_range: %x) pmap %x s %x e %x\n", curproc, pmap, s, e);
                   1213:        }
                   1214: #endif
                   1215:
                   1216:        /*
                   1217:         * Loop through the range in PAGE_SIZE increments.
                   1218:         */
                   1219:        va = s;
                   1220:        while (va != e) {
                   1221:                sdt_entry_t *sdt;
                   1222:
                   1223:                eseg = (va & SDT_MASK) + (1 << SDT_SHIFT);
                   1224:                if (eseg > e || eseg == 0)
                   1225:                        eseg = e;
                   1226:
                   1227:                sdt = SDTENT(pmap, va);
                   1228:
                   1229:                /* If no segment table, skip a whole segment */
                   1230:                if (!SDT_VALID(sdt))
                   1231:                        va = eseg;
                   1232:                else {
                   1233:                        while (va != eseg) {
                   1234:                                pmap_remove_pte(pmap, va, sdt_pte(sdt, va));
                   1235:                                va += PAGE_SIZE;
                   1236:                        }
                   1237:                }
                   1238:        }
                   1239: }
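/*
 * Editor's note, not part of the original source: a worked instance of
 * the segment-boundary arithmetic above, assuming SDT_SHIFT is 22 (4MB
 * segments) and SDT_MASK keeps the segment number bits:
 *
 *	va   = 0x00c01000
 *	eseg = (va & SDT_MASK) + (1 << SDT_SHIFT)
 *	     = 0x00c00000 + 0x00400000
 *	     = 0x01000000	(start of the next segment)
 *
 * The "eseg == 0" test catches overflow when va lies in the topmost
 * segment of the address space.
 */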
                   1240:
                   1241: /*
                   1242:  * Routine:    PMAP_REMOVE
                   1243:  *
                   1244:  * Function:
                   1245:  *     Remove the given range of addresses from the specified map.
                   1246:  *     It is assumed that start and end are properly rounded to the VM page
                   1247:  *     size.
                   1248:  *
                   1249:  * Parameters:
                   1250:  *     pmap            pointer to pmap structure
                    1251:  *     s               virtual address of start of range
                    1252:  *     e               virtual address of end of range
                   1253:  *
                   1254:  * Special Assumptions:
                    1255:  *     Not all entries in the specified range need be valid.
                   1256:  *
                   1257:  * Calls:
                   1258:  *     pmap_remove_range
                   1259:  *
                   1260:  *  After taking pmap read lock, pmap_remove_range is called to do the
                   1261:  * real work.
                   1262:  */
                   1263: void
                   1264: pmap_remove(pmap_t pmap, vaddr_t s, vaddr_t e)
                   1265: {
                   1266:        int spl;
                   1267:
                   1268:        if (pmap == NULL)
                   1269:                return;
                   1270:
                   1271: #ifdef DEBUG
                   1272:        if (s >= e)
                   1273:                panic("pmap_remove: start greater than end address");
                   1274: #endif
                   1275:
                   1276:        spl = splvm();
                   1277:        PMAP_LOCK(pmap);
                   1278:        pmap_remove_range(pmap, s, e);
                   1279:        PMAP_UNLOCK(pmap);
                   1280:        splx(spl);
                   1281: }
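/*
 * Editor's sketch, not part of the original source: a typical call,
 * with bounds already rounded to page boundaries as the interface
 * requires. The variable names are illustrative.
 */
#if 0
	pmap_remove(pmap, trunc_page(sva), round_page(eva));
#endif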
                   1282:
                   1283: /*
                   1284:  * Routine:    PMAP_REMOVE_ALL
                   1285:  *
                   1286:  * Function:
                   1287:  *     Removes this physical page from all physical maps in which it
                   1288:  *     resides. Reflects back modify bits to the pager.
                   1289:  *
                   1290:  * Parameters:
                    1291:  *     pg              physical page which is to
                   1292:  *                     be removed from all maps
                   1293:  *
                   1294:  * Extern/Global:
                   1295:  *     pv lists
                   1296:  *
                   1297:  * Calls:
                   1298:  *     pmap_pte
                   1299:  *     pool_put
                   1300:  *
                    1301:  *  If the given page is not a managed page,
                   1302:  * this routine simply returns. Otherwise, the PV list associated with
                   1303:  * that page is traversed. For each pmap/va pair pmap_pte is called to
                   1304:  * obtain a pointer to the page table entry (PTE) associated with the
                   1305:  * va (the PTE must exist and be valid, otherwise the routine panics).
                    1306:  * The hardware 'modified' bit in the PTE is examined. If it is set,
                    1307:  * the corresponding bit in the PV list entry for the
                    1308:  * physical page is set as well.
                   1309:  * Then, the PTE is invalidated, and the PV list entry is unlinked and
                   1310:  * freed.
                   1311:  *
                   1312:  *  At the end of this function, the PV list for the specified page
                   1313:  * will be null.
                   1314:  */
                   1315: void
                   1316: pmap_remove_all(struct vm_page *pg)
                   1317: {
                   1318:        pt_entry_t *pte;
                   1319:        pv_entry_t pvl;
                   1320:        vaddr_t va;
                   1321:        pmap_t pmap;
                   1322:        int spl;
                   1323:
                   1324:        if (pg == NULL) {
                   1325:                /* not a managed page. */
                   1326: #ifdef DEBUG
                   1327:                if (pmap_con_dbg & CD_RMAL)
                   1328:                        printf("(pmap_remove_all: %x) vm page 0x%x not a managed page\n", curproc, pg);
                   1329: #endif
                   1330:                return;
                   1331:        }
                   1332:
                   1333: #ifdef DEBUG
                   1334:        if (pmap_con_dbg & CD_RMAL)
                   1335:                printf("(pmap_remove_all: %x) va %x\n", curproc, pg, pg_to_pvh(pg)->pv_va);
                   1336: #endif
                   1337:
                   1338:        spl = splvm();
                   1339:        /*
                   1340:         * Walk down PV list, removing all mappings.
                    1341:         * We don't have to lock the pv list, since we have the entire
                    1342:         * pmap system locked.
                   1343:         */
                   1344: #ifdef MULTIPROCESSOR
                   1345: remove_all_Retry:
                   1346: #endif
                   1347:
                   1348:        pvl = pg_to_pvh(pg);
                   1349:
                   1350:        /*
                   1351:         * Loop for each entry on the pv list
                   1352:         */
                   1353:        while (pvl != NULL && (pmap = pvl->pv_pmap) != NULL) {
                   1354: #ifdef MULTIPROCESSOR
                   1355:                if (!__cpu_simple_lock_try(&pmap->pm_lock))
                   1356:                        goto remove_all_Retry;
                   1357: #endif
                   1358:
                   1359:                va = pvl->pv_va;
                   1360:                pte = pmap_pte(pmap, va);
                   1361:
                   1362:                if (pte == NULL || !PDT_VALID(pte)) {
                   1363:                        pvl = pvl->pv_next;
                   1364:                        goto next;      /* no page mapping */
                   1365:                }
                   1366:                if (pmap_pte_w(pte)) {
                   1367: #ifdef DEBUG
                   1368:                        if (pmap_con_dbg & CD_RMAL)
                   1369:                                printf("pmap_remove_all: wired mapping for %lx not removed\n",
                   1370:                                    pg);
                   1371: #endif
                   1372:                        pvl = pvl->pv_next;
                   1373:                        goto next;
                   1374:                }
                   1375:
                   1376:                pmap_remove_pte(pmap, va, pte);
                   1377:
                   1378:                /*
                    1379:                 * Do not free any page tables;
                    1380:                 * leave that for when the VM calls pmap_collect().
                   1381:                 */
                   1382: next:
                   1383:                PMAP_UNLOCK(pmap);
                   1384:        }
                   1385:        splx(spl);
                   1386: }
                   1387:
                   1388: /*
                   1389:  * Routine:    PMAP_PROTECT
                   1390:  *
                   1391:  * Function:
                   1392:  *     Sets the physical protection on the specified range of this map
                   1393:  *     as requested.
                   1394:  *
                   1395:  * Parameters:
                   1396:  *     pmap            pointer to pmap structure
                    1397:  *     s               virtual address of start of range
                    1398:  *     e               virtual address of end of range
                   1399:  *     prot            desired protection attributes
                   1400:  *
                   1401:  *     Calls:
                   1402:  *             PMAP_LOCK, PMAP_UNLOCK
                   1403:  *             pmap_pte
                   1404:  *             PDT_VALID
                   1405:  *
                   1406:  *  This routine sequences through the pages of the specified range.
                   1407:  * For each, it calls pmap_pte to acquire a pointer to the page table
                   1408:  * entry (PTE). If the PTE is invalid, or non-existent, nothing is done.
                   1409:  * Otherwise, the PTE's protection attributes are adjusted as specified.
                   1410:  */
                   1411: void
                   1412: pmap_protect(pmap_t pmap, vaddr_t s, vaddr_t e, vm_prot_t prot)
                   1413: {
                   1414:        int spl;
                   1415:        pt_entry_t *pte, ap;
                   1416:        vaddr_t va, eseg;
                   1417:
                   1418:        if ((prot & VM_PROT_READ) == 0) {
                   1419:                pmap_remove(pmap, s, e);
                   1420:                return;
                   1421:        }
                   1422:
                   1423:        ap = m88k_protection(prot);
                   1424:
                   1425:        spl = splvm();
                   1426:        PMAP_LOCK(pmap);
                   1427:
                   1428:        /*
                   1429:         * Loop through the range in PAGE_SIZE increments.
                   1430:         */
                   1431:        va = s;
                   1432:        while (va != e) {
                   1433:                sdt_entry_t *sdt;
                   1434:
                   1435:                eseg = (va & SDT_MASK) + (1 << SDT_SHIFT);
                   1436:                if (eseg > e || eseg == 0)
                   1437:                        eseg = e;
                   1438:
                   1439:                sdt = SDTENT(pmap, va);
                   1440:
                   1441:                /* If no segment table, skip a whole segment */
                   1442:                if (!SDT_VALID(sdt))
                   1443:                        va = eseg;
                   1444:                else {
                   1445:                        while (va != eseg) {
                   1446:                                pte = sdt_pte(sdt, va);
                   1447:                                if (pte != NULL && PDT_VALID(pte)) {
                   1448:                                        /*
                   1449:                                         * Invalidate pte temporarily to avoid
                   1450:                                         * the modified bit and/or the
                   1451:                                         * reference bit being written back by
                   1452:                                         * any other cpu.
                   1453:                                         */
                   1454:                                        *pte = ap |
                   1455:                                            (invalidate_pte(pte) & ~PG_PROT);
                   1456:                                        flush_atc_entry(pmap, va);
                   1457:                                }
                   1458:                                va += PAGE_SIZE;
                   1459:                        }
                   1460:                }
                   1461:        }
                   1462:        PMAP_UNLOCK(pmap);
                   1463:        splx(spl);
                   1464: }
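/*
 * Editor's sketch, not part of the original source: downgrading a page
 * to read-only, and the removal path taken once read permission is
 * revoked. Addresses are illustrative.
 */
#if 0
	pmap_protect(pmap, va, va + PAGE_SIZE, VM_PROT_READ);	/* r/o */
	pmap_protect(pmap, va, va + PAGE_SIZE, VM_PROT_NONE);	/* behaves
								   as pmap_remove() */
#endif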
                   1465:
                   1466: /*
                   1467:  * Routine:    PMAP_EXPAND
                   1468:  *
                   1469:  * Function:
                   1470:  *     Expands a pmap to be able to map the specified virtual address.
                   1471:  *     New kernel virtual memory is allocated for a page table.
                   1472:  *
                   1473:  *     Must be called with the pmap system and the pmap unlocked, since
                   1474:  *     these must be unlocked to use vm_allocate or vm_deallocate (via
                    1475:  *     uvm_km_zalloc). Thus it must be called in an unlock/lock loop
                    1476:  *     that checks whether the map has been expanded enough. (We won't loop
                    1477:  *     forever, since page tables aren't shrunk.)
                   1478:  *
                   1479:  * Parameters:
                    1480:  *     pmap    pointer to pmap structure
                   1481:  *     v       VA indicating which tables are needed
                   1482:  *
                   1483:  * Extern/Global:
                   1484:  *     user_pt_map
                   1485:  *     kernel_pmap
                   1486:  *
                   1487:  * Calls:
                   1488:  *     pmap_pte
                   1489:  *     uvm_km_free
                   1490:  *     uvm_km_zalloc
                   1491:  *     pmap_extract
                   1492:  *
                   1493:  * Special Assumptions
                   1494:  *     no pmap locks held
                   1495:  *     pmap != kernel_pmap
                   1496:  *
                   1497:  * 1:  This routine immediately allocates space for a page table.
                   1498:  *
                   1499:  * 2:  The page table entries (PTEs) are initialized (set invalid), and
                   1500:  *     the corresponding segment table entry is set to point to the new
                   1501:  *     page table.
                   1502:  */
                   1503: void
                   1504: pmap_expand(pmap_t pmap, vaddr_t v)
                   1505: {
                   1506:        int spl;
                   1507:        vaddr_t pdt_vaddr;
                   1508:        paddr_t pdt_paddr;
                   1509:        sdt_entry_t *sdt;
                   1510:        pt_entry_t *pte;
                   1511:
                   1512: #ifdef DEBUG
                   1513:        if (pmap_con_dbg & CD_EXP)
                   1514:                printf ("(pmap_expand: %x) map %x v %x\n", curproc, pmap, v);
                   1515: #endif
                   1516:
                   1517:        /* XXX */
                   1518:        pdt_vaddr = uvm_km_zalloc(kernel_map, PAGE_SIZE);
                   1519:        if (pmap_extract(kernel_pmap, pdt_vaddr, &pdt_paddr) == FALSE)
                   1520:                panic("pmap_expand: pmap_extract failed");
                   1521:
                   1522:        /* memory for page tables should not be writeback or local */
                   1523:        pmap_cache_ctrl(kernel_pmap,
                   1524:            pdt_vaddr, pdt_vaddr + PAGE_SIZE, CACHE_WT);
                   1525:
                   1526:        spl = splvm();
                   1527:        PMAP_LOCK(pmap);
                   1528:
                   1529:        if ((pte = pmap_pte(pmap, v)) != NULL) {
                   1530:                /*
                   1531:                 * Someone else caused us to expand
                   1532:                 * during our vm_allocate.
                   1533:                 */
                   1534:                PMAP_UNLOCK(pmap);
                   1535:                uvm_km_free(kernel_map, pdt_vaddr, PAGE_SIZE);
                   1536:
                   1537: #ifdef DEBUG
                   1538:                if (pmap_con_dbg & CD_EXP)
                   1539:                        printf("(pmap_expand: %x) table has already been allocated\n", curproc);
                   1540: #endif
                   1541:                splx(spl);
                   1542:                return;
                   1543:        }
                   1544:        /*
                   1545:         * Apply a mask to V to obtain the vaddr of the beginning of
                   1546:         * its containing page 'table group', i.e. the group of
                    1547:         * page tables that fit within a single VM page.
                   1548:         * Using that, obtain the segment table pointer that references the
                   1549:         * first page table in the group, and initialize all the
                    1550:         * segment table descriptors for the page 'table group'.
                   1551:         */
                   1552:        v &= ~((1 << (PDT_BITS + PG_BITS)) - 1);
                   1553:
                   1554:        sdt = SDTENT(pmap, v);
                   1555:
                   1556:        /*
                    1557:         * Init each of the segment entries to point to the freshly allocated
                   1558:         * page tables.
                   1559:         */
                   1560:        *((sdt_entry_t *)sdt) = pdt_paddr | SG_RW | SG_V;
                   1561:        *((sdt_entry_t *)(sdt + SDT_ENTRIES)) = pdt_vaddr | SG_RW | SG_V;
                   1562:
                   1563:        PMAP_UNLOCK(pmap);
                   1564:        splx(spl);
                   1565: }
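/*
 * Editor's note, not part of the original source: the two stores above
 * maintain the paired layout this file relies on (see also the
 * "actual and shadow" comment in pmap_release(), and the matching pair
 * of stores in pmap_collect()). For segment index i:
 *
 *	pm_stab[i]                = pa of page table | SG_RW | SG_V
 *	pm_stab[i + SDT_ENTRIES]  = va of page table | SG_RW | SG_V
 *
 * The first half is what the CMMU walks; the shadow half lets the
 * kernel walk the same tables through virtual addresses.
 */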
                   1566:
                   1567: /*
                   1568:  * Routine:    PMAP_ENTER
                   1569:  *
                   1570:  * Function:
                    1571:  *     Insert the given physical page (pa) at the specified virtual
                    1572:  *     address (va) in the target physical map with the protection requested.
                    1573:  *     If specified, the page will be wired down, meaning that the
                    1574:  *     related pte cannot be reclaimed.
                   1575:  *
                    1576:  * N.B.: This is the only routine which MAY NOT lazy-evaluate or lose
                   1577:  *     information. That is, this routine must actually insert this page
                   1578:  *     into the given map NOW.
                   1579:  *
                   1580:  * Parameters:
                   1581:  *     pmap    pointer to pmap structure
                   1582:  *     va      VA of page to be mapped
                   1583:  *     pa      PA of page to be mapped
                   1584:  *     prot    protection attributes for page
                   1585:  *     wired   wired attribute for page
                   1586:  *
                   1587:  * Extern/Global:
                   1588:  *     pv lists
                   1589:  *
                   1590:  * Calls:
                   1591:  *     pmap_pte
                   1592:  *     pmap_expand
                   1593:  *     pmap_remove_pte
                   1594:  *
                   1595:  *     This routine starts off by calling pmap_pte to obtain a (virtual)
                   1596:  * pointer to the page table entry corresponding to given virtual
                   1597:  * address. If the page table itself does not exist, pmap_expand is
                   1598:  * called to allocate it.
                   1599:  *
                   1600:  *     If the page table entry (PTE) already maps the given physical page,
                   1601:  * all that is needed is to set the protection and wired attributes as
                   1602:  * given. TLB entries are flushed and pmap_enter returns.
                   1603:  *
                   1604:  *     If the page table entry (PTE) maps a different physical page than
                    1605:  * that given, the old mapping is removed by a call to pmap_remove_pte,
                    1606:  * and execution of pmap_enter continues.
                   1607:  *
                   1608:  *     To map the new physical page, the routine first inserts a new
                   1609:  * entry in the PV list exhibiting the given pmap and virtual address.
                   1610:  * It then inserts the physical page address, protection attributes, and
                   1611:  * wired attributes into the page table entry (PTE).
                   1612:  *
                   1613:  *
                   1614:  *     get machine-dependent prot code
                   1615:  *     get the pte for this page
                   1616:  *     if necessary pmap_expand(pmap, v)
                   1617:  *     if (changing wired attribute or protection) {
                   1618:  *             flush entry from TLB
                   1619:  *             update template
                   1620:  *             for (ptes per vm page)
                   1621:  *                     stuff pte
                    1622:  *     } else if (mapped at wrong addr) {
                   1623:  *             flush entry from TLB
                   1624:  *             pmap_remove_pte
                   1625:  *     } else {
                   1626:  *             enter mapping in pv_list
                   1627:  *             setup template and stuff ptes
                   1628:  *     }
                   1629:  *
                   1630:  */
                   1631: int
                   1632: pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
                   1633: {
                   1634:        int spl;
                   1635:        pt_entry_t *pte, template;
                   1636:        paddr_t old_pa;
                   1637:        pv_entry_t pv_e, pvl;
                   1638:        boolean_t wired = (flags & PMAP_WIRED) != 0;
                   1639:        struct vm_page *pg;
                   1640:
                   1641: #ifdef DEBUG
                   1642:        if (pmap_con_dbg & CD_ENT) {
                   1643:                if (pmap == kernel_pmap)
                   1644:                        printf("(pmap_enter: %x) pmap kernel va %x pa %x\n", curproc, va, pa);
                   1645:                else
                   1646:                        printf("(pmap_enter: %x) pmap %x va %x pa %x\n", curproc, pmap, va, pa);
                   1647:        }
                   1648: #endif
                   1649:
                   1650:        template = m88k_protection(prot);
                   1651:
                   1652:        spl = splvm();
                   1653:        PMAP_LOCK(pmap);
                   1654:
                   1655:        /*
                   1656:         * Expand pmap to include this pte.
                   1657:         */
                   1658:        while ((pte = pmap_pte(pmap, va)) == NULL) {
                   1659:                if (pmap == kernel_pmap) {
                   1660:                        /* will only return NULL if PMAP_CANFAIL is set */
                    1661:                        if (pmap_expand_kmap(va, VM_PROT_READ | VM_PROT_WRITE,
                    1662:                            flags & PMAP_CANFAIL) == NULL) {
                    1663:                                PMAP_UNLOCK(pmap);
                                                        splx(spl);
                                                        return (ENOMEM);
                                                }
                   1664:                } else {
                   1665:                        /*
                   1666:                         * Must unlock to expand the pmap.
                   1667:                         */
                   1668:                        PMAP_UNLOCK(pmap);
                   1669:                        pmap_expand(pmap, va);
                   1670:                        PMAP_LOCK(pmap);
                   1671:                }
                   1672:        }
                   1673:        /*
                   1674:         * Special case if the physical page is already mapped at this address.
                   1675:         */
                   1676:        old_pa = ptoa(PG_PFNUM(*pte));
                   1677: #ifdef DEBUG
                   1678:        if (pmap_con_dbg & CD_ENT)
                   1679:                printf("(pmap_enter) old_pa %x pte %x\n", old_pa, *pte);
                   1680: #endif
                   1681:
                   1682:        pg = PHYS_TO_VM_PAGE(pa);
                   1683:        if (pg != NULL)
                   1684:                pvl = pg_to_pvh(pg);
                   1685:        else
                   1686:                pvl = NULL;
                   1687:
                   1688:        if (old_pa == pa) {
                   1689:                /* May be changing its wired attributes or protection */
                   1690:                if (wired && !(pmap_pte_w(pte)))
                   1691:                        pmap->pm_stats.wired_count++;
                   1692:                else if (!wired && pmap_pte_w(pte))
                   1693:                        pmap->pm_stats.wired_count--;
                   1694:        } else {
                   1695:                /* Remove old mapping from the PV list if necessary. */
                   1696:                pmap_remove_pte(pmap, va, pte);
                   1697:
                   1698:                if (pvl != NULL) {
                   1699:                        /*
                   1700:                         * Enter the mapping in the PV list for this
                   1701:                         * managed page.
                   1702:                         */
                   1703:                        if (pvl->pv_pmap == NULL) {
                   1704:                                /*
                   1705:                                 *      No mappings yet
                   1706:                                 */
                   1707:                                pvl->pv_va = va;
                   1708:                                pvl->pv_pmap = pmap;
                   1709:                                pvl->pv_next = NULL;
                   1710:                                pvl->pv_flags = 0;
                   1711:
                   1712:                        } else {
                   1713:                                /*
                   1714:                                 * Add new pv_entry after header.
                   1715:                                 */
                   1716:                                pv_e = pool_get(&pvpool, PR_NOWAIT);
                   1717:                                if (pv_e == NULL) {
                   1718:                                        if (flags & PMAP_CANFAIL) {
                   1719:                                                PMAP_UNLOCK(pmap);
                   1720:                                                splx(spl);
                   1721:                                                return (ENOMEM);
                   1722:                                        } else
                   1723:                                                panic("pmap_enter: "
                   1724:                                                    "pvpool exhausted");
                   1725:                                }
                   1726:                                pv_e->pv_va = va;
                   1727:                                pv_e->pv_pmap = pmap;
                   1728:                                pv_e->pv_next = pvl->pv_next;
                   1729:                                pv_e->pv_flags = 0;
                   1730:                                pvl->pv_next = pv_e;
                   1731:                        }
                   1732:                }
                   1733:
                   1734:                /*
                   1735:                 * And count the mapping.
                   1736:                 */
                   1737:                pmap->pm_stats.resident_count++;
                   1738:                if (wired)
                   1739:                        pmap->pm_stats.wired_count++;
                   1740:        } /* if (pa == old_pa) ... else */
                   1741:
                   1742:        template |= PG_V;
                   1743:        if (wired)
                   1744:                template |= PG_W;
                   1745:
                   1746:        /*
                   1747:         * If outside physical memory, disable cache on this (I/O) page.
                   1748:         */
                   1749:        if ((unsigned long)pa >= last_addr)
                   1750:                template |= CACHE_INH;
                   1751:
                   1752:        if (flags & VM_PROT_WRITE)
                   1753:                template |= PG_M_U;
                   1754:        else if (flags & VM_PROT_ALL)
                   1755:                template |= PG_U;
                   1756:
                   1757:        /*
                    1758:         * Invalidate the pte temporarily to avoid the modified bit
                    1759:         * and/or the referenced bit being written back by
                    1760:         * any other cpu.
                   1761:         */
                   1762:        template |= invalidate_pte(pte) & PG_M_U;
                   1763:        *pte = template | pa;
                   1764:        flush_atc_entry(pmap, va);
                   1765: #ifdef DEBUG
                   1766:        if (pmap_con_dbg & CD_ENT)
                   1767:                printf("(pmap_enter) set pte to %x\n", *pte);
                   1768: #endif
                   1769:
                   1770:        /*
                    1771:         * Save the modified/used attribute bits for the managed page.
                   1772:         */
                   1773:        if (pvl != NULL)
                   1774:                pvl->pv_flags |= template & PG_M_U;
                   1775:
                   1776:        PMAP_UNLOCK(pmap);
                   1777:        splx(spl);
                   1778:
                   1779:        return 0;
                   1780: }
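/*
 * Editor's sketch, not part of the original source: how a fallible
 * caller is expected to drive the ENOMEM paths above. PMAP_CANFAIL is
 * passed in flags alongside the access type bits tested near the end
 * of pmap_enter(); values here are illustrative.
 */
#if 0
	if (pmap_enter(pmap, va, pa, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_CANFAIL) == ENOMEM) {
		/* back off and retry once memory pressure eases */
	}
#endif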
                   1781:
                   1782: /*
                   1783:  * Routine:    pmap_unwire
                   1784:  *
                   1785:  * Function:   Change the wiring attributes for a map/virtual-address pair.
                   1786:  *
                   1787:  * Parameters:
                   1788:  *     pmap    pointer to pmap structure
                   1789:  *     v       virtual address of page to be unwired
                   1790:  *
                   1791:  * Calls:
                   1792:  *     pmap_pte
                   1793:  *
                   1794:  * Special Assumptions:
                   1795:  *     The mapping must already exist in the pmap.
                   1796:  */
                   1797: void
                   1798: pmap_unwire(pmap_t pmap, vaddr_t v)
                   1799: {
                   1800:        pt_entry_t *pte;
                   1801:        int spl;
                   1802:
                   1803:        spl = splvm();
                   1804:        PMAP_LOCK(pmap);
                   1805:
                   1806:        if ((pte = pmap_pte(pmap, v)) == NULL)
                   1807:                panic("pmap_unwire: pte missing");
                   1808:
                   1809:        if (pmap_pte_w(pte)) {
                    1810:                /* unwire the mapping */
                   1811:                pmap->pm_stats.wired_count--;
                   1812:                *pte &= ~PG_W;
                   1813:        }
                   1814:
                   1815:        PMAP_UNLOCK(pmap);
                   1816:        splx(spl);
                   1817: }
                   1818:
                   1819: /*
                   1820:  * Routine:    PMAP_EXTRACT
                   1821:  *
                   1822:  * Function:
                    1823:  *     Extract the physical page address associated
                   1824:  *     with the given map/virtual_address pair.
                   1825:  *
                   1826:  * Parameters:
                   1827:  *     pmap            pointer to pmap structure
                   1828:  *     va              virtual address
                   1829:  *     pap             storage for result.
                   1830:  *
                   1831:  * Calls:
                   1832:  *     PMAP_LOCK, PMAP_UNLOCK
                   1833:  *     pmap_pte
                   1834:  *
                   1835:  * The routine calls pmap_pte to get a (virtual) pointer to
                   1836:  * the page table entry (PTE) associated with the given virtual
                   1837:  * address. If the page table does not exist, or if the PTE is not valid,
                    1838:  * then FALSE is returned. Otherwise, TRUE is returned and the physical
                    1839:  * page address from the PTE is stored through pap.
                   1840:  */
                   1841: boolean_t
                   1842: pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
                   1843: {
                   1844:        pt_entry_t *pte;
                   1845:        paddr_t pa;
                   1846:        int spl;
                   1847:        boolean_t rv = FALSE;
                   1848:
                   1849: #ifdef DIAGNOSTIC
                   1850:        if (pmap == NULL)
                   1851:                panic("pmap_extract: pmap is NULL");
                   1852: #endif
                   1853:
                   1854: #ifdef M88100
                   1855:        /*
                   1856:         * 88100-based designs have two hardwired BATC entries which map
                   1857:         * the upper 1MB 1:1 in supervisor space.
                   1858:         */
                   1859:        if (CPU_IS88100) {
                   1860:                if (va >= BATC8_VA && pmap == kernel_pmap) {
                   1861:                        *pap = va;
                   1862:                        return (TRUE);
                   1863:                }
                   1864:        }
                   1865: #endif
                   1866:
                   1867:        spl = splvm();
                   1868:        PMAP_LOCK(pmap);
                   1869:
                   1870:        pte = pmap_pte(pmap, va);
                   1871:        if (pte != NULL && PDT_VALID(pte)) {
                   1872:                rv = TRUE;
                   1873:                if (pap != NULL) {
                   1874:                        pa = ptoa(PG_PFNUM(*pte));
                   1875:                        pa |= (va & PAGE_MASK); /* offset within page */
                   1876:                        *pap = pa;
                   1877:                }
                   1878:        }
                   1879:
                   1880:        PMAP_UNLOCK(pmap);
                   1881:        splx(spl);
                   1882:        return rv;
                   1883: }
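/*
 * Editor's sketch, not part of the original source: typical use of
 * pmap_extract() for a va -> pa translation; the pa is only valid when
 * TRUE is returned.
 */
#if 0
	paddr_t pa;

	if (pmap_extract(pmap, va, &pa) == FALSE)
		panic("no translation for va 0x%lx", va);
#endif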
                   1884:
                   1885: /*
                   1886:  * Routine:    PMAP_COLLECT
                   1887:  *
                    1888:  * Function:
                   1889:  *     Garbage collects the physical map system for pages which are
                    1890:  *     no longer used. There may well be pages which are not
                   1891:  *     referenced, but others may be collected as well.
                   1892:  *     Called by the pageout daemon when pages are scarce.
                   1893:  *
                   1894:  * Parameters:
                   1895:  *     pmap            pointer to pmap structure
                   1896:  *
                   1897:  * Calls:
                   1898:  *     pmap_pte
                   1899:  *     pmap_remove_range
                   1900:  *     uvm_km_free
                   1901:  *
                    1902:  *     The intent of this routine is to release memory pages being used
                    1903:  * by translation tables. They can be released only if they contain no
                    1904:  * valid mappings, and their parent table entry has been invalidated.
                    1905:  *
                    1906:  *     The routine sequences through the entire user address space,
                    1907:  * inspecting page-sized groups of page tables for wired entries. If
                    1908:  * a full page of tables has no wired entries, any otherwise valid
                   1909:  * entries are invalidated (via pmap_remove_range). Then, the segment
                   1910:  * table entries corresponding to this group of page tables are
                   1911:  * invalidated. Finally, uvm_km_free is called to return the page to the
                   1912:  * system.
                   1913:  *
                   1914:  *     If all entries in a segment table are invalidated, it too can
                   1915:  * be returned to the system.
                   1916:  */
                   1917: void
                   1918: pmap_collect(pmap_t pmap)
                   1919: {
                   1920:        u_int sdt;              /* outer loop index */
                   1921:        vaddr_t sdt_va;
                   1922:        sdt_entry_t *sdtp;      /* ptr to index into segment table */
                   1923:        pt_entry_t *gdttbl;     /* ptr to first entry in a page table */
                   1924:        pt_entry_t *gdttblend;  /* ptr to byte after last entry in
                   1925:                                   table group */
                   1926:        pt_entry_t *gdtp;       /* ptr to index into a page table */
                   1927:        boolean_t found_gdt_wired; /* flag indicating a wired page exists
                   1928:                                   in a page table's address range */
                   1929:        int spl;
                   1930:
                   1931: #ifdef DEBUG
                   1932:        if (pmap_con_dbg & CD_COL)
                   1933:                printf ("(pmap_collect: %x) pmap %x\n", curproc, pmap);
                   1934: #endif
                   1935:
                   1936:        spl = splvm();
                   1937:        PMAP_LOCK(pmap);
                   1938:
                   1939:        sdtp = pmap->pm_stab; /* addr of segment table */
                   1940:
                   1941:        /* segment table loop */
                   1942:        for (sdt = VM_MIN_ADDRESS >> SDT_SHIFT;
                   1943:            sdt <= VM_MAX_ADDRESS >> SDT_SHIFT; sdt++, sdtp++) {
                   1944:                sdt_va = sdt << SDT_SHIFT;
                   1945:                gdttbl = pmap_pte(pmap, sdt_va);
                   1946:                if (gdttbl == NULL)
                   1947:                        continue; /* no maps in this range */
                   1948:
                   1949:                gdttblend = gdttbl + PDT_ENTRIES;
                   1950:
                   1951:                /* scan page maps for wired pages */
                   1952:                found_gdt_wired = FALSE;
                   1953:                for (gdtp = gdttbl; gdtp < gdttblend; gdtp++) {
                   1954:                        if (pmap_pte_w(gdtp)) {
                   1955:                                found_gdt_wired = TRUE;
                   1956:                                break;
                   1957:                        }
                   1958:                }
                   1959:
                   1960:                if (found_gdt_wired)
                   1961:                        continue; /* can't free this range */
                   1962:
                   1963:                /* invalidate all maps in this range */
                   1964:                pmap_remove_range(pmap, sdt_va, sdt_va + (1 << SDT_SHIFT));
                   1965:
                   1966:                /*
                   1967:                 * we can safely deallocate the page map(s)
                   1968:                 */
                   1969:                *((sdt_entry_t *) sdtp) = 0;
                   1970:                *((sdt_entry_t *)(sdtp + SDT_ENTRIES)) = 0;
                   1971:
                   1972:                /*
                   1973:                 * we have to unlock before freeing the table, since
                   1974:                 * uvm_km_free will invoke another pmap routine
                   1975:                 */
                   1976:                PMAP_UNLOCK(pmap);
                   1977:                uvm_km_free(kernel_map, (vaddr_t)gdttbl, PAGE_SIZE);
                   1978:                PMAP_LOCK(pmap);
                   1979:        }
                   1980:
                   1981:        PMAP_UNLOCK(pmap);
                   1982:        splx(spl);
                   1983:
                   1984: #ifdef DEBUG
                   1985:        if (pmap_con_dbg & CD_COL)
                   1986:                printf("(pmap_collect: %x) done\n", curproc);
                   1987: #endif
                   1988: }
                   1989:
                   1990: /*
                   1991:  * Routine:    PMAP_ACTIVATE
                   1992:  *
                   1993:  * Function:
                    1994:  *     Binds the pmap associated with the process to the current processor.
                   1995:  *
                   1996:  * Parameters:
                   1997:  *     p       pointer to proc structure
                   1998:  *
                   1999:  * Notes:
                   2000:  *     If the specified pmap is not kernel_pmap, this routine stores its
                   2001:  *     apr template into UAPR (user area pointer register) in the
                   2002:  *     CMMUs connected to the specified CPU.
                   2003:  *
                   2004:  *     Then it flushes the TLBs mapping user virtual space, in the CMMUs
                   2005:  *     connected to the specified CPU.
                   2006:  */
                   2007: void
                   2008: pmap_activate(struct proc *p)
                   2009: {
                   2010:        pmap_t pmap = vm_map_pmap(&p->p_vmspace->vm_map);
                   2011:        int cpu = cpu_number();
                   2012:
                   2013: #ifdef DEBUG
                   2014:        if (pmap_con_dbg & CD_ACTIVATE)
                   2015:                printf("(pmap_activate: %x) pmap %p\n", p, pmap);
                   2016: #endif
                   2017:
                   2018:        if (pmap != kernel_pmap) {
                   2019:                /*
                   2020:                 * Lock the pmap to put this cpu in its active set.
                   2021:                 */
                   2022:                PMAP_LOCK(pmap);
                   2023:
                   2024:                cmmu_set_uapr(pmap->pm_apr);
                   2025:                cmmu_flush_tlb(cpu, FALSE, 0, -1);
                   2026:
                   2027:                /*
                   2028:                 * Mark that this cpu is using the pmap.
                   2029:                 */
                   2030:                SETBIT_CPUSET(cpu, &(pmap->pm_cpus));
                   2031:
                   2032:                PMAP_UNLOCK(pmap);
                   2033:        }
                   2034: }
                   2035:
                   2036: /*
                   2037:  * Routine:    PMAP_DEACTIVATE
                   2038:  *
                   2039:  * Function:
                    2040:  *     Unbinds the pmap associated with the process from the current processor.
                   2041:  *
                   2042:  * Parameters:
                   2043:  *     p               pointer to proc structure
                   2044:  */
                   2045: void
                   2046: pmap_deactivate(struct proc *p)
                   2047: {
                   2048:        pmap_t pmap = vm_map_pmap(&p->p_vmspace->vm_map);
                   2049:        int cpu = cpu_number();
                   2050:
                   2051:        if (pmap != kernel_pmap) {
                   2052:                /*
                   2053:                 * We expect the spl to already have been raised to sched level.
                   2054:                 */
                   2055:                PMAP_LOCK(pmap);
                   2056:                CLRBIT_CPUSET(cpu, &(pmap->pm_cpus));
                   2057:                PMAP_UNLOCK(pmap);
                   2058:        }
                   2059: }

/*
 * Routine:	PMAP_COPY_PAGE
 *
 * Function:
 *	Copies the contents of the source page to the destination page.
 *
 * Parameters:
 *	srcpg	source physical page
 *	dstpg	destination physical page
 *
 * Extern/Global:
 *	phys_map_vaddr
 *
 * Special Assumptions:
 *	no locking required
 *
 * This routine maps the physical pages at the 'phys_map' virtual
 * addresses set up in pmap_bootstrap. It flushes the TLB to make the
 * new mappings effective, and performs the copy.
 */
void
pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
{
	paddr_t src = VM_PAGE_TO_PHYS(srcpg);
	paddr_t dst = VM_PAGE_TO_PHYS(dstpg);
	vaddr_t dstva, srcva;
	int spl;
	pt_entry_t *dstpte, *srcpte;
	int cpu = cpu_number();

	/*
	 * Each cpu owns a pair of consecutive pages in the phys_map
	 * window: the first is used as the copy destination, the one
	 * right after it as the copy source.
	 */
	dstva = (vaddr_t)(phys_map_vaddr + 2 * (cpu << PAGE_SHIFT));
	srcva = dstva + PAGE_SIZE;
	dstpte = pmap_pte(kernel_pmap, dstva);
	srcpte = pmap_pte(kernel_pmap, srcva);

	spl = splvm();

	*dstpte = m88k_protection(VM_PROT_READ | VM_PROT_WRITE) |
	    PG_M /* 88110 */ | PG_V | dst;
	*srcpte = m88k_protection(VM_PROT_READ) |
	    PG_V | src;

	/*
	 * We don't need the flush_atc_entry() dance, as these pages are
	 * bound to only one cpu.
	 */
	cmmu_flush_tlb(cpu, TRUE, dstva, 2);

	/*
	 * The source page is likely to be a non-kernel mapping, and as
	 * such cached in write-back mode. Also, we might have split U/S
	 * caches! So be sure to have the source pa flushed before the
	 * copy is attempted, and the destination pa flushed afterwards.
	 */
	cmmu_flush_data_page(cpu, src);
	bcopy((const void *)srcva, (void *)dstva, PAGE_SIZE);
	cmmu_flush_data_page(cpu, dst);

	splx(spl);
}
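
/*
 * Usage sketch (an assumption based on the standard UVM glue, which
 * lives outside this file): callers normally reach this routine through
 * the uvm_pagecopy() interface rather than calling it directly:
 *
 *	uvm_pagecopy(srcpg, dstpg);	resolves to pmap_copy_page()
 */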

/*
 * Routine:	PMAP_CHANGEBIT
 *
 * Function:
 *	Update the pte bits on the specified physical page.
 *
 * Parameters:
 *	pg	physical page
 *	set	bits to set
 *	mask	bits to preserve (all other bits are cleared)
 *
 * Extern/Global:
 *	pv_lists
 *
 * Calls:
 *	pmap_pte
 *
 * The pte bits corresponding to the page's frame index will be changed
 * as requested. The PV list will be traversed.
 * For each pmap/va pair, the necessary bits in the page descriptor table
 * entry will be altered as well if necessary. If any bits were changed,
 * a TLB flush will be performed.
 */
void
pmap_changebit(struct vm_page *pg, int set, int mask)
{
	pv_entry_t pvl, pvep;
	pt_entry_t *pte, npte, opte;
	pmap_t pmap;
	int spl;
	vaddr_t va;

	spl = splvm();

#ifdef MULTIPROCESSOR
changebit_Retry:
#endif
	pvl = pg_to_pvh(pg);

	/*
	 * Clear saved attributes (modify, reference)
	 */
	pvl->pv_flags &= mask;

	if (pvl->pv_pmap == NULL) {
#ifdef DEBUG
		if (pmap_con_dbg & CD_CBIT)
			printf("(pmap_changebit: %x) vm page 0x%x not mapped\n",
			    curproc, pg);
#endif
		splx(spl);
		return;
	}

	/* for each listed pmap, update the affected bits */
	for (pvep = pvl; pvep != NULL; pvep = pvep->pv_next) {
		pmap = pvep->pv_pmap;
#ifdef MULTIPROCESSOR
		if (!__cpu_simple_lock_try(&pmap->pm_lock)) {
			goto changebit_Retry;
		}
#endif
		va = pvep->pv_va;
		pte = pmap_pte(pmap, va);

		/*
		 * Check for existing and valid pte
		 */
		if (pte == NULL || !PDT_VALID(pte)) {
			goto next;	/* no page mapping */
		}
#ifdef DIAGNOSTIC
		if (ptoa(PG_PFNUM(*pte)) != VM_PAGE_TO_PHYS(pg))
			panic("pmap_changebit: pte %x in pmap %p doesn't point to page %p %lx",
			    *pte, pmap, pg, VM_PAGE_TO_PHYS(pg));
#endif

		/*
		 * Update bits
		 */
		opte = *pte;
		npte = (opte | set) & mask;

		/*
		 * Flush the TLB on every cpu using this pmap.
		 *
		 * Invalidate the pte temporarily to avoid the modified
		 * and/or referenced bits being written back by any other
		 * cpu.
		 */
		if (npte != opte) {
			invalidate_pte(pte);
			*pte = npte;
			flush_atc_entry(pmap, va);
		}
next:
		PMAP_UNLOCK(pmap);
	}
	splx(spl);
}
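
/*
 * Sketch of the set/mask convention above. The first call appears
 * verbatim in pmap_page_protect() below; the second mirrors the
 * equivalence quoted in the pmap_unsetbit() description:
 *
 *	pmap_changebit(pg, PG_RO, ~0);	set PG_RO, preserve all other bits
 *	pmap_changebit(pg, 0, ~PG_M);	clear only the modified bit
 */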

/*
 * Routine:	PMAP_TESTBIT
 *
 * Function:
 *	Test the modified/referenced bits of a physical page.
 *
 * Parameters:
 *	pg	physical page
 *	bit	bit to test
 *
 * Extern/Global:
 *	pv lists
 *
 * Calls:
 *	pmap_pte
 *
 * If the attribute list for the given page has the bit, this routine
 * returns TRUE.
 *
 * Otherwise, this routine walks the PV list corresponding to the
 * given page. For each pmap/va pair, the page descriptor table entry is
 * examined. If the selected bit is found set, the function updates the
 * attribute list and returns TRUE immediately, without walking the
 * remainder of the list.
 */
boolean_t
pmap_testbit(struct vm_page *pg, int bit)
{
	pv_entry_t pvl, pvep;
	pt_entry_t *pte;
	pmap_t pmap;
	int spl;

	spl = splvm();

#ifdef MULTIPROCESSOR
testbit_Retry:
#endif
	pvl = pg_to_pvh(pg);

	if (pvl->pv_flags & bit) {
		/* we've already cached this flag for this page,
		   no use looking further... */
#ifdef DEBUG
		if (pmap_con_dbg & CD_TBIT)
			printf("(pmap_testbit: %x) already cached a %x flag for this page\n",
			    curproc, bit);
#endif
		splx(spl);
		return (TRUE);
	}

	if (pvl->pv_pmap == NULL) {
#ifdef DEBUG
		if (pmap_con_dbg & CD_TBIT)
			printf("(pmap_testbit: %x) vm page 0x%x not mapped\n",
			    curproc, pg);
#endif
		splx(spl);
		return (FALSE);
	}

	/* for each listed pmap, check modified bit for given page */
	for (pvep = pvl; pvep != NULL; pvep = pvep->pv_next) {
		pmap = pvep->pv_pmap;
#ifdef MULTIPROCESSOR
		if (!__cpu_simple_lock_try(&pmap->pm_lock)) {
			goto testbit_Retry;
		}
#endif

		pte = pmap_pte(pmap, pvep->pv_va);
		if (pte == NULL || !PDT_VALID(pte)) {
			goto next;
		}

#ifdef DIAGNOSTIC
		if (ptoa(PG_PFNUM(*pte)) != VM_PAGE_TO_PHYS(pg))
			panic("pmap_testbit: pte %x in pmap %p %d doesn't point to page %p %lx",
			    *pte, pmap, pmap == kernel_pmap ? 1 : 0, pg, VM_PAGE_TO_PHYS(pg));
#endif

		if ((*pte & bit) != 0) {
			PMAP_UNLOCK(pmap);
			pvl->pv_flags |= bit;
#ifdef DEBUG
			if ((pmap_con_dbg & (CD_TBIT | CD_FULL)) == (CD_TBIT | CD_FULL))
				printf("(pmap_testbit: %x) true on page pte@%p\n", curproc, pte);
#endif
			splx(spl);
			return (TRUE);
		}
next:
		PMAP_UNLOCK(pmap);
	}

	splx(spl);
	return (FALSE);
}

/*
 * Routine:	PMAP_UNSETBIT
 *
 * Function:
 *	Clears a pte bit and returns its previous state, for the
 *	specified physical page.
 *	This is an optimized version of:
 *		rv = pmap_testbit(pg, bit);
 *		pmap_changebit(pg, 0, ~bit);
 *		return rv;
 */
boolean_t
pmap_unsetbit(struct vm_page *pg, int bit)
{
	boolean_t rv = FALSE;
	pv_entry_t pvl, pvep;
	pt_entry_t *pte, opte;
	pmap_t pmap;
	int spl;
	vaddr_t va;

	spl = splvm();

#ifdef MULTIPROCESSOR
unsetbit_Retry:
#endif
	pvl = pg_to_pvh(pg);

	/*
	 * Clear saved attributes
	 */
	pvl->pv_flags &= ~bit;

	if (pvl->pv_pmap == NULL) {
#ifdef DEBUG
		if (pmap_con_dbg & CD_USBIT)
			printf("(pmap_unsetbit: %x) vm page 0x%x not mapped\n",
			    curproc, pg);
#endif
		splx(spl);
		return (FALSE);
	}

	/* for each listed pmap, update the specified bit */
	for (pvep = pvl; pvep != NULL; pvep = pvep->pv_next) {
		pmap = pvep->pv_pmap;
#ifdef MULTIPROCESSOR
		if (!__cpu_simple_lock_try(&pmap->pm_lock)) {
			goto unsetbit_Retry;
		}
#endif
		va = pvep->pv_va;
		pte = pmap_pte(pmap, va);

		/*
		 * Check for existing and valid pte
		 */
		if (pte == NULL || !PDT_VALID(pte)) {
			goto next;	/* no page mapping */
		}
#ifdef DIAGNOSTIC
		if (ptoa(PG_PFNUM(*pte)) != VM_PAGE_TO_PHYS(pg))
			panic("pmap_unsetbit: pte %x in pmap %p doesn't point to page %p %lx",
			    *pte, pmap, pg, VM_PAGE_TO_PHYS(pg));
#endif

		/*
		 * Update bits
		 */
		opte = *pte;
		if (opte & bit) {
			/*
			 * The bit was set in at least one mapping, so the
			 * previous state to return is TRUE.
			 *
			 * Flush the TLB on every cpu using this pmap.
			 *
			 * Invalidate the pte temporarily to avoid the
			 * specified bit being written back by any other cpu.
			 */
			rv = TRUE;
			invalidate_pte(pte);
			*pte = opte ^ bit;
			flush_atc_entry(pmap, va);
		}
next:
		PMAP_UNLOCK(pmap);
	}
	splx(spl);

	return (rv);
}

/*
 * Routine:	PMAP_IS_MODIFIED
 *
 * Function:
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(struct vm_page *pg)
{
	return pmap_testbit(pg, PG_M);
}

/*
 * Routine:	PMAP_IS_REFERENCED
 *
 * Function:
 *	Return whether or not the specified physical page is referenced by
 *	any physical maps.
 */
boolean_t
pmap_is_referenced(struct vm_page *pg)
{
	return pmap_testbit(pg, PG_U);
}

/*
 * Routine:	PMAP_PAGE_PROTECT
 *
 * Function:
 *	Lower the permissions for all mappings of a given page.
 *
 * Calls:
 *	pmap_changebit
 *	pmap_remove_all
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_READ) == VM_PROT_NONE)
		pmap_remove_all(pg);
	else if ((prot & VM_PROT_WRITE) == VM_PROT_NONE)
		pmap_changebit(pg, PG_RO, ~0);
}
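
/*
 * Sketch of the two cases above (illustrative calls only):
 *
 *	pmap_page_protect(pg, VM_PROT_READ);	revoke write access; all
 *						mappings become read-only
 *	pmap_page_protect(pg, VM_PROT_NONE);	revoke all access; every
 *						mapping of pg is removed
 */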

void
pmap_virtual_space(vaddr_t *startp, vaddr_t *endp)
{
	*startp = virtual_avail;
	*endp = virtual_end;
}

void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	int spl;
	pt_entry_t template, *pte;

#ifdef DEBUG
	if (pmap_con_dbg & CD_ENT) {
		printf("(pmap_kenter_pa: %x) va %x pa %x\n", curproc, va, pa);
	}
#endif

	spl = splvm();
	PMAP_LOCK(kernel_pmap);

	template = m88k_protection(prot);
#ifdef M88110
	if (CPU_IS88110 && template != PG_RO)
		template |= PG_M;
#endif

	/*
	 * Expand pmap to include this pte.
	 */
	while ((pte = pmap_pte(kernel_pmap, va)) == NULL)
		pmap_expand_kmap(va, VM_PROT_READ | VM_PROT_WRITE, 0);

	/*
	 * And count the mapping.
	 */
	kernel_pmap->pm_stats.resident_count++;
	kernel_pmap->pm_stats.wired_count++;

	invalidate_pte(pte);

	/*
	 * If outside physical memory, disable cache on this (I/O) page.
	 */
	if ((unsigned long)pa >= last_addr)
		template |= CACHE_INH | PG_V | PG_W;
	else
		template |= PG_V | PG_W;
	*pte = template | pa;
	flush_atc_entry(kernel_pmap, va);

	PMAP_UNLOCK(kernel_pmap);
	splx(spl);
}
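
/*
 * Usage sketch (dev_pa below is a hypothetical device address): mapping
 * an I/O page at a kernel virtual address with
 *
 *	pmap_kenter_pa(va, dev_pa, VM_PROT_READ | VM_PROT_WRITE);
 *
 * creates a wired entry, cache-inhibited since a device pa lies beyond
 * last_addr and thus triggers the CACHE_INH case above.
 */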

void
pmap_kremove(vaddr_t va, vsize_t len)
{
	int spl;
	vaddr_t e, eseg;

#ifdef DEBUG
	if (pmap_con_dbg & CD_RM)
		printf("(pmap_kremove: %x) va %x len %x\n", curproc, va, len);
#endif

	spl = splvm();
	PMAP_LOCK(kernel_pmap);

	e = va + len;
	while (va != e) {
		sdt_entry_t *sdt;
		pt_entry_t *pte;

		/*
		 * Compute the end of the current segment; eseg == 0 means
		 * the computation wrapped around the top of the address
		 * space.
		 */
		eseg = (va & SDT_MASK) + (1 << SDT_SHIFT);
		if (eseg > e || eseg == 0)
			eseg = e;

		sdt = SDTENT(kernel_pmap, va);

		/* If no segment table, skip a whole segment */
		if (!SDT_VALID(sdt))
			va = eseg;
		else {
			while (va != eseg) {
				pte = sdt_pte(sdt, va);
				if (pte != NULL && PDT_VALID(pte)) {
					/* Update the counts */
					kernel_pmap->pm_stats.resident_count--;
					kernel_pmap->pm_stats.wired_count--;

					invalidate_pte(pte);
					flush_atc_entry(kernel_pmap, va);
				}
				va += PAGE_SIZE;
			}
		}
	}
	PMAP_UNLOCK(kernel_pmap);
	splx(spl);
}

void
pmap_proc_iflush(struct proc *p, vaddr_t va, vsize_t len)
{
	pmap_t pmap = vm_map_pmap(&p->p_vmspace->vm_map);
	paddr_t pa;
	vsize_t count;
	u_int32_t users;
	int cpu;

	while (len != 0) {
		/* do not span a page boundary in a single flush */
		count = min(len, PAGE_SIZE - (va & PAGE_MASK));
		if (pmap_extract(pmap, va, &pa)) {
			/*
			 * Flush the instruction cache on every cpu the
			 * pmap is active on; ff1() returns the number of
			 * a set bit, or 32 when no bit is set.
			 */
			users = pmap->pm_cpus;
			while ((cpu = ff1(users)) != 32) {
				cmmu_flush_inst_cache(cpu, pa, count);
				users &= ~(1 << cpu);
			}
		}
		va += count;
		len -= count;
	}
}
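
/*
 * Note (hedged, since the call sites live outside this file): this is
 * the hook the rest of the kernel uses to keep the instruction caches
 * coherent after writing into another process's text, for instance when
 * ptrace(2) plants a breakpoint.
 */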